@zhuqingzhang 2018-09-28T10:31:00.000000Z

SVO Code Analysis

Uncategorized

frame.h

  1. typedef g2o::VertexSE3Expmap g2oFrameSE3; // the g2o pose vertex type used for keyframes
  2. typedef list<Feature*> Features;
  3. typedef vector<cv::Mat> ImgPyr; // holds the image pyramid
  4. class Frame
  5. {
  6. static int frame_counter_; //!< Counts the number of created frames. Used to set the unique id.
  7. int id_; //!< Unique id of the frame.
  8. double timestamp_; //!< Timestamp of when the image was recorded.
  9. vk::AbstractCamera* cam_; //!< Camera model.
  10. Sophus::SE3 T_f_w_; //!< Transform (f)rame from (w)orld. Its inverse is the pose of this frame in the world coordinate frame.
  11. Matrix<double, 6, 6> Cov_; //!< Covariance.
  12. ImgPyr img_pyr_; //!< Image Pyramid.
  13. Features fts_; //!< List of features in the image.
  14. vector<Feature*> key_pts_; //!< Five features and associated 3D points which are used to detect if two frames have overlapping field of view.
  15. bool is_keyframe_; //!< Was this frames selected as keyframe?
  16. g2oFrameSE3* v_kf_; //!< Temporary pointer to the g2o node object of the keyframe.
  17. int last_published_ts_; //!< Timestamp of last publishing.
  18. Frame(vk::AbstractCamera* cam, const cv::Mat& img, double timestamp);
  19. ~Frame();
  20. /// Initialize new frame and create image pyramid.
  21. void initFrame(const cv::Mat& img); // initializes the new frame and builds the image pyramid with frame_utils::createImgPyramid; the number of levels is max(Config::nPyrLevels, Config::kltMaxLevel). (A sketch of the pyramid construction follows this listing.)
  22. /// Select this frame as keyframe.
  23. void setKeyframe(); // calls setKeyPoints() and sets is_keyframe_ = true
  24. /// Add a feature to the image
  25. void addFeature(Feature* ftr); // stores the feature ftr in fts_
  26. /// The KeyPoints are those five features which are closest to the 4 image corners
  27. /// and to the center and which have a 3D point assigned. These points are used
  28. /// to quickly check whether two frames have overlapping field of view.
  29. void setKeyPoints(); // iterates over all features in fts_, runs checkKeyPoints on each and selects the five key-points stored in key_pts_: features close to the four image corners and the image center that have an associated 3D point; they are used to quickly test whether two frames have an overlapping field of view
  30. /// Check if we can select five better key-points.
  31. void checkKeyPoints(Feature* ftr); // the center key-point should be as close to the image center as possible, the four corner key-points as far from the center as possible; the order of the five points in key_pts_ is: center, top-right, bottom-right, bottom-left, top-left
  32. /// If a point is deleted, we must remove the corresponding key-point.
  33. void removeKeyPoint(Feature* ftr); // after removing a key-point, setKeyPoints() is run again so that key_pts_ always holds five points
  34. /// Return number of point observations.
  35. inline size_t nObs() const { return fts_.size(); }
  36. /// Check if a point in (w)orld coordinate frame is visible in the image.
  37. bool isVisible(const Vector3d& xyz_w) const;
  38. /// Full resolution image stored in the frame.
  39. inline const cv::Mat& img() const { return img_pyr_[0]; }
  40. /// Was this frame selected as keyframe?
  41. inline bool isKeyframe() const { return is_keyframe_; }
  42. /// Transforms point coordinates in world-frame (w) to camera pixel coordinates (c).
  43. inline Vector2d w2c(const Vector3d& xyz_w) const { return cam_->world2cam( T_f_w_ * xyz_w ); } // world to pixel
  44. /// Transforms pixel coordinates (c) to frame unit sphere coordinates (f).
  45. inline Vector3d c2f(const Vector2d& px) const { return cam_->cam2world(px[0], px[1]); } // pixel to camera (bearing vector)
  46. /// Transforms pixel coordinates (c) to frame unit sphere coordinates (f).
  47. inline Vector3d c2f(const double x, const double y) const { return cam_->cam2world(x, y); } // pixel to camera (bearing vector)
  48. /// Transforms point coordinates in world-frame (w) to camera-frame (f).
  49. inline Vector3d w2f(const Vector3d& xyz_w) const { return T_f_w_ * xyz_w; } // world to camera
  50. /// Transforms point from frame unit sphere (f) frame to world coordinate frame (w).
  51. inline Vector3d f2w(const Vector3d& f) const { return T_f_w_.inverse() * f; } // camera to world
  52. /// Projects Point from unit sphere (f) in camera pixels (c).
  53. inline Vector2d f2c(const Vector3d& f) const { return cam_->world2cam( f ); } // camera to pixel
  54. /// Return the pose of the frame in the (w)orld coordinate frame.
  55. inline Vector3d pos() const { return T_f_w_.inverse().translation(); } // returns the position of this frame in the world coordinate frame
  56. };
  57. /// Some helper functions for the frame object.
  58. namespace frame_utils {
  59. /// Creates an image pyramid of half-sampled images.
  60. void createImgPyramid(const cv::Mat& img_level_0, int n_levels, ImgPyr& pyr);
  61. /// Get the average depth of the features in the image.
  62. bool getSceneDepth(const Frame& frame, double& depth_mean, double& depth_min); // computes the average depth of the features in the image and the minimum depth among the points observed by the frame
  63. } // namespace frame_utils
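
The pyramid used by initFrame is a plain chain of half-sampled images. A minimal sketch of createImgPyramid, assuming cv::resize is an acceptable stand-in for the half-sampling routine SVO takes from vikit:

    // Minimal sketch of a half-sampled image pyramid. SVO uses vikit's
    // halfSample(); cv::resize is used here to keep the example self-contained.
    #include <opencv2/imgproc.hpp>
    #include <vector>

    typedef std::vector<cv::Mat> ImgPyr;

    void createImgPyramidSketch(const cv::Mat& img_level_0, int n_levels, ImgPyr& pyr)
    {
      pyr.resize(n_levels);
      pyr[0] = img_level_0;  // level 0 is the full-resolution image
      for (int i = 1; i < n_levels; ++i)
      {
        // every level has half the width and height of the previous one
        cv::resize(pyr[i - 1], pyr[i],
                   cv::Size(pyr[i - 1].cols / 2, pyr[i - 1].rows / 2));
      }
    }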

feature.h

  1. /// A salient image region that is tracked across frames.
  2. struct Feature
  3. {
  4. EIGEN_MAKE_ALIGNED_OPERATOR_NEW
  5. enum FeatureType {
  6. CORNER,
  7. EDGELET
  8. };
  9. FeatureType type; //!< Type can be corner or edgelet.
  10. Frame* frame; //!< Pointer to frame in which the feature was detected.
  11. Vector2d px; //!< Coordinates in pixels on pyramid level 0.
  12. Vector3d f; //!< Unit-bearing vector of the feature. // f = [(u-ox)/fx , (v-oy)/fy , 1] ; f * depth --> [x , y , z] : 3d in this frame. f is the pixel back-projected onto the normalized image plane (see the sketch after this listing)
  13. int level; //!< Image pyramid level where feature was extracted.
  14. Point* point; //!< Pointer to 3D point which corresponds to the feature.
  15. Vector2d grad; //!< Dominant gradient direction for edgelets, normalized.
  16. Feature(Frame* _frame, const Vector2d& _px, int _level) :
  17. type(CORNER),
  18. frame(_frame),
  19. px(_px),
  20. f(frame->cam_->cam2world(px)),
  21. level(_level),
  22. point(NULL),
  23. grad(1.0,0.0)
  24. {}
  25. Feature(Frame* _frame, const Vector2d& _px, const Vector3d& _f, int _level) :
  26. type(CORNER),
  27. frame(_frame),
  28. px(_px),
  29. f(_f),
  30. level(_level),
  31. point(NULL),
  32. grad(1.0,0.0)
  33. {}
  34. Feature(Frame* _frame, Point* _point, const Vector2d& _px, const Vector3d& _f, int _level) :
  35. type(CORNER),
  36. frame(_frame),
  37. px(_px),
  38. f(_f),
  39. level(_level),
  40. point(_point),
  41. grad(1.0,0.0)
  42. {}
  43. };
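
For a plain pinhole camera without distortion, the bearing vector f noted above is just the pixel lifted onto the normalized image plane and normalized to unit length; vikit's camera models handle distortion and other projection types. A sketch under that assumption:

    // Bearing vector for an undistorted pinhole camera (illustrative only).
    #include <Eigen/Core>

    Eigen::Vector3d cam2worldPinhole(double u, double v,
                                     double fx, double fy, double cx, double cy)
    {
      // lift the pixel onto the normalized image plane (z = 1) ...
      Eigen::Vector3d f((u - cx) / fx, (v - cy) / fy, 1.0);
      // ... and normalize; multiplying by the distance along the ray gives the 3D point
      return f.normalized();
    }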

Point.h

  1. typedef g2o::VertexSBAPointXYZ g2oPoint;
  2. class Feature;
  3. typedef Matrix<double, 2, 3> Matrix23d;
  4. /// A 3D point on the surface of the scene.
  5. class Point : boost::noncopyable
  6. {
  7. public:
  8. EIGEN_MAKE_ALIGNED_OPERATOR_NEW
  9. enum PointType {
  10. TYPE_DELETED,
  11. TYPE_CANDIDATE,
  12. TYPE_UNKNOWN,
  13. TYPE_GOOD
  14. };
  15. static int point_counter_; //!< Counts the number of created points. Used to set the unique id.
  16. int id_; //!< Unique ID of the point.
  17. Vector3d pos_; //!< 3d pos of the point in the world coordinate frame.
  18. Vector3d normal_; //!< Surface normal at point.
  19. Matrix3d normal_information_; //!< Inverse covariance matrix of normal estimation.
  20. bool normal_set_; //!< Flag whether the surface normal was estimated or not.
  21. list<Feature*> obs_; //!< References to keyframes which observe the point. The point can be observed by several keyframes; obs_ stores, for each observing keyframe, the corresponding feature.
  22. size_t n_obs_; //!< Number of observations: Keyframes AND successful reprojections in intermediate frames.
  23. g2oPoint* v_pt_; //!< Temporary pointer to the point-vertex in g2o during bundle adjustment.
  24. int last_published_ts_; //!< Timestamp of last publishing.
  25. int last_projected_kf_id_; //!< Flag for the reprojection: don't reproject a pt twice.
  26. PointType type_; //!< Quality of the point.
  27. int n_failed_reproj_; //!< Number of failed reprojections. Used to assess the quality of the point.
  28. int n_succeeded_reproj_; //!< Number of succeeded reprojections. Used to assess the quality of the point.
  29. int last_structure_optim_; //!< Timestamp of last point optimization
  30. Point(const Vector3d& pos);
  31. Point(const Vector3d& pos, Feature* ftr); // every time a point is constructed, point_counter_ is incremented and the point's id_ is set to the current value of point_counter_
  32. ~Point();
  33. /// Add a reference to a frame.
  34. void addFrameRef(Feature* ftr); //obs_.push_front(ftr), n_obs_++;
  35. /// Remove reference to a frame.
  36. bool deleteFrameRef(Frame* frame); // removes the feature corresponding to frame from obs_
  37. /// Initialize point normal. The inital estimate will point towards the frame.
  38. void initNormal(); // initializes the point's normal_ and normal_information_
  39. /// Check whether mappoint has reference to a frame.
  40. Feature* findFrameRef(Frame* frame); // iterates over obs_; if a feature in obs_ belongs to the given frame, that feature is returned
  41. /// Get Frame with similar viewpoint.
  42. bool getCloseViewObs(const Vector3d& pos, Feature*& obs) const; // pos is the position of the querying frame. Iterates over obs_ to find the keyframe whose viewing direction onto this 3D point forms the smallest angle with that of the querying frame and returns its feature in obs (needed for feature alignment). If the smallest angle exceeds 60 degrees, no close view exists and false is returned.
  43. /// Get number of observations.
  44. inline size_t nRefs() const { return obs_.size(); }
  45. /// Optimize point position through minimizing the reprojection error.
  46. void optimize(const size_t n_iter); // optimizes the point position using the observations in obs_ (see the Gauss-Newton sketch after this listing)
  47. /// Jacobian of point projection on unit plane (focal length = 1) in frame (f).
  48. inline static void jacobian_xyz2uv(
  49. const Vector3d& p_in_f, // 3D point in the camera frame
  50. const Matrix3d& R_f_w,
  51. Matrix23d& point_jac)
  52. {
  53. const double z_inv = 1.0/p_in_f[2];
  54. const double z_inv_sq = z_inv*z_inv;
  55. point_jac(0, 0) = z_inv;
  56. point_jac(0, 1) = 0.0;
  57. point_jac(0, 2) = -p_in_f[0] * z_inv_sq;
  58. point_jac(1, 0) = 0.0;
  59. point_jac(1, 1) = z_inv;
  60. point_jac(1, 2) = -p_in_f[1] * z_inv_sq;
  61. point_jac = - point_jac * R_f_w;
  62. }
  63. };
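
Point::optimize is essentially a small Gauss-Newton loop over obs_ built on jacobian_xyz2uv. The following structure-only sketch uses the Frame, Feature and Matrix23d types from the listings above; it is illustrative only (SVO additionally tracks the chi2 and rolls back a step if the error grows):

    // Structure-only Gauss-Newton sketch for Point::optimize (not SVO verbatim).
    #include <Eigen/Dense>

    void optimizePointSketch(Point& pt, size_t n_iter)
    {
      for (size_t i = 0; i < n_iter; ++i)
      {
        Eigen::Matrix3d A = Eigen::Matrix3d::Zero();
        Eigen::Vector3d b = Eigen::Vector3d::Zero();
        for (Feature* ftr : pt.obs_)
        {
          const Eigen::Vector3d p = ftr->frame->T_f_w_ * pt.pos_;  // point in the camera frame
          Matrix23d J;
          Point::jacobian_xyz2uv(p, ftr->frame->T_f_w_.rotation_matrix(), J);
          // residual on the unit plane: observed bearing minus predicted projection
          const Eigen::Vector2d e = ftr->f.head<2>() / ftr->f[2] - p.head<2>() / p[2];
          A.noalias() += J.transpose() * J;
          b.noalias() -= J.transpose() * e;
        }
        pt.pos_ += A.ldlt().solve(b);  // Gauss-Newton step on the 3D position
      }
    }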

feature_detection.h

  1. namespace svo {
  2. /// Implementation of various feature detectors.
  3. namespace feature_detection {
  4. /// Temporary container used for corner detection. Features are initialized from these.
  5. struct Corner
  6. {
  7. int x; //!< x-coordinate of corner in the image.
  8. int y; //!< y-coordinate of corner in the image.
  9. int level; //!< pyramid level of the corner.
  10. float score; //!< shi-tomasi score of the corner //gftt
  11. float angle; //!< for gradient-features: dominant gradient angle.
  12. Corner(int x, int y, float score, int level, float angle) :
  13. x(x), y(y), level(level), score(score), angle(angle)
  14. {}
  15. };
  16. typedef vector<Corner> Corners;
  17. /// All detectors should derive from this abstract class.
  18. class AbstractDetector
  19. {
  20. public:
  21. AbstractDetector(
  22. const int img_width,
  23. const int img_height,
  24. const int cell_size,
  25. const int n_pyr_levels);
  26. virtual ~AbstractDetector() {};
  27. virtual void detect(
  28. Frame* frame,
  29. const ImgPyr& img_pyr,
  30. const double detection_threshold,
  31. Features& fts) = 0;
  32. /// Flag the grid cell as occupied
  33. void setGridOccpuancy(const Vector2d& px); // if px falls into a grid cell, the corresponding entry of grid_occupancy_ is set to true
  34. /// Set grid cells of existing features as occupied
  35. void setExistingFeatures(const Features& fts); // iterates over fts and sets the grid_occupancy_ entry of each cell that contains a feature's pixel to true
  36. protected:
  37. static const int border_ = 8; //!< no feature should be within 8px of border.
  38. // the members below are all initialized in the AbstractDetector constructor
  39. const int cell_size_; // the image is partitioned into a grid; this is the size (edge length in pixels) of one cell
  40. const int n_pyr_levels_;
  41. const int grid_n_cols_; // number of grid columns
  42. const int grid_n_rows_; // number of grid rows
  43. vector<bool> grid_occupancy_; // one flag per grid cell, recording whether a feature has already fallen into that cell
  44. void resetGrid(); // sets all entries of grid_occupancy_ to false
  45. inline int getCellIndex(int x, int y, int level) { // for a point (x, y) detected on the given pyramid level, returns the index of its grid cell at level 0
  46. const int scale = (1<<level);
  47. return (scale*y)/cell_size_*grid_n_cols_ + (scale*x)/cell_size_;
  48. }
  49. };
  50. typedef boost::shared_ptr<AbstractDetector> DetectorPtr;
  51. /// FAST detector by Edward Rosten.
  52. class FastDetector : public AbstractDetector
  53. {
  54. public:
  55. FastDetector(
  56. const int img_width,
  57. const int img_height,
  58. const int cell_size,
  59. const int n_pyr_levels);
  60. virtual ~FastDetector() {}
  61. virtual void detect(
  62. Frame* frame,
  63. const ImgPyr& img_pyr,
  64. const double detection_threshold,
  65. Features& fts);
  66. }; // a Corners vector with one entry per grid cell is created and initialized. For every pyramid level, FAST corners are extracted and non-max suppressed into nm_corners; each corner is skipped if its cell is already occupied, otherwise its Shi-Tomasi score is computed and, if it beats the score stored in corners[k], it replaces corners[k]. The result is at most one best corner per cell. Finally, every corner whose score exceeds the detection threshold is turned into a feature and pushed into fts, and resetGrid() is called. (A simplified sketch of this per-cell selection follows the listing.)
  67. } // namespace feature_detection
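
The per-cell selection described in the comment of detect() can be summarized with the following simplified sketch. It is not the SVO implementation: FAST detection is delegated to OpenCV, the FAST response replaces the Shi-Tomasi score, and only a single pyramid level is shown.

    // Simplified "best corner per grid cell" selection (single level, OpenCV FAST).
    #include <opencv2/features2d.hpp>
    #include <vector>

    struct CornerCandidate { int x; int y; float score; };

    std::vector<CornerCandidate> detectGriddedFast(const cv::Mat& img, int cell_size, double thresh)
    {
      const int n_cols = (img.cols + cell_size - 1) / cell_size;
      const int n_rows = (img.rows + cell_size - 1) / cell_size;
      std::vector<CornerCandidate> best(n_cols * n_rows, CornerCandidate{0, 0, -1.f});

      std::vector<cv::KeyPoint> kps;
      cv::FAST(img, kps, /*threshold=*/20, /*nonmaxSuppression=*/true);

      for (const cv::KeyPoint& kp : kps)
      {
        // index of the grid cell the corner falls into
        const int k = int(kp.pt.y) / cell_size * n_cols + int(kp.pt.x) / cell_size;
        // keep only the strongest corner per cell
        if (kp.response > best[k].score)
          best[k] = CornerCandidate{int(kp.pt.x), int(kp.pt.y), kp.response};
      }

      // keep cells whose best corner exceeds the detection threshold
      std::vector<CornerCandidate> out;
      for (const CornerCandidate& c : best)
        if (c.score > thresh)
          out.push_back(c);
      return out;
    }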

sparse_img_align.h

  1. namespace vk {
  2. class AbstractCamera;
  3. }
  4. namespace svo {
  5. class Feature;
  6. /// Optimize the pose of the frame by minimizing the photometric error of feature patches.
  7. class SparseImgAlign : public vk::NLLSSolver<6, SE3>
  8. {
  9. static const int patch_halfsize_ = 2; // half-size of the patch used for the alignment
  10. static const int patch_size_ = 2*patch_halfsize_;
  11. static const int patch_area_ = patch_size_*patch_size_;
  12. public:
  13. EIGEN_MAKE_ALIGNED_OPERATOR_NEW
  14. cv::Mat resimg_;
  15. SparseImgAlign(
  16. int n_levels,
  17. int min_level,
  18. int n_iter,
  19. Method method,
  20. bool display,
  21. bool verbose); // constructor: n_iter, method and verbose initialize the corresponding members of vk::NLLSSolver; n_levels corresponds to max_level_
  22. size_t run(
  23. FramePtr ref_frame,
  24. FramePtr cur_frame); // initializes the cache variables and runs the alignment; returns n_meas/patch_area (roughly the number of features with valid observations)
  25. /// Return fisher information matrix, i.e. the Hessian of the log-likelihood
  26. /// at the converged state.
  27. Matrix<double, 6, 6> getFisherInformation();
  28. protected:
  29. FramePtr ref_frame_; //!< reference frame, has depth for gradient pixels.
  30. FramePtr cur_frame_; //!< only the image is known!
  31. int level_; //!< current pyramid level on which the optimization runs.
  32. bool display_; //!< display residual image.
  33. int max_level_; //!< coarsest pyramid level for the alignment
  34. int min_level_; //!< finest pyramid level for the alignment.
  35. // cache:
  36. Matrix<double, 6, Dynamic, ColMajor> jacobian_cache_; // the number of columns equals the number of entries of ref_patch_cache_; each column is the Jacobian of one patch pixel
  37. bool have_ref_patch_cache_;
  38. cv::Mat ref_patch_cache_; // rows: number of features in the reference frame; columns: patch_area_
  39. std::vector<bool> visible_fts_; // size: number of features in the reference frame
  40. void precomputeReferencePatches(); // computes the Jacobian of every reference feature patch, stores them all in jacobian_cache_ and sets have_ref_patch_cache_ to true (computeResiduals requires this step to have run)
  41. // the following five functions are pure virtual in the base class vk::NLLSSolver and are implemented in this class
  42. virtual double computeResiduals(const SE3& model, bool linearize_system, bool compute_weight_scale = false); // builds the direct-method objective: transforms the 3D points of the reference frame into the current frame, projects them, computes the photometric error between the corresponding patches and accumulates H and b (see the residual sketch after this listing)
  43. virtual int solve(); // solves H x = b with LDLT
  44. virtual void update (const ModelType& old_model, ModelType& new_model); // updates the relative pose
  45. virtual void startIteration();
  46. virtual void finishIteration();
  47. };
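
The residual that computeResiduals accumulates for one feature patch can be sketched as below. The sketch assumes a plain pinhole projection and nearest-neighbour intensity lookup; SVO uses the vikit camera model, bilinear interpolation, caches the reference patch and its Jacobian, and sums over all visible features.

    // Photometric residual of a single feature patch (illustrative sketch).
    #include <Eigen/Core>
    #include <opencv2/core.hpp>
    #include <sophus/se3.h>

    double patchResidual(const cv::Mat& ref_img, const cv::Mat& cur_img,   // 8-bit grayscale images
                         const Eigen::Vector2d& px_ref,                    // feature pixel in the reference image
                         const Eigen::Vector3d& xyz_ref,                   // 3D point in the reference camera frame
                         const Sophus::SE3& T_cur_ref,                     // relative pose being optimized
                         double fx, double fy, double cx, double cy,
                         int halfpatch = 2)
    {
      // transform the point into the current frame and project it (pinhole, no distortion)
      const Eigen::Vector3d p = T_cur_ref * xyz_ref;
      const double u_cur = fx * p.x() / p.z() + cx;
      const double v_cur = fy * p.y() / p.z() + cy;

      double chi2 = 0.0;   // sum of squared photometric errors over the patch
      for (int dy = -halfpatch; dy < halfpatch; ++dy)
        for (int dx = -halfpatch; dx < halfpatch; ++dx)
        {
          const double i_cur = cur_img.at<uint8_t>(int(v_cur) + dy, int(u_cur) + dx);
          const double i_ref = ref_img.at<uint8_t>(int(px_ref.y()) + dy, int(px_ref.x()) + dx);
          const double res = i_cur - i_ref;   // photometric error of one pixel (bounds checks omitted)
          chi2 += res * res;
        }
      return chi2;
    }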

feature_alignment.h

  1. namespace svo {
  2. /// Subpixel refinement of a reference feature patch with the current image.
  3. /// Implements the inverse-compositional approach (see the "Lucas-Kanade 20 Years On"
  4. /// paper by Baker and Matthews). A simplified sketch of align2D follows this listing.
  5. namespace feature_alignment {
  6. bool align1D(
  7. const cv::Mat& cur_img,
  8. const Vector2f& dir, // direction in which the patch is allowed to move
  9. uint8_t* ref_patch_with_border,
  10. uint8_t* ref_patch,
  11. const int n_iter,
  12. Vector2d& cur_px_estimate,
  13. double& h_inv);
  14. bool align2D(
  15. const cv::Mat& cur_img,
  16. uint8_t* ref_patch_with_border,
  17. uint8_t* ref_patch,
  18. const int n_iter,
  19. Vector2d& cur_px_estimate,
  20. bool no_simd = false);
  21. bool align2D_SSE2(
  22. const cv::Mat& cur_img,
  23. uint8_t* ref_patch_with_border,
  24. uint8_t* ref_patch,
  25. const int n_iter,
  26. Vector2d& cur_px_estimate);
  27. bool align2D_NEON(
  28. const cv::Mat& cur_img,
  29. uint8_t* ref_patch_with_border,
  30. uint8_t* ref_patch,
  31. const int n_iter,
  32. Vector2d& cur_px_estimate);
  33. } // namespace feature_alignment
  34. } // namespace svo
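
A translation-only version of the inverse-compositional alignment behind align2D looks roughly as follows. It is a simplified sketch: SVO's align2D additionally estimates a mean intensity offset, works on an 8x8 patch extracted with a 1-pixel border and interpolates the current image bilinearly.

    // 2D inverse-compositional patch alignment, translation only (sketch).
    // ref_patch is a small CV_32F image; px_est is the estimate of its top-left
    // corner in cur_img and is refined in place.
    #include <Eigen/Dense>
    #include <opencv2/core.hpp>

    bool align2dSketch(const cv::Mat& cur_img, const cv::Mat& ref_patch,
                       int n_iter, Eigen::Vector2d& px_est)
    {
      const int w = ref_patch.cols, h = ref_patch.rows;

      // Precompute reference gradients and the constant 2x2 Hessian; this is what
      // makes the inverse-compositional formulation cheap.
      cv::Mat gx(h, w, CV_64F, 0.0), gy(h, w, CV_64F, 0.0);
      Eigen::Matrix2d H = Eigen::Matrix2d::Zero();
      for (int y = 1; y < h - 1; ++y)
        for (int x = 1; x < w - 1; ++x)
        {
          gx.at<double>(y, x) = 0.5 * (ref_patch.at<float>(y, x + 1) - ref_patch.at<float>(y, x - 1));
          gy.at<double>(y, x) = 0.5 * (ref_patch.at<float>(y + 1, x) - ref_patch.at<float>(y - 1, x));
          const Eigen::Vector2d J(gx.at<double>(y, x), gy.at<double>(y, x));
          H += J * J.transpose();
        }
      const Eigen::Matrix2d Hinv = H.inverse();

      for (int it = 0; it < n_iter; ++it)
      {
        Eigen::Vector2d Jres = Eigen::Vector2d::Zero();
        for (int y = 1; y < h - 1; ++y)
          for (int x = 1; x < w - 1; ++x)
          {
            // nearest-neighbour lookup for brevity; SVO interpolates bilinearly
            const double cur = cur_img.at<uint8_t>(int(px_est.y()) + y, int(px_est.x()) + x);
            const double res = cur - ref_patch.at<float>(y, x);
            Jres += res * Eigen::Vector2d(gx.at<double>(y, x), gy.at<double>(y, x));
          }
        const Eigen::Vector2d update = Hinv * Jres;
        px_est -= update;                        // inverse compositional: subtract the update
        if (update.norm() < 0.03) return true;   // converged
      }
      return false;
    }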

pose_optimizer.h

  1. namespace svo {
  2. using namespace Eigen;
  3. using namespace Sophus;
  4. using namespace std;
  5. typedef Matrix<double,6,6> Matrix6d;
  6. typedef Matrix<double,2,6> Matrix26d;
  7. typedef Matrix<double,6,1> Vector6d;
  8. class Point;
  9. /// Motion-only bundle adjustment. Minimize the reprojection error of a single frame.
  10. namespace pose_optimizer {
  11. void optimizeGaussNewton(
  12. const double reproj_thresh,
  13. const size_t n_iter,
  14. const bool verbose,
  15. FramePtr& frame,
  16. double& estimated_scale,
  17. double& error_init,
  18. double& error_final,
  19. size_t& num_obs); // motion-only BA: optimizes the frame pose with Gauss-Newton. After the iterations, the reprojection error is re-evaluated with the optimized pose and observations with large error are dropped, which yields the final number of observations num_obs. estimated_scale, error_init, error_final and num_obs are written back. (A simplified sketch of one iteration follows this listing.)
  20. } // namespace pose_optimizer
  21. } // namespace svo
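
One Gauss-Newton iteration of this motion-only BA can be sketched as follows, using the standard 2x6 Jacobian of the unit-plane projection with respect to a left-multiplied se(3) perturbation (Frame::jacobian_xyz2uv in SVO). The robust weighting and the final outlier removal of optimizeGaussNewton are omitted, so treat it as an illustration rather than the implementation.

    // One Gauss-Newton step of motion-only pose optimization (sketch).
    #include <Eigen/Dense>
    #include <sophus/se3.h>

    // 2x6 Jacobian of the unit-plane projection w.r.t. a left se(3) perturbation
    // [translation, rotation] of T_f_w, evaluated at the point p in camera coordinates.
    static void jacobianXyz2uv(const Eigen::Vector3d& p, Eigen::Matrix<double, 2, 6>& J)
    {
      const double x = p[0], y = p[1], z_inv = 1.0 / p[2], z_inv2 = z_inv * z_inv;
      J << -z_inv,    0.0, x * z_inv2,        x * y * z_inv2, -(1.0 + x * x * z_inv2),  y * z_inv,
              0.0, -z_inv, y * z_inv2, 1.0 + y * y * z_inv2,        -x * y * z_inv2,   -x * z_inv;
    }

    void poseGaussNewtonStep(Frame& frame)
    {
      Eigen::Matrix<double, 6, 6> A = Eigen::Matrix<double, 6, 6>::Zero();
      Eigen::Matrix<double, 6, 1> b = Eigen::Matrix<double, 6, 1>::Zero();
      for (Feature* ftr : frame.fts_)
      {
        if (ftr->point == NULL) continue;
        const Eigen::Vector3d p = frame.T_f_w_ * ftr->point->pos_;      // point in the camera frame
        Eigen::Matrix<double, 2, 6> J;
        jacobianXyz2uv(p, J);
        // reprojection error on the unit plane (robust weights omitted)
        const Eigen::Vector2d e = ftr->f.head<2>() / ftr->f[2] - p.head<2>() / p[2];
        A.noalias() += J.transpose() * J;
        b.noalias() -= J.transpose() * e;
      }
      const Eigen::Matrix<double, 6, 1> dT = A.ldlt().solve(b);
      frame.T_f_w_ = Sophus::SE3::exp(dT) * frame.T_f_w_;               // left-multiplicative update
    }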

matcher.h

  1. namespace vk {
  2. class AbstractCamera;
  3. namespace patch_score {
  4. template<int HALF_PATCH_SIZE> class ZMSSD;
  5. }
  6. }
  7. namespace svo {
  8. class Point;
  9. class Frame;
  10. class Feature;
  11. /// Warp a patch from the reference view to the current view.
  12. namespace warp {
  13. void getWarpMatrixAffine(
  14. const vk::AbstractCamera& cam_ref,
  15. const vk::AbstractCamera& cam_cur,
  16. const Vector2d& px_ref,
  17. const Vector3d& f_ref,
  18. const double depth_ref,
  19. const SE3& T_cur_ref,
  20. const int level_ref,
  21. Matrix2d& A_cur_ref); // computes the affine warp matrix A_cur_ref (see the sketch after this listing)
  22. int getBestSearchLevel(
  23. const Matrix2d& A_cur_ref,
  24. const int max_level); //{
  25. // Compute patch level in other image
  26. /*int search_level = 0;
  27. double D = A_cur_ref.determinant();
  28. while(D > 3.0 && search_level < max_level)
  29. {
  30. search_level += 1;
  31. D *= 0.25;
  32. }
  33. return search_level;
  34. } */
  35. void warpAffine(
  36. const Matrix2d& A_cur_ref,
  37. const cv::Mat& img_ref,
  38. const Vector2d& px_ref,
  39. const int level_ref,
  40. const int level_cur,
  41. const int halfpatch_size,
  42. uint8_t* patch); // applies the affine warp to the patch around the reference pixel; uint8_t* patch points, in row order, to the warped pixel values. In practice patch is the Matcher member patch_with_border_.
  43. } // namespace warp
  44. /// Patch-matcher for reprojection-matching and epipolar search in triangulation.
  45. class Matcher
  46. {
  47. public:
  48. EIGEN_MAKE_ALIGNED_OPERATOR_NEW
  49. static const int halfpatch_size_ = 4;
  50. static const int patch_size_ = 8;
  51. typedef vk::patch_score::ZMSSD<halfpatch_size_> PatchScore; // zero-mean SSD, used to score patch similarity during matching
  52. struct Options
  53. {
  54. bool align_1d; //!< in epipolar search: align patch 1D along epipolar line
  55. int align_max_iter; //!< number of iterations for aligning the feature patches in gauss newton
  56. double max_epi_length_optim;//!< max length of epipolar line to skip epipolar search and directly go to img align. That is, when the epipolar segment is shorter than this value, no epipolar search is performed and the image (feature) alignment is run directly.
  57. size_t max_epi_search_steps;//!< max number of evaluations along epipolar line
  58. bool subpix_refinement; //!< do gauss newton feature patch alignment after epipolar search
  59. bool epi_search_edgelet_filtering;
  60. double epi_search_edgelet_max_angle;
  61. Options() :
  62. align_1d(false),
  63. align_max_iter(10),
  64. max_epi_length_optim(2.0),
  65. max_epi_search_steps(1000),
  66. subpix_refinement(true),
  67. epi_search_edgelet_filtering(true),
  68. epi_search_edgelet_max_angle(0.7)
  69. {}
  70. } options_;
  71. uint8_t patch_[patch_size_*patch_size_] __attribute__ ((aligned (16)));
  72. uint8_t patch_with_border_[(patch_size_+2)*(patch_size_+2)] __attribute__ ((aligned (16)));
  73. Matrix2d A_cur_ref_; //!< affine warp matrix
  74. Vector2d epi_dir_;
  75. double epi_length_; //!< length of epipolar line segment in pixels (only used for epipolar search)
  76. double h_inv_; //!< hessian of 1d image alignment along epipolar line
  77. int search_level_;
  78. bool reject_;
  79. Feature* ref_ftr_;
  80. Vector2d px_cur_;
  81. Matcher() = default;
  82. ~Matcher() = default;
  83. /// Find a match by directly applying subpix refinement.
  84. /// IMPORTANT! This function assumes that px_cur is already set to an estimate that is within ~2-3 pixel of the final result!
  85. bool findMatchDirect(
  86. const Point& pt,
  87. const Frame& frame,
  88. Vector2d& px_cur);
  89. /// Find a match by searching along the epipolar line without using any features.
  90. bool findEpipolarMatchDirect(
  91. const Frame& ref_frame,
  92. const Frame& cur_frame,
  93. const Feature& ref_ftr,
  94. const double d_estimate,
  95. const double d_min,
  96. const double d_max,
  97. double& depth); // if the epipolar segment is shorter than 2 px, px_cur is simply (px_A + px_B)/2; otherwise an epipolar search is performed: points are sampled at fixed steps along the epipolar line, the similarity between each sampled patch and the reference patch is scored (ZMSSD) and the best-scoring sample is kept. If options_.subpix_refinement is true, feature alignment is run afterwards to refine the match.
  98. void createPatchFromPatchWithBorder();
  99. };
  100. } // namespace svo
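
getWarpMatrixAffine can be sketched as follows: two patch offsets are back-projected in the reference view, brought to the depth of the 3D point, and re-projected into the current view; the pixel differences form the columns of A_cur_ref. This is close to what SVO does but reproduced from memory, so treat the details as illustrative.

    // Sketch of the affine warp estimation between reference and current view.
    void getWarpMatrixAffineSketch(
        const vk::AbstractCamera& cam_ref, const vk::AbstractCamera& cam_cur,
        const Vector2d& px_ref, const Vector3d& f_ref, const double depth_ref,
        const SE3& T_cur_ref, const int level_ref, Matrix2d& A_cur_ref)
    {
      const int halfpatch = 5;                               // slightly larger than the matching patch
      const Vector3d xyz_ref = f_ref * depth_ref;            // 3D point in the reference frame
      // back-project two pixels offset along u and v (at the feature's pyramid level) ...
      Vector3d xyz_du = cam_ref.cam2world(px_ref + Vector2d(halfpatch, 0) * (1 << level_ref));
      Vector3d xyz_dv = cam_ref.cam2world(px_ref + Vector2d(0, halfpatch) * (1 << level_ref));
      // ... and bring them to the same depth as the feature
      xyz_du *= xyz_ref[2] / xyz_du[2];
      xyz_dv *= xyz_ref[2] / xyz_dv[2];
      // project all three points into the current image
      const Vector2d px_cur = cam_cur.world2cam(T_cur_ref * xyz_ref);
      const Vector2d px_du  = cam_cur.world2cam(T_cur_ref * xyz_du);
      const Vector2d px_dv  = cam_cur.world2cam(T_cur_ref * xyz_dv);
      // finite-difference approximation of the 2x2 affine warp
      A_cur_ref.col(0) = (px_du - px_cur) / halfpatch;
      A_cur_ref.col(1) = (px_dv - px_cur) / halfpatch;
    }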

depth_filter.h

  1. namespace svo {
  2. class Frame;
  3. class Feature;
  4. class Point;
  5. /// A seed is a probabilistic depth estimate for a single pixel.
  6. struct Seed
  7. {
  8. EIGEN_MAKE_ALIGNED_OPERATOR_NEW
  9. static int batch_counter;
  10. static int seed_counter;
  11. int batch_id; //!< Batch id is the id of the keyframe for which the seed was created.
  12. int id; //!< Seed ID, only used for visualization.
  13. Feature* ftr; //!< Feature in the keyframe for which the depth should be computed.
  14. float a; //!< a of Beta distribution: When high, probability of inlier is large.
  15. float b; //!< b of Beta distribution: When high, probability of outlier is large.
  16. float mu; //!< Mean of normal distribution.
  17. float z_range; //!< Max range of the possible depth.
  18. float sigma2; //!< Variance of normal distribution.
  19. Matrix2d patch_cov; //!< Patch covariance in reference image.
  20. Seed(Feature* ftr, float depth_mean, float depth_min); // mu = 1/depth_mean; z_range = 1/depth_min; sigma2 = (z_range^2)/36; the filter works in inverse depth
  21. };
  22. /// Depth filter implements the Bayesian Update proposed in:
  23. /// "Video-based, Real-Time Multi View Stereo" by G. Vogiatzis and C. Hernández.
  24. /// In Image and Vision Computing, 29(7):434-441, 2011.
  25. ///
  26. /// The class uses a callback mechanism such that it can be used also by other
  27. /// algorithms than nslam and for simplified testing.
  28. class DepthFilter
  29. {
  30. public:
  31. EIGEN_MAKE_ALIGNED_OPERATOR_NEW
  32. typedef boost::unique_lock<boost::mutex> lock_t;
  33. typedef boost::function<void ( Point*, double )> callback_t;
  34. /// Depth-filter config parameters
  35. struct Options
  36. {
  37. bool check_ftr_angle; //!< gradient features are only updated if the epipolar line is orthogonal to the gradient.
  38. bool epi_search_1d; //!< restrict Gauss Newton in the epipolar search to the epipolar line.
  39. bool verbose; //!< display output.
  40. bool use_photometric_disparity_error; //!< use photometric disparity error instead of 1px error in tau computation.
  41. int max_n_kfs; //!< maximum number of keyframes for which we maintain seeds.
  42. double sigma_i_sq; //!< image noise.
  43. double seed_convergence_sigma2_thresh; //!< threshold on depth uncertainty for convergence.
  44. Options()
  45. : check_ftr_angle(false),
  46. epi_search_1d(false),
  47. verbose(false),
  48. use_photometric_disparity_error(false),
  49. max_n_kfs(3),
  50. sigma_i_sq(5e-4),
  51. seed_convergence_sigma2_thresh(200.0)
  52. {}
  53. } options_;
  54. DepthFilter(
  55. feature_detection::DetectorPtr feature_detector,
  56. callback_t seed_converged_cb);
  57. virtual ~DepthFilter();
  58. /// Start this thread when seed updating should be in a parallel thread.
  59. void startThread();
  60. /// Stop the parallel thread that is running.
  61. void stopThread();
  62. /// Add frame to the queue to be processed.
  63. void addFrame(FramePtr frame); // queues the frame; it is consumed by updateSeeds
  64. /// Add new keyframe to the queue
  65. void addKeyframe(FramePtr frame, double depth_mean, double depth_min); // initializeSeeds
  66. /// Remove all seeds which are initialized from the specified keyframe. This
  67. /// function is used to make sure that no seeds points to a non-existent frame
  68. /// when a frame is removed from the map.
  69. void removeKeyframe(FramePtr frame); // when a keyframe is removed, all seeds in seeds_ that were created from that keyframe are removed as well
  70. /// If the map is reset, call this function such that we don't have pointers
  71. /// to old frames.
  72. void reset(); // clears both seeds_ and frame_queue_
  73. /// Returns a copy of the seeds belonging to frame. Thread-safe.
  74. /// Can be used to compute the Next-Best-View in parallel.
  75. /// IMPORTANT! Make sure you hold a valid reference counting pointer to frame
  76. /// so it is not being deleted while you use it.
  77. void getSeedsCopy(const FramePtr& frame, std::list<Seed>& seeds);
  78. /// Return a reference to the seeds. This is NOT THREAD SAFE!
  79. std::list<Seed, aligned_allocator<Seed> >& getSeeds() { return seeds_; }
  80. /// Bayes update of the seed, x is the measurement, tau2 the measurement uncertainty
  81. static void updateSeed(
  82. const float x,
  83. const float tau2,
  84. Seed* seed); // used inside updateSeeds(); performs the Bayesian update of mu and sigma2 (and of the Beta parameters a, b). A sketch follows this listing.
  85. /// Compute the uncertainty of the measurement.
  86. static double computeTau(
  87. const SE3& T_ref_cur,
  88. const Vector3d& f,
  89. const double z,
  90. const double px_error_angle);
  91. protected:
  92. feature_detection::DetectorPtr feature_detector_;
  93. callback_t seed_converged_cb_;
  94. std::list<Seed, aligned_allocator<Seed> > seeds_;
  95. boost::mutex seeds_mut_; // mutex used to synchronize access to the seeds between threads
  96. bool seeds_updating_halt_; //!< Set this value to true when seeds updating should be interrupted.
  97. boost::thread* thread_;
  98. std::queue<FramePtr> frame_queue_;
  99. boost::mutex frame_queue_mut_;
  100. boost::condition_variable frame_queue_cond_;
  101. FramePtr new_keyframe_; //!< Next keyframe to extract new seeds.
  102. bool new_keyframe_set_; //!< Do we have a new keyframe to process?.
  103. double new_keyframe_min_depth_; //!< Minimum depth in the new keyframe. Used for range in new seeds.
  104. double new_keyframe_mean_depth_; //!< Mean depth in the new keyframe. Used for range in new seeds.
  105. vk::PerformanceMonitor permon_; //!< Separate performance monitor since the DepthFilter runs in a parallel thread.
  106. Matcher matcher_;
  107. /// Initialize new seeds from a frame.
  108. void initializeSeeds(FramePtr frame); // given the newly arrived keyframe, detects features on it while seed updating is halted; each detected feature, together with new_keyframe_mean_depth_ and new_keyframe_min_depth_, initializes a new Seed that is appended to seeds_, after which seed updating resumes
  109. /// Update all seeds with a new measurement frame.
  110. virtual void updateSeeds(FramePtr frame); // runs updateSeed only for the seeds that are visible in this frame; seeds whose depth has converged are turned into map points and removed from seeds_. One depth-filter update is performed per incoming frame.
  111. /// When a new keyframe arrives, the frame queue should be cleared.
  112. void clearFrameQueue();
  113. /// A thread that is continuously updating the seeds.
  114. void updateSeedsLoop();
  115. };
  116. } // namespace svo
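
updateSeed implements the Gaussian x Uniform mixture update of Vogiatzis and Hernández with a Beta prior on the inlier ratio. The following sketch follows the paper; the constants and bookkeeping in SVO itself may differ in detail.

    // Bayesian seed update (Vogiatzis & Hernandez style), sketched after the paper.
    // x is the new inverse-depth measurement, tau2 its variance.
    #include <boost/math/distributions/normal.hpp>
    #include <cmath>

    void updateSeedSketch(const float x, const float tau2, Seed* seed)
    {
      const float norm_scale = std::sqrt(seed->sigma2 + tau2);
      if (std::isnan(norm_scale)) return;
      boost::math::normal_distribution<float> nd(seed->mu, norm_scale);

      // posterior of the Gaussian (inlier) component
      const float s2 = 1.f / (1.f / seed->sigma2 + 1.f / tau2);
      const float m  = s2 * (seed->mu / seed->sigma2 + x / tau2);

      // mixture weights: inlier (Gaussian) vs. outlier (uniform over z_range)
      float C1 = seed->a / (seed->a + seed->b) * boost::math::pdf(nd, x);
      float C2 = seed->b / (seed->a + seed->b) * (1.f / seed->z_range);
      const float norm = C1 + C2;
      C1 /= norm; C2 /= norm;

      // moment matching for the Beta parameters a, b
      const float f = C1 * (seed->a + 1.f) / (seed->a + seed->b + 1.f)
                    + C2 * seed->a / (seed->a + seed->b + 1.f);
      const float e = C1 * (seed->a + 1.f) * (seed->a + 2.f) / ((seed->a + seed->b + 1.f) * (seed->a + seed->b + 2.f))
                    + C2 * seed->a * (seed->a + 1.f) / ((seed->a + seed->b + 1.f) * (seed->a + seed->b + 2.f));

      // update mean and variance of the inverse depth, then a and b
      const float mu_new = C1 * m + C2 * seed->mu;
      seed->sigma2 = C1 * (s2 + m * m) + C2 * (seed->sigma2 + seed->mu * seed->mu) - mu_new * mu_new;
      seed->mu = mu_new;
      seed->a  = (e - f) / (f - e / f);
      seed->b  = seed->a * (1.f - f) / f;
    }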

map.h

  1. namespace svo {
  2. class Point;
  3. class Feature;
  4. class Seed;
  5. /// Container for converged 3D points that are not already assigned to two keyframes.
  6. class MapPointCandidates
  7. {
  8. public:
  9. typedef pair<Point*, Feature*> PointCandidate;
  10. typedef list<PointCandidate> PointCandidateList;
  11. /// The depth-filter is running in a parallel thread and fills the canidate list.
  12. /// This mutex controls concurrent access to point_candidates.
  13. boost::mutex mut_;
  14. /// Candidate points are created from converged seeds.
  15. /// Until the next keyframe, these points can be used for reprojection and pose optimization.
  16. PointCandidateList candidates_;
  17. list< Point* > trash_points_;
  18. MapPointCandidates();
  19. ~MapPointCandidates();
  20. /// Add a candidate point
  21. void newCandidatePoint(Point* point, double depth_sigma2); // the second parameter is unused; adds the point, together with its feature in the latest keyframe, to candidates_
  22. /// Adds the feature to the frame and deletes candidate from list.
  23. void addCandidatePointToFrame(FramePtr frame); // iterates over candidates_; for every candidate whose feature belongs to frame, Frame::addFeature() is called and the candidate is removed from candidates_
  24. /// Remove a candidate point from the list of candidates.
  25. bool deleteCandidatePoint(Point* point); // searches candidates_ for the PointCandidate of point, deletes it with deleteCandidate() and erases it from candidates_
  26. /// Remove all candidates that belong to a frame.
  27. void removeFrameCandidates(FramePtr frame); // analogous to deleteCandidatePoint, but removes all candidates that belong to frame
  28. /// Reset the candidate list, remove and delete all points.
  29. void reset(); // used by ~MapPointCandidates()
  30. void deleteCandidate(PointCandidate& c); //{delete c.second; c.second=NULL; c.first->type_ = Point::TYPE_DELETED; trash_points_.push_back(c.first);}
  31. void emptyTrash(); // clears trash_points_
  32. };
  33. /// Map object which saves all keyframes which are in a map.
  34. class Map : boost::noncopyable
  35. {
  36. public:
  37. list< FramePtr > keyframes_; //!< List of keyframes in the map.
  38. list< Point* > trash_points_; //!< A deleted point is moved to the trash bin. Now and then this is cleaned. One reason is that the visualizer must remove the points also.
  39. MapPointCandidates point_candidates_;
  40. Map();
  41. ~Map();
  42. /// Reset the map. Delete all keyframes and reset the frame and point counters.
  43. void reset();
  44. /// Delete a point in the map and remove all references in keyframes to it.
  45. void safeDeletePoint(Point* pt); // used in removePtFrameRef: if an observation of pt is a key-point, that key-point is removed from its keyframe; then pt->obs_ is cleared and deletePoint is called
  46. /// Moves the point to the trash queue which is cleaned now and then.
  47. void deletePoint(Point* pt); // sets pt's type to TYPE_DELETED and moves it into trash_points_
  48. /// Moves the frame to the trash queue which is cleaned now and then.
  49. bool safeDeleteFrame(FramePtr frame); // deletes the keyframe frame: calls removePtFrameRef for its features, erases the frame from keyframes_ and calls point_candidates_.removeFrameCandidates(frame)
  50. /// Remove the references between a point and a frame.
  51. void removePtFrameRef(Frame* frame, Feature* ftr); // if the point of ftr is observed by only two keyframes, safeDeletePoint is called. Otherwise only the observation of ftr by this keyframe is removed, and if ftr is one of the frame's key-points it is removed from key_pts_ as well.
  52. /// Add a new keyframe to the map.
  53. void addKeyframe(FramePtr new_keyframe); //push_back new_keyframe
  54. /// Given a frame, return all keyframes which have an overlapping field of view.
  55. void getCloseKeyframes(const FramePtr& frame, list< pair<FramePtr,double> >& close_kfs) const; // used by getClosestKeyframe: iterates over keyframes_ and tests, for each keyframe, whether any of its key-points is visible in the given frame; if so, the keyframe and the distance between frame and keyframe are pushed into close_kfs. (A sketch follows this listing.)
  56. /// Return the keyframe which is spatially closest and has overlapping field of view.
  57. FramePtr getClosestKeyframe(const FramePtr& frame) const; // sorts the close_kfs obtained from getCloseKeyframes by distance and returns the closest keyframe
  58. /// Return the keyframe which is furthest apart from pos.
  59. FramePtr getFurthestKeyframe(const Vector3d& pos) const; // iterates over keyframes_ and, using each keyframe's pos(), returns the keyframe furthest away from pos
  60. bool getKeyframeById(const int id, FramePtr& frame) const; // finds the keyframe with the given id
  61. /// Transform the whole map with rotation R, translation t and scale s.
  62. void transform(const Matrix3d& R, const Vector3d& t, const double& s); // applies the similarity transform (R, t, s) to every keyframe's T_f_w_ and transforms the 3D points observed through each keyframe's fts_ accordingly
  63. /// Empty trash bin of deleted keyframes and map points. We don't delete the
  64. /// points immediately to ensure proper cleanup and to provide the visualizer
  65. /// a list of objects which must be removed.
  66. void emptyTrash(); // clears trash_points_ and calls point_candidates_.emptyTrash()
  67. /// Return the keyframe which was last inserted in the map.
  68. inline FramePtr lastKeyframe() { return keyframes_.back(); }
  69. /// Return the number of keyframes in the map
  70. inline size_t size() const { return keyframes_.size(); }
  71. };
  72. /// A collection of debug functions to check the data consistency.
  73. namespace map_debug {
  74. void mapStatistics(Map* map); // computes how many points a keyframe observes on average and by how many keyframes a point is observed on average
  75. void mapValidation(Map* map, int id); // calls frameValidation and pointValidation to verify that the frames and points still exist
  76. void frameValidation(Frame* frame, int id);
  77. void pointValidation(Point* point, int id);
  78. } // namespace map_debug
  79. } // namespace svo
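
The overlap test in getCloseKeyframes can be sketched with the Frame interface listed earlier: a keyframe counts as close if any of its five key-points is visible in the query frame, and the Euclidean distance between the two frame positions is stored with it.

    // Sketch of the field-of-view overlap test used by getCloseKeyframes.
    void getCloseKeyframesSketch(const Map& map, const FramePtr& frame,
                                 list< pair<FramePtr, double> >& close_kfs)
    {
      for (const FramePtr& kf : map.keyframes_)
      {
        for (const Feature* kp : kf->key_pts_)
        {
          if (kp == NULL || kp->point == NULL)
            continue;
          if (frame->isVisible(kp->point->pos_))       // at least one shared key-point
          {
            close_kfs.push_back(std::make_pair(kf, (frame->pos() - kf->pos()).norm()));
            break;                                     // one visible key-point is enough
          }
        }
      }
    }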

reprojector.h

  1. namespace vk {
  2. class AbstractCamera;
  3. }
  4. namespace svo {
  5. class Map;
  6. class Point;
  7. /// Project points from the map into the image and find the corresponding
  8. /// feature (corner). We don't search a match for every point but only for one
  9. /// point per cell. Thereby, we achieve a homogeneously distributed set of
  10. /// matched features and at the same time we can save processing time by not
  11. /// projecting all points.
  12. class Reprojector
  13. {
  14. public:
  15. EIGEN_MAKE_ALIGNED_OPERATOR_NEW
  16. /// Reprojector config parameters
  17. struct Options {
  18. size_t max_n_kfs; //!< max number of keyframes to reproject from
  19. bool find_match_direct;
  20. Options()
  21. : max_n_kfs(10),
  22. find_match_direct(true)
  23. {}
  24. } options_;
  25. size_t n_matches_;
  26. size_t n_trials_;
  27. Reprojector(vk::AbstractCamera* cam, Map& map); // stores the map reference and calls initializeGrid
  28. ~Reprojector();
  29. /// Project points from the map into the image. First finds keyframes with
  30. /// overlapping field of view and projects only those map-points.
  31. void reprojectMap(
  32. FramePtr frame,
  33. std::vector< std::pair<FramePtr,std::size_t> >& overlap_kfs); // first map_.getCloseKeyframes is used to collect the keyframes close to frame; they are sorted by distance and the N closest ones are kept. The fts_ of each of these keyframes are reprojected into frame with reprojectPoint, then the candidates stored in map_ are reprojected in the same way. Finally, each grid cell of frame may contain many projected points, and reprojectCell selects at most one match per cell.
  34. private:
  35. /// A candidate is a point that projects into the image plane and for which we
  36. /// will search a maching feature in the image.
  37. struct Candidate {
  38. EIGEN_MAKE_ALIGNED_OPERATOR_NEW
  39. Point* pt; //!< 3D point.
  40. Vector2d px; //!< projected 2D pixel location.
  41. Candidate(Point* pt, Vector2d& px) : pt(pt), px(px) {}
  42. };
  43. typedef std::list<Candidate, aligned_allocator<Candidate> > Cell;
  44. typedef std::vector<Cell*> CandidateGrid;
  45. /// The grid stores a set of candidate matches. For every grid cell we try to find one match.
  46. struct Grid
  47. {
  48. CandidateGrid cells;
  49. vector<int> cell_order;
  50. int cell_size;
  51. int grid_n_cols;
  52. int grid_n_rows;
  53. };
  54. Grid grid_;
  55. Matcher matcher_;
  56. Map& map_;
  57. static bool pointQualityComparator(Candidate& lhs, Candidate& rhs);
  58. void initializeGrid(vk::AbstractCamera* cam); // initializes the grid dimensions, allocates the cells, fills cell_order and shuffles it
  59. void resetGrid();
  60. bool reprojectCell(Cell& cell, FramePtr frame); // a cell may contain many candidate points. They are sorted by point quality (type) and visited in order; for each point matcher_.findMatchDirect is used to refine the match. As soon as one point passes, it is removed from the cell and true is returned.
  61. bool reprojectPoint(FramePtr frame, Point* point); // projects point into frame to obtain px; if px lies inside the image, the index k of its grid cell is computed and grid_.cells.at(k)->push_back(Candidate(point, px)) is called. (See the sketch after this listing.)
  62. };
  63. } // namespace svo
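
reprojectPoint can be sketched as below. Candidate and Grid are private members of Reprojector, so in practice this is a member function; the 8-pixel margin stands in for the patch border needed by the matcher.

    // Sketch of reprojectPoint (in practice a private member of Reprojector,
    // since Grid and Candidate are private types of that class).
    bool reprojectPointSketch(Grid& grid, FramePtr frame, Point* point)
    {
      Vector2d px = frame->w2c(point->pos_);                     // world -> pixel on level 0
      // keep only projections that land inside the image, with an 8-px patch margin
      if (px[0] < 8 || px[1] < 8 ||
          px[0] >= frame->img().cols - 8 || px[1] >= frame->img().rows - 8)
        return false;
      const int k = static_cast<int>(px[1]) / grid.cell_size * grid.grid_n_cols
                  + static_cast<int>(px[0]) / grid.cell_size;    // index of the level-0 grid cell
      grid.cells.at(k)->push_back(Candidate(point, px));         // one more candidate for that cell
      return true;
    }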

bundle_adjustment.h

  1. namespace g2o {
  2. class EdgeProjectXYZ2UV;
  3. class SparseOptimizer;
  4. class VertexSE3Expmap;
  5. class VertexSBAPointXYZ;
  6. }
  7. namespace svo {
  8. typedef g2o::EdgeProjectXYZ2UV g2oEdgeSE3;
  9. typedef g2o::VertexSE3Expmap g2oFrameSE3;
  10. typedef g2o::VertexSBAPointXYZ g2oPoint;
  11. class Frame;
  12. class Point;
  13. class Feature;
  14. class Map;
  15. /// Local, global and 2-view bundle adjustment with g2o
  16. namespace ba {
  17. /// Temporary container to hold the g2o edge with reference to frame and point.
  18. struct EdgeContainerSE3
  19. {
  20. g2oEdgeSE3* edge;
  21. Frame* frame;
  22. Feature* feature;
  23. bool is_deleted;
  24. EdgeContainerSE3(g2oEdgeSE3* e, Frame* frame, Feature* feature) :
  25. edge(e), frame(frame), feature(feature), is_deleted(false)
  26. {}
  27. };
  28. /// Optimize two camera frames and their observed 3D points.
  29. /// Is used after initialization.
  30. void twoViewBA(Frame* frame1, Frame* frame2, double reproj_thresh, Map* map); // the two frames are added as vertices; the points observed in the first frame are added as vertices and connected by edges to the first frame's vertex. For every point, findFrameRef yields the corresponding feature in the second frame, and an edge to the second frame is added as well. The whole graph is then optimized and observations with large reprojection error are removed.
  31. /// Local bundle adjustment.
  32. /// Optimizes core_kfs and their observed map points while keeping the
  33. /// neighbourhood fixed.
  34. void localBA(
  35. Frame* center_kf,
  36. set<FramePtr>* core_kfs,
  37. Map* map,
  38. size_t& n_incorrect_edges_1,
  39. size_t& n_incorrect_edges_2,
  40. double& init_error,
  41. double& final_error); // optimizes the core_kfs and the map points they observe: the points are first refined with structure-only optimization, then the whole local graph is optimized
  42. /// Global bundle adjustment.
  43. /// Optimizes the whole map. Is currently not used in SVO.
  44. void globalBA(Map* map); // optimizes all keyframes in the map together with the points observed through their fts_
  45. /// Initialize g2o with solver type, optimization strategy and camera model.
  46. void setupG2o(g2o::SparseOptimizer * optimizer); // configures the solver and the camera parameters
  47. /// Run the optimization on the provided graph.
  48. void runSparseBAOptimizer(
  49. g2o::SparseOptimizer* optimizer,
  50. unsigned int num_iter,
  51. double& init_error,
  52. double& final_error); // runs the optimization
  53. /// Create a g2o vertex from a keyframe object (see the sketch after this listing).
  54. g2oFrameSE3* createG2oFrameSE3(
  55. Frame* kf,
  56. size_t id,
  57. bool fixed);
  58. /// Creates a g2o vertex from a mappoint object.
  59. g2oPoint* createG2oPoint(
  60. Vector3d pos,
  61. size_t id,
  62. bool fixed);
  63. /// Creates a g2o edge between a g2o keyframe and mappoint vertex with the provided measurement.
  64. g2oEdgeSE3* createG2oEdgeSE3(
  65. g2oFrameSE3* v_kf,
  66. g2oPoint* v_mp,
  67. const Vector2d& f_up,
  68. bool robust_kernel,
  69. double huber_width,
  70. double weight = 1);
  71. } // namespace ba
  72. } // namespace svo
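
The vertex helpers can be sketched as follows (assumed close to SVO's implementation, but not verbatim): the keyframe pose T_f_w_ is wrapped in an SE3Quat estimate for the pose vertex, a map point position in a point vertex; both receive an id and an optional fixed flag.

    // Sketch of the g2o vertex creation helpers.
    #include <g2o/types/sba/types_sba.h>
    #include <g2o/types/sba/types_six_dof_expmap.h>

    g2oFrameSE3* createG2oFrameSE3Sketch(Frame* kf, size_t id, bool fixed)
    {
      g2oFrameSE3* v = new g2oFrameSE3();
      v->setId(id);
      v->setFixed(fixed);                                 // e.g. fix the first keyframe as gauge
      v->setEstimate(g2o::SE3Quat(kf->T_f_w_.unit_quaternion(),
                                  kf->T_f_w_.translation()));
      return v;
    }

    g2oPoint* createG2oPointSketch(const Vector3d& pos, size_t id, bool fixed)
    {
      g2oPoint* v = new g2oPoint();
      v->setId(id);
      v->setFixed(fixed);
      v->setEstimate(pos);                                // 3D position in world coordinates
      return v;
    }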

Initialization.h

  1. namespace svo {
  2. class FrameHandlerMono;
  3. /// Bootstrapping the map from the first two views.
  4. namespace initialization {
  5. enum InitResult { FAILURE, NO_KEYFRAME, SUCCESS };
  6. /// Tracks features using Lucas-Kanade tracker and then estimates a homography.
  7. class KltHomographyInit {
  8. friend class svo::FrameHandlerMono;
  9. public:
  10. EIGEN_MAKE_ALIGNED_OPERATOR_NEW
  11. FramePtr frame_ref_;
  12. KltHomographyInit() {};
  13. ~KltHomographyInit() {};
  14. InitResult addFirstFrame(FramePtr frame_ref); // calls detectFeatures and stores the detected features in px_ref_. If fewer than 100 features are detected, FAILURE is returned; otherwise frame_ref is stored as frame_ref_ and the features in px_ref_ are copied into px_cur_ as the initial guess.
  15. InitResult addSecondFrame(FramePtr frame_cur); // calls trackKlt to track px_ref_. If too few features are tracked, FAILURE is returned; if the mean disparity is below a threshold the two frames are too close and NO_KEYFRAME is returned. computeHomography is then called; if the number of inliers is too small, FAILURE is returned. Once all checks pass, the average depth of the triangulated points xyz_in_cur_ is used to compute the scale factor, from which the pose of the current frame is set. For every inlier the 3D point in world coordinates is computed, the corresponding features are added to ref_frame and cur_frame and to point->obs_, and SUCCESS is returned. (A sketch of the scale recovery follows this listing.)
  16. void reset(); // px_cur_.clear(); frame_ref_.reset();
  17. protected:
  18. vector<cv::Point2f> px_ref_; //!< keypoints to be tracked in reference frame.
  19. vector<cv::Point2f> px_cur_; //!< tracked keypoints in current frame.
  20. vector<Vector3d> f_ref_; //!< bearing vectors corresponding to the keypoints in the reference image.
  21. vector<Vector3d> f_cur_; //!< bearing vectors corresponding to the keypoints in the current image.
  22. vector<double> disparities_; //!< disparity between first and second frame.
  23. vector<int> inliers_; //!< inliers after the geometric check (e.g., Homography).
  24. vector<Vector3d> xyz_in_cur_; //!< 3D points computed during the geometric check.
  25. SE3 T_cur_from_ref_; //!< computed transformation between the first two frames.
  26. };
  27. /// Detect Fast corners in the image.
  28. void detectFeatures(
  29. FramePtr frame,
  30. vector<cv::Point2f>& px_vec,
  31. vector<Vector3d>& f_vec); // runs the FAST detector on frame to obtain new features, then stores their px and f in px_vec and f_vec
  32. /// Compute optical flow (Lucas Kanade) for selected keypoints.
  33. void trackKlt(
  34. FramePtr frame_ref,
  35. FramePtr frame_cur,
  36. vector<cv::Point2f>& px_ref,
  37. vector<cv::Point2f>& px_cur,
  38. vector<Vector3d>& f_ref,
  39. vector<Vector3d>& f_cur,
  40. vector<double>& disparities); // uses OpenCV's pyramidal Lucas-Kanade tracker; points that fail to track are erased from px_ref, px_cur and f_ref, successfully tracked points are kept in px_cur and f_cur, and their disparities are computed
  41. void computeHomography(
  42. const vector<Vector3d>& f_ref,
  43. const vector<Vector3d>& f_cur,
  44. double focal_length,
  45. double reprojection_threshold,
  46. vector<int>& inliers,
  47. vector<Vector3d>& xyz_in_cur,
  48. SE3& T_cur_from_ref); // converts f_ref and f_cur to unit-plane coordinates, estimates the relative pose T_cur_from_ref with vk::Homography and uses vk::computeInliers to obtain the inliers and the triangulated points xyz_in_cur
  49. } // namespace initialization
  50. } // namespace nslam
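
The scale recovery in addSecondFrame works on the triangulated points: the homography fixes T_cur_from_ref only up to scale, so the baseline is rescaled such that the scene depth of the new frame matches a configured prior (Config::mapScale in SVO, passed in here as depth_prior). A sketch, using the median as the representative depth:

    // Sketch of the monocular scale recovery after the homography check.
    #include <algorithm>
    #include <vector>

    void recoverScaleSketch(const std::vector<Vector3d>& xyz_in_cur,  // triangulated points in the current frame
                            const SE3& T_cur_from_ref,
                            const FramePtr& frame_ref,
                            FramePtr& frame_cur,
                            double depth_prior)                       // e.g. Config::mapScale()
    {
      // median depth of the triangulated points in the current frame
      std::vector<double> depths;
      for (const Vector3d& p : xyz_in_cur) depths.push_back(p.z());
      std::nth_element(depths.begin(), depths.begin() + depths.size() / 2, depths.end());
      const double depth_median = depths[depths.size() / 2];
      const double scale = depth_prior / depth_median;                // monocular scale factor

      // chain the up-to-scale relative pose onto the reference pose ...
      frame_cur->T_f_w_ = T_cur_from_ref * frame_ref->T_f_w_;
      // ... and rescale the baseline so that the camera centre sits at
      // ref_pos + scale * (cur_pos - ref_pos)
      frame_cur->T_f_w_.translation() =
          -frame_cur->T_f_w_.rotation_matrix() *
          (frame_ref->pos() + scale * (frame_cur->pos() - frame_ref->pos()));
    }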

frame_handler_base.h

  1. namespace vk
  2. {
  3. class AbstractCamera;
  4. class PerformanceMonitor;
  5. }
  6. namespace svo
  7. {
  8. class Point;
  9. class Matcher;
  10. class DepthFilter;
  11. /// Base class for various VO pipelines. Manages the map and the state machine.
  12. class FrameHandlerBase : boost::noncopyable
  13. {
  14. public:
  15. enum Stage {
  16. STAGE_PAUSED,
  17. STAGE_FIRST_FRAME,
  18. STAGE_SECOND_FRAME,
  19. STAGE_DEFAULT_FRAME,
  20. STAGE_RELOCALIZING
  21. };
  22. enum TrackingQuality {
  23. TRACKING_INSUFFICIENT,
  24. TRACKING_BAD,
  25. TRACKING_GOOD
  26. };
  27. enum UpdateResult {
  28. RESULT_NO_KEYFRAME,
  29. RESULT_IS_KEYFRAME,
  30. RESULT_FAILURE
  31. };
  32. FrameHandlerBase(); // stage_(STAGE_PAUSED), set_reset_(false), set_start_(false), acc_frame_timings_(10), acc_num_obs_(10), num_obs_last_(0), tracking_quality_(TRACKING_INSUFFICIENT). If SVO_TRACE is defined, the PerformanceMonitor is initialized as well.
  33. virtual ~FrameHandlerBase();
  34. /// Get the current map.
  35. const Map& map() const { return map_; }
  36. /// Will reset the map as soon as the current frame is finished processing.
  37. void reset() { set_reset_ = true; }
  38. /// Start processing.
  39. void start() { set_start_ = true; }
  40. /// Get the current stage of the algorithm.
  41. Stage stage() const { return stage_; }
  42. /// Get tracking quality.
  43. TrackingQuality trackingQuality() const { return tracking_quality_; }
  44. /// Get the processing time of the previous iteration.
  45. double lastProcessingTime() const { return timer_.getTime(); }
  46. /// Get the number of feature observations of the last frame.
  47. size_t lastNumObservations() const { return num_obs_last_; }
  48. protected:
  49. Stage stage_; //!< Current stage of the algorithm.
  50. bool set_reset_; //!< Flag that the user can set. Will reset the system before the next iteration.
  51. bool set_start_; //!< Flag the user can set to start the system when the next image is received.
  52. Map map_; //!< Map of keyframes created by the slam system.
  53. vk::Timer timer_; //!< Stopwatch to measure time to process frame.
  54. vk::RingBuffer<double> acc_frame_timings_; //!< Total processing time of the last 10 frames, used to give some user feedback on the performance.
  55. vk::RingBuffer<size_t> acc_num_obs_; //!< Number of observed features of the last 10 frames, used to give some user feedback on the tracking performance.
  56. size_t num_obs_last_; //!< Number of observations in the previous frame.
  57. TrackingQuality tracking_quality_; //!< An estimate of the tracking quality based on the number of tracked features.
  58. /// Before a frame is processed, this function is called.
  59. bool startFrameProcessingCommon(const double timestamp);
  60. /// When a frame is finished processing, this function is called.
  61. int finishFrameProcessingCommon(
  62. const size_t update_id,
  63. const UpdateResult dropout,
  64. const size_t num_observations);
  65. /// Reset the map and frame handler to start from scratch.
  66. void resetCommon();
  67. /// Reset the frame handler. Implement in derived class.
  68. virtual void resetAll() { resetCommon(); }
  69. /// Set the tracking quality based on the number of tracked features.
  70. virtual void setTrackingQuality(const size_t num_observations);
  71. /// Optimize some of the observed 3D points.
  72. virtual void optimizeStructure(FramePtr frame, size_t max_n_pts, int max_iter);
  73. };
  74. } // namespace nslam

frame_handler_mono.h

  1. namespace svo {
  2. /// Monocular Visual Odometry Pipeline as described in the SVO paper.
  3. class FrameHandlerMono : public FrameHandlerBase
  4. {
  5. public:
  6. EIGEN_MAKE_ALIGNED_OPERATOR_NEW
  7. FrameHandlerMono(vk::AbstractCamera* cam);
  8. virtual ~FrameHandlerMono();
  9. /// Provide an image.
  10. void addImage(const cv::Mat& img, double timestamp); // first clears core_kfs_ and overlap_kfs_, then creates new_frame_ from the incoming image, dispatches to the handler of the current stage_, and finally sets last_frame_ = new_frame_ and resets new_frame_
  11. /// Set the first frame (used for synthetic datasets in benchmark node)
  12. void setFirstFrame(const FramePtr& first_frame); // resetAll();last_frame_ = first_frame; last_frame_->setKeyframe();map_.addKeyframe(last_frame_);stage_ = STAGE_DEFAULT_FRAME;
  13. /// Get the last frame that has been processed.
  14. FramePtr lastFrame() { return last_frame_; }
  15. /// Get the set of spatially closest keyframes of the last frame.
  16. const set<FramePtr>& coreKeyframes() { return core_kfs_; }
  17. /// Return the feature track to visualize the KLT tracking during initialization.
  18. const vector<cv::Point2f>& initFeatureTrackRefPx() const { return klt_homography_init_.px_ref_; }
  19. const vector<cv::Point2f>& initFeatureTrackCurPx() const { return klt_homography_init_.px_cur_; }
  20. /// Access the depth filter.
  21. DepthFilter* depthFilter() const { return depth_filter_; }
  22. /// An external place recognition module may know where to relocalize.
  23. bool relocalizeFrameAtPose(
  24. const int keyframe_id,
  25. const SE3& T_kf_f,
  26. const cv::Mat& img,
  27. const double timestamp); /// looks up ref_keyframe with map_.getKeyframeById(keyframe_id), creates a new_frame_ from img and calls relocalizeFrame. As long as the result is not RESULT_FAILURE, relocalization succeeded: last_frame_ = new_frame_ and true is returned.
  28. protected:
  29. vk::AbstractCamera* cam_; //!< Camera model, can be ATAN, Pinhole or Ocam (see vikit).
  30. Reprojector reprojector_; //!< Projects points from other keyframes into the current frame
  31. FramePtr new_frame_; //!< Current frame.
  32. FramePtr last_frame_; //!< Last frame, not necessarily a keyframe.
  33. set<FramePtr> core_kfs_; //!< Keyframes in the closer neighbourhood.
  34. vector< pair<FramePtr,size_t> > overlap_kfs_; //!< All keyframes with overlapping field of view. the paired number specifies how many common mappoints are observed TODO: why vector!?
  35. initialization::KltHomographyInit klt_homography_init_; //!< Used to estimate pose of the first two keyframes by estimating a homography.
  36. DepthFilter* depth_filter_; //!< Depth estimation algorithm runs in a parallel thread and is used to initialize new 3D points.
  37. /// Initialize the visual odometry algorithm.
  38. virtual void initialize(); // initializes the feature detector and depth_filter_
  39. /// Processes the first frame and sets it as a keyframe.
  40. virtual UpdateResult processFirstFrame(); // sets the pose of the first frame to the identity as the reference. Calls klt_homography_init_.addFirstFrame to detect features; on success the frame is set as keyframe and stage_ = STAGE_SECOND_FRAME.
  41. /// Processes all frames after the first frame until a keyframe is selected.
  42. virtual UpdateResult processSecondFrame(); // calls klt_homography_init_.addSecondFrame to track the features of the first frame and triangulate their 3D coordinates. new_frame_ is set as keyframe, its scene depth is computed with frame_utils::getSceneDepth, and addKeyframe of both the depth filter and map_ is called.
  43. /// Processes all frames after the first two keyframes.
  44. virtual UpdateResult processFrame(); // initializes the pose of new_frame_ with the pose of the last frame, then runs, in order: sparse image alignment, map reprojection & feature alignment, pose optimization and structure optimization. Afterwards it checks whether new_frame_ should become a keyframe: if not, the frame is only used in the depth filter's updateSeeds; if yes, it is set as keyframe and old keyframes may be removed.
  45. /// Try relocalizing the frame at relative position to provided keyframe.
  46. virtual UpdateResult relocalizeFrame(
  47. const SE3& T_cur_ref,
  48. FramePtr ref_keyframe); // runs sparse image alignment between ref_keyframe and new_frame_. If more than 30 features are tracked, ref_keyframe is set as last_frame_ and processFrame() is run; any result other than RESULT_FAILURE means relocalization succeeded. With 30 or fewer tracked features, relocalization fails.
  49. /// Reset the frame handler. Implement in derived class.
  50. virtual void resetAll();
  51. /// Keyframe selection criterion.
  52. virtual bool needNewKf(double scene_depth_mean); // iterates over overlap_kfs_ and computes the relative distance (normalized by the scene depth) between each overlapping keyframe and new_frame_; if this distance exceeds a threshold for all of them, true is returned (see the sketch after this listing)
  53. void setCoreKfs(size_t n_closest); // selects from overlap_kfs_ the n_closest keyframes that share the most observed map points with new_frame_ as core_kfs_
  54. };
  55. } // namespace svo
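
needNewKf reduces to the following check; the threshold (0.12 of the scene depth, with slightly different factors per axis) is quoted here only for illustration.

    // Sketch of the keyframe selection criterion.
    #include <cmath>

    bool needNewKfSketch(const Frame& new_frame,
                         const vector< pair<FramePtr, size_t> >& overlap_kfs,
                         double scene_depth_mean)
    {
      for (const pair<FramePtr, size_t>& kf : overlap_kfs)
      {
        // position of the overlapping keyframe expressed in the new frame's coordinates
        const Vector3d rel = new_frame.w2f(kf.first->pos());
        if (std::fabs(rel.x()) / scene_depth_mean < 0.12 &&
            std::fabs(rel.y()) / scene_depth_mean < 0.12 * 0.8 &&
            std::fabs(rel.z()) / scene_depth_mean < 0.12 * 1.3)
          return false;                 // still close enough to an existing keyframe
      }
      return true;                      // far from all overlapping keyframes: request a new keyframe
    }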