author | Stanislaw Halik <sthalik@misaki.pl> | 2014-10-05 01:22:11 +0200
committer | Stanislaw Halik <sthalik@misaki.pl> | 2014-10-05 01:22:11 +0200
commit | cfffa29e29db6b2234c7f534b1ebcd612b7f4914 (patch)
tree | 35126a8dfc5304efaa4617efbc92e31617e4a030 /ftnoir_tracker_pt/point_tracker.h
parent | 97bd173ee4b6f30c12ca590e213b21bbc83f8617 (diff)
flush
Diffstat (limited to 'ftnoir_tracker_pt/point_tracker.h')
-rw-r--r-- | ftnoir_tracker_pt/point_tracker.h | 74
1 file changed, 37 insertions, 37 deletions
diff --git a/ftnoir_tracker_pt/point_tracker.h b/ftnoir_tracker_pt/point_tracker.h
index c8212538..d65494a4 100644
--- a/ftnoir_tracker_pt/point_tracker.h
+++ b/ftnoir_tracker_pt/point_tracker.h
@@ -8,7 +8,7 @@
 #ifndef POINTTRACKER_H
 #define POINTTRACKER_H
 
-#include <opencv2/opencv.hpp>
+#include <opencv2/core/core.hpp>
 #ifndef OPENTRACK_API
 #   include <boost/shared_ptr.hpp>
 #else
@@ -21,31 +21,31 @@
 class FrameTrafo
 {
 public:
-    FrameTrafo() : R(cv::Matx33f::eye()), t(0,0,0) {}
-    FrameTrafo(const cv::Matx33f& R, const cv::Vec3f& t) : R(R),t(t) {}
+    FrameTrafo() : R(cv::Matx33f::eye()), t(0,0,0) {}
+    FrameTrafo(const cv::Matx33f& R, const cv::Vec3f& t) : R(R),t(t) {}
 
-    cv::Matx33f R;
-    cv::Vec3f t;
+    cv::Matx33f R;
+    cv::Vec3f t;
 };
 
 inline FrameTrafo operator*(const FrameTrafo& X, const FrameTrafo& Y)
 {
-    return FrameTrafo(X.R*Y.R, X.R*Y.t + X.t);
+    return FrameTrafo(X.R*Y.R, X.R*Y.t + X.t);
 }
 
 inline FrameTrafo operator*(const cv::Matx33f& X, const FrameTrafo& Y)
 {
-    return FrameTrafo(X*Y.R, X*Y.t);
+    return FrameTrafo(X*Y.R, X*Y.t);
 }
 
 inline FrameTrafo operator*(const FrameTrafo& X, const cv::Matx33f& Y)
 {
-    return FrameTrafo(X.R*Y, X.t);
+    return FrameTrafo(X.R*Y, X.t);
 }
 
 inline cv::Vec3f operator*(const FrameTrafo& X, const cv::Vec3f& v)
 {
-    return X.R*v + X.t;
+    return X.R*v + X.t;
 }
 
 
@@ -55,28 +55,28 @@ inline cv::Vec3f operator*(const FrameTrafo& X, const cv::Vec3f& v)
 // [Denis Oberkampf, Daniel F. DeMenthon, Larry S. Davis: "Iterative Pose Estimation Using Coplanar Feature Points"]
 class PointModel
 {
-    friend class PointTracker;
+    friend class PointTracker;
 public:
-    static constexpr int N_POINTS = 3;
+    static constexpr int N_POINTS = 3;
 
-    PointModel(cv::Vec3f M01, cv::Vec3f M02);
+    PointModel(cv::Vec3f M01, cv::Vec3f M02);
     PointModel();
 
-    inline const cv::Vec3f& get_M01() const { return M01; }
-    inline const cv::Vec3f& get_M02() const { return M02; }
+    inline const cv::Vec3f& get_M01() const { return M01; }
+    inline const cv::Vec3f& get_M02() const { return M02; }
 
 private:
-    cv::Vec3f M01; // M01 in model frame
-    cv::Vec3f M02; // M02 in model frame
+    cv::Vec3f M01; // M01 in model frame
+    cv::Vec3f M02; // M02 in model frame
 
-    cv::Vec3f u; // unit vector perpendicular to M01,M02-plane
+    cv::Vec3f u; // unit vector perpendicular to M01,M02-plane
 
-    cv::Matx22f P;
+    cv::Matx22f P;
 
-    cv::Vec2f d; // determinant vector for point correspondence
-    int d_order[3]; // sorting of projected model points with respect to d scalar product
+    cv::Vec2f d; // determinant vector for point correspondence
+    int d_order[3]; // sorting of projected model points with respect to d scalar product
 
-    void get_d_order(const std::vector<cv::Vec2f>& points, int d_order[]) const;
+    void get_d_order(const std::vector<cv::Vec2f>& points, int d_order[]) const;
 };
 
 // ----------------------------------------------------------------------------
@@ -86,29 +86,29 @@ private:
 class PointTracker
 {
 public:
-    PointTracker();
-    // track the pose using the set of normalized point coordinates (x pos in range -0.5:0.5)
-    // f : (focal length)/(sensor width)
-    // dt : time since last call
-    void track(const std::vector<cv::Vec2f>& projected_points, const PointModel& model);
-    FrameTrafo get_pose() const { return X_CM; }
-    void reset();
+    PointTracker();
+    // track the pose using the set of normalized point coordinates (x pos in range -0.5:0.5)
+    // f : (focal length)/(sensor width)
+    // dt : time since last call
+    void track(const std::vector<cv::Vec2f>& projected_points, const PointModel& model);
+    FrameTrafo get_pose() const { return X_CM; }
+    void reset();
 
 private:
     // the points in model order
     typedef struct { cv::Vec2f points[PointModel::N_POINTS]; } PointOrder;
 
-    static constexpr float focal_length = 1.0f;
-
-    inline cv::Vec2f project(const cv::Vec3f& v_M)
-    {
-        cv::Vec3f v_C = X_CM * v_M;
-        return cv::Vec2f(focal_length*v_C[0]/v_C[2], focal_length*v_C[1]/v_C[2]);
-    }
+    static constexpr float focal_length = 1.0f;
+
+    inline cv::Vec2f project(const cv::Vec3f& v_M)
+    {
+        cv::Vec3f v_C = X_CM * v_M;
+        return cv::Vec2f(focal_length*v_C[0]/v_C[2], focal_length*v_C[1]/v_C[2]);
+    }
 
     PointOrder find_correspondences(const std::vector<cv::Vec2f>& projected_points, const PointModel &model);
     int POSIT(const PointModel& point_model, const PointOrder& order); // The POSIT algorithm, returns the number of iterations
-
-    FrameTrafo X_CM; // trafo from model to camera
+
+    FrameTrafo X_CM; // trafo from model to camera
 };
 #endif //POINTTRACKER_H
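For context, the header touched here defines FrameTrafo (a rigid model-to-camera transform) and PointTracker::project() (a normalized pinhole projection with focal_length = 1). Below is a minimal standalone sketch, not part of the commit, that reuses those definitions verbatim to show how a model point is mapped into the camera frame and projected; it assumes only <opencv2/core/core.hpp>, matching the include change above, and the example pose (identity rotation, unit translation along +z) is made up for illustration.

#include <opencv2/core/core.hpp>
#include <cstdio>

// Same affine transform type as in point_tracker.h: rotation R plus translation t.
struct FrameTrafo
{
    FrameTrafo() : R(cv::Matx33f::eye()), t(0,0,0) {}
    FrameTrafo(const cv::Matx33f& R, const cv::Vec3f& t) : R(R), t(t) {}
    cv::Matx33f R;
    cv::Vec3f t;
};

// Composition: apply Y first, then X; rotations multiply, Y's translation is rotated into X's frame.
inline FrameTrafo operator*(const FrameTrafo& X, const FrameTrafo& Y)
{
    return FrameTrafo(X.R*Y.R, X.R*Y.t + X.t);
}

// Apply the transform to a point.
inline cv::Vec3f operator*(const FrameTrafo& X, const cv::Vec3f& v)
{
    return X.R*v + X.t;
}

int main()
{
    // Hypothetical model-to-camera pose: identity rotation, one unit in front of the camera.
    FrameTrafo X_CM(cv::Matx33f::eye(), cv::Vec3f(0, 0, 1));

    cv::Vec3f v_M(0.1f, 0.05f, 0.0f);   // a point given in the model frame
    cv::Vec3f v_C = X_CM * v_M;          // same point in the camera frame

    // Normalized projection as in PointTracker::project(): divide by depth, scale by focal_length.
    const float focal_length = 1.0f;
    cv::Vec2f p(focal_length*v_C[0]/v_C[2], focal_length*v_C[1]/v_C[2]);

    std::printf("projected: (%.3f, %.3f)\n", p[0], p[1]);  // prints (0.100, 0.050)
    return 0;
}

The resulting coordinates live in the same normalized image space that track() expects (x roughly in -0.5:0.5 per the header comment), so focal_length here means focal length divided by sensor width rather than a pixel value.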