| author | Stanislaw Halik <sthalik@misaki.pl> | 2015-02-01 03:48:14 +0100 |
|---|---|---|
| committer | Stanislaw Halik <sthalik@misaki.pl> | 2015-02-01 03:48:14 +0100 |
| commit | f3fc96424dfc31576917e39db555cc5d68ddc4af (patch) | |
| tree | a87bfd23932eb9426cdb99b72daf5ff6eac8faf6 /ftnoir_tracker_pt/point_tracker.h | |
| parent | e83d0be0de7ebb13735715c9cc257aaec62d49dd (diff) | |
bring back dynamic pose resolution following user outrage (tag: opentrack-2.3-rc9)
Uses a simpler method without computing point velocities.
Issues: #112, #126
Diffstat (limited to 'ftnoir_tracker_pt/point_tracker.h')
-rw-r--r-- | ftnoir_tracker_pt/point_tracker.h | 15 |
1 file changed, 12 insertions(+), 3 deletions(-)
```diff
diff --git a/ftnoir_tracker_pt/point_tracker.h b/ftnoir_tracker_pt/point_tracker.h
index 61d2a826..10bd2cef 100644
--- a/ftnoir_tracker_pt/point_tracker.h
+++ b/ftnoir_tracker_pt/point_tracker.h
@@ -119,14 +119,23 @@ public:
     // track the pose using the set of normalized point coordinates (x pos in range -0.5:0.5)
     // f : (focal length)/(sensor width)
     // dt : time since last call
-    void track(const std::vector<cv::Vec2f>& projected_points, const PointModel& model, float f);
+    void track(const std::vector<cv::Vec2f>& projected_points, const PointModel& model, float f, bool dynamic_pose);
     Affine pose() const { return X_CM; }
-
+    cv::Vec2f project(const cv::Vec3f& v_M, float f);
 private:
     // the points in model order
-    typedef struct { cv::Vec2f points[PointModel::N_POINTS]; } PointOrder;
+    struct PointOrder
+    {
+        cv::Vec2f points[PointModel::N_POINTS];
+        PointOrder()
+        {
+            for (int i = 0; i < PointModel::N_POINTS; i++)
+                points[i] = cv::Vec2f(0, 0);
+        }
+    };
 
     PointOrder find_correspondences(const std::vector<cv::Vec2f>& projected_points, const PointModel &model);
+    PointOrder find_correspondences_previous(const std::vector<cv::Vec2f>& points, const PointModel &model, float f);
     int POSIT(const PointModel& point_model, const PointOrder& order, float focal_length);  // The POSIT algorithm, returns the number of iterations
 
     Affine X_CM; // trafo from model to camera
```
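The new declarations hint at how the "simpler method" works: `project()` applies a pinhole projection to a model point, and `find_correspondences_previous()` presumably matches the detected points against the points projected from the previous frame's pose instead of extrapolating with point velocities. Below is a minimal, free-standing sketch of that idea; the `Affine` and `PointModel` stand-ins and both function bodies are assumptions for illustration, not the actual point_tracker.cpp code.

```cpp
// Minimal sketch only -- illustrative guesses, not the actual point_tracker.cpp bodies.
#include <opencv2/core.hpp>
#include <vector>
#include <limits>

// Stand-in for the tracker's model-to-camera transform (rotation + translation).
struct Affine
{
    cv::Matx33f R = cv::Matx33f::eye();
    cv::Vec3f t = cv::Vec3f(0, 0, 1);
    cv::Vec3f operator()(const cv::Vec3f& v) const { return R * v + t; }
};

// Stand-in for PointModel: three reference points, point 0 at the model origin.
struct PointModel
{
    enum { N_POINTS = 3 };
    cv::Vec3f M01, M02;   // offsets of points 1 and 2 relative to point 0
    cv::Vec3f point(int i) const
    {
        return i == 0 ? cv::Vec3f(0, 0, 0) : (i == 1 ? M01 : M02);
    }
};

struct PointOrder { cv::Vec2f points[PointModel::N_POINTS]; };

// Perspective projection of a model point through a given pose.
// In the header, project() is a member and the pose is the stored X_CM from the
// previous frame; it is passed explicitly here to keep the sketch free-standing.
cv::Vec2f project(const Affine& X_CM, const cv::Vec3f& v_M, float f)
{
    cv::Vec3f v_C = X_CM(v_M);   // model -> camera space
    return cv::Vec2f(f * v_C[0] / v_C[2], f * v_C[1] / v_C[2]);
}

// Greedy nearest-neighbour matching: project each model point with last frame's
// pose and pick the closest detected blob, so no point velocities are needed.
PointOrder find_correspondences_previous(const std::vector<cv::Vec2f>& points,
                                         const PointModel& model, float f,
                                         const Affine& X_CM)
{
    PointOrder order;
    for (int i = 0; i < PointModel::N_POINTS; i++)
    {
        const cv::Vec2f predicted = project(X_CM, model.point(i), f);
        int best = 0;
        float best_d2 = std::numeric_limits<float>::max();
        for (unsigned j = 0; j < points.size(); j++)
        {
            const cv::Vec2f d = points[j] - predicted;
            const float d2 = d.dot(d);
            if (d2 < best_d2) { best_d2 = d2; best = (int)j; }
        }
        order.points[i] = points[best];
    }
    return order;
}
```

The new `bool dynamic_pose` argument to `track()` then presumably selects between this previous-pose matcher and the existing `find_correspondences()` ordering, which fits the commit's note that the method avoids computing point velocities.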