diff options
author | Stanislaw Halik <sthalik@misaki.pl> | 2016-09-20 23:08:10 +0200 |
---|---|---|
committer | Stanislaw Halik <sthalik@misaki.pl> | 2016-09-20 23:24:16 +0200 |
commit | 9da39ce65e42097b5f05eed2ce2cd40cf234ef73 (patch) | |
tree | da0ad49b346c5802c1c6d9954ac5053de8ba3202 /tracker-pt/point_tracker.h | |
parent | 32e1adc0af44cf0cb8495118700884b7ad479a6e (diff) |
tracker/pt: merge from unstable
- the pose estimator doesn't need locking at all
- only return point count to the dialog, reducing locking
- allow only 8 pixels of difference between consecutive dynamic
pose frames at 640x480, half that at 320x240
- extract points taking into account pixel brightness, not merely
contours
- in case of more than three points, prefer the brightest ones,
scoring on radius and average pixel brightness
Diffstat (limited to 'tracker-pt/point_tracker.h')
-rw-r--r-- | tracker-pt/point_tracker.h | 10 |
1 files changed, 4 insertions, 6 deletions
diff --git a/tracker-pt/point_tracker.h b/tracker-pt/point_tracker.h index 00e9278c..77c07125 100644 --- a/tracker-pt/point_tracker.h +++ b/tracker-pt/point_tracker.h @@ -15,7 +15,6 @@ #include "ftnoir_tracker_pt_settings.h" #include <QObject> -#include <QMutex> class Affine { @@ -118,9 +117,9 @@ public: // track the pose using the set of normalized point coordinates (x pos in range -0.5:0.5) // f : (focal length)/(sensor width) // dt : time since last call - void track(const std::vector<cv::Vec2f>& projected_points, const PointModel& model, float f, bool dynamic_pose, int init_phase_timeout); - Affine pose() { QMutexLocker l(&mtx); return X_CM; } - cv::Vec2f project(const cv::Vec3f& v_M, float f); + void track(const std::vector<cv::Vec2d>& projected_points, const PointModel& model, double focal_length, bool dynamic_pose, int init_phase_timeout, int w, int h); + Affine pose() { return X_CM; } + cv::Vec2d project(const cv::Vec3d& v_M, double focal_length); private: // the points in model order struct PointOrder @@ -133,15 +132,14 @@ private: } }; - PointOrder find_correspondences_previous(const std::vector<cv::Vec2f>& points, const PointModel &model, float f); PointOrder find_correspondences(const std::vector<cv::Vec2d>& projected_points, const PointModel &model); + PointOrder find_correspondences_previous(const std::vector<cv::Vec2d>& points, const PointModel &model, double focal_length, int w, int h); bool POSIT(const PointModel& point_model, const PointOrder& order, double focal_length); // The POSIT algorithm, returns the number of iterations Affine X_CM; // trafo from model to camera Timer t; bool init_phase; - QMutex mtx; }; #endif //POINTTRACKER_H |