From f5a5777e2711ac342b9cab517ac3bfba0313bdaf Mon Sep 17 00:00:00 2001
From: Stéphane Lenclud
Date: Mon, 18 Mar 2019 16:52:19 +0100
Subject: Auto-reset when detecting absurd angles. Better buffer scaling to
 focus on the relevant part of our spectrum.

---
 tracker-pt/ftnoir_tracker_pt.cpp       | 11 ++++++++++-
 tracker-pt/module/camera_kinect_ir.cpp | 19 +++++++++++++++++--
 tracker-pt/point_tracker.cpp           |  7 +++++--
 tracker-pt/point_tracker.h             |  2 +-
 4 files changed, 33 insertions(+), 6 deletions(-)

diff --git a/tracker-pt/ftnoir_tracker_pt.cpp b/tracker-pt/ftnoir_tracker_pt.cpp
index 1f4afe46..797e28dc 100644
--- a/tracker-pt/ftnoir_tracker_pt.cpp
+++ b/tracker-pt/ftnoir_tracker_pt.cpp
@@ -214,13 +214,22 @@ void Tracker_PT::data(double *data)
         data[TX] = (double)t[0] / 10;
         data[TY] = (double)t[1] / 10;
         data[TZ] = (double)t[2] / 10;
+
+        // Work around an issue where our tracker gets stuck reporting extreme roll and yaw values around +/-170.
+        // Using a Kinect with a cap, this is easy to reproduce by getting close enough to the sensor (<50 cm) and stepping back.
+        if (data[Roll] > 100 || data[Roll] < -100 || data[Pitch] < -60)
+        {
+            // Hopefully our user did not break his neck; something is wrong, so reset our tracker.
+            QMutexLocker l(&center_lock);
+            point_tracker.reset_state();
+            // TODO: Provide the last valid data frame instead
+        }
     }
 }
 
 bool Tracker_PT::center()
 {
     QMutexLocker l(&center_lock);
-    point_tracker.reset_state();
     return false;
 }
 
diff --git a/tracker-pt/module/camera_kinect_ir.cpp b/tracker-pt/module/camera_kinect_ir.cpp
index 2b3f389f..a798b65b 100644
--- a/tracker-pt/module/camera_kinect_ir.cpp
+++ b/tracker-pt/module/camera_kinect_ir.cpp
@@ -6,6 +6,9 @@
  */
 
 #include "camera_kinect_ir.h"
+
+#if __has_include(<Kinect.h>)
+
 #include "frame.hpp"
 
 #include "compat/sleep.hpp"
@@ -220,6 +223,7 @@ bool CameraKinectIr::get_frame_(cv::Mat& frame)
     IFrameDescription* frameDescription = NULL;
     int nWidth = 0;
     int nHeight = 0;
+    float diagonalFieldOfView = 0.0f;
     UINT nBufferSize = 0;
     UINT16 *pBuffer = NULL;
 
@@ -240,6 +244,11 @@ bool CameraKinectIr::get_frame_(cv::Mat& frame)
         hr = frameDescription->get_Height(&nHeight);
     }
+    if (SUCCEEDED(hr))
+    {
+        hr = frameDescription->get_DiagonalFieldOfView(&diagonalFieldOfView);
+    }
+
     if (SUCCEEDED(hr))
     {
         hr = iInfraredFrame->AccessUnderlyingBuffer(&nBufferSize, &pBuffer);
     }
@@ -255,8 +264,12 @@ bool CameraKinectIr::get_frame_(cv::Mat& frame)
         // Convert that OpenCV matrix to an RGB one as this is what is expected by our point extractor
         // TODO: Ideally we should implement a point extractors that works with our native buffer
         // First resample to 8-bits
-        double min, max;
-        cv::minMaxLoc(raw, &min, &max); // Should we use 16bit min and max instead?
+        double min = std::numeric_limits<UINT16>::min();
+        double max = std::numeric_limits<UINT16>::max();
+        // For scaling to have more precision in the range we are interested in
+        min = max - 255;
+        //cv::minMaxLoc(raw, &min, &max); // Should we use 16bit min and max instead?
+
         cv::Mat raw8;
         raw.convertTo(raw8, CV_8U, 255.0 / (max - min), -255.0*min / (max - min));
         // Second convert to RGB
@@ -302,3 +315,5 @@ void CameraKinectIr::camera_deleter::operator()(cv::VideoCapture* cap)
 }
 
 } // ns pt_module
+
+#endif
diff --git a/tracker-pt/point_tracker.cpp b/tracker-pt/point_tracker.cpp
index ef70789a..35a3b65b 100644
--- a/tracker-pt/point_tracker.cpp
+++ b/tracker-pt/point_tracker.cpp
@@ -143,7 +143,7 @@ void PointTracker::track(const std::vector<vec2>& points,
     const f fx = pt_camera_info::get_focal_length(info.fov, info.res_x, info.res_y);
 
     PointOrder order;
-    if (init_phase_timeout <= 0 || t.elapsed_ms() > init_phase_timeout || init_phase)
+    if (init_phase_timeout <= 0 || timer.elapsed_ms() > init_phase_timeout || init_phase)
     {
         init_phase = true;
         order = find_correspondences(points.data(), model);
@@ -154,7 +154,7 @@ void PointTracker::track(const std::vector<vec2>& points,
     if (POSIT(model, order, fx) != -1)
     {
         init_phase = false;
-        t.start();
+        timer.start();
     }
     else
         reset_state();
@@ -360,7 +360,10 @@ vec2 PointTracker::project(const vec3& v_M, f focal_length, const Affine& X_CM)
 void PointTracker::reset_state()
 {
     init_phase = true;
+    //X_CM = {};
     X_CM_expected = {};
+    //prev_positions = {};
+    //timer.start();
 }
 
 } // ns pt_impl
diff --git a/tracker-pt/point_tracker.h b/tracker-pt/point_tracker.h
index 70c7a9fc..c7ae9b54 100644
--- a/tracker-pt/point_tracker.h
+++ b/tracker-pt/point_tracker.h
@@ -78,7 +78,7 @@ private:
     Affine X_CM; // transform from model to camera
     Affine X_CM_expected;
     PointOrder prev_positions;
-    Timer t;
+    Timer timer;
     bool init_phase = true;
 };
 
-- 
cgit v1.2.3
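
For reference, a minimal standalone sketch of the scaling idea used in get_frame_() above: instead of stretching between the frame's own min/max (cv::minMaxLoc), map only the top 255 counts of the 16-bit IR range onto the 8-bit output, so the brightest returns keep full contrast while the dim background saturates to black. This is an illustration only, not code from the patch; the helper name, the synthetic test frame, and the 424x512 resolution are assumptions.

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <cstdint>
#include <limits>

// Illustrative helper (not from the patch): convert a 16-bit IR frame to an
// 8-bit BGR image, keeping only the brightest 255 counts of the 16-bit range.
static cv::Mat ir16_to_bgr8(const cv::Mat& raw16)
{
    const double max = std::numeric_limits<uint16_t>::max(); // 65535
    const double min = max - 255;                            // 65280
    cv::Mat raw8;
    // convertTo computes saturate_cast<uchar>(src * alpha + beta): values below
    // `min` clamp to 0, values in [min, max] spread over the full 0..255 range.
    raw16.convertTo(raw8, CV_8U, 255.0 / (max - min), -255.0 * min / (max - min));
    cv::Mat bgr;
    cv::cvtColor(raw8, bgr, cv::COLOR_GRAY2BGR); // the point extractor expects a 3-channel image
    return bgr;
}

int main()
{
    // Synthetic 16-bit frame: dim background plus one bright blob near the centre.
    cv::Mat raw16(424, 512, CV_16UC1, cv::Scalar(2000));
    cv::circle(raw16, {256, 212}, 5, cv::Scalar(65500), cv::FILLED);
    cv::Mat bgr = ir16_to_bgr8(raw16);
    return bgr.empty() ? 1 : 0;
}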