Diffstat (limited to 'tracker-points')
-rw-r--r--  tracker-points/CMakeLists.txt               |  2
-rw-r--r--  tracker-points/ftnoir_tracker_pt.cpp        | 87
-rw-r--r--  tracker-points/ftnoir_tracker_pt.h          |  1
-rw-r--r--  tracker-points/module/point_extractor.cpp   |  3
-rw-r--r--  tracker-points/module/point_extractor.h     |  9
-rw-r--r--  tracker-points/pt-api.hpp                   |  2
6 files changed, 91 insertions, 13 deletions
diff --git a/tracker-points/CMakeLists.txt b/tracker-points/CMakeLists.txt
index b9fcca9e..dd3a0281 100644
--- a/tracker-points/CMakeLists.txt
+++ b/tracker-points/CMakeLists.txt
@@ -2,7 +2,7 @@ find_package(OpenCV QUIET)
 if(OpenCV_FOUND)
     otr_module(tracker-points-base STATIC)
     target_include_directories(${self} SYSTEM PUBLIC ${OpenCV_INCLUDE_DIRS})
-    target_link_libraries(${self} opencv_imgproc opentrack-cv opencv_core opentrack-video)
+    target_link_libraries(${self} opencv_imgproc opencv_calib3d opentrack-cv opencv_core opentrack-video)
     #set_property(TARGET ${self} PROPERTY OUTPUT_NAME "points-base")
 endif()
 add_subdirectory(module)
diff --git a/tracker-points/ftnoir_tracker_pt.cpp b/tracker-points/ftnoir_tracker_pt.cpp
index e455a9ed..deef37b2 100644
--- a/tracker-points/ftnoir_tracker_pt.cpp
+++ b/tracker-points/ftnoir_tracker_pt.cpp
@@ -20,6 +20,8 @@
 #include <opencv2\calib3d.hpp>
 
+#include <iostream>
+
 using namespace options;
 
 namespace pt_impl {
@@ -74,7 +76,8 @@ void Tracker_PT::run()
         if (preview_visible)
             *preview_frame = *frame;
 
-        point_extractor->extract_points(*frame, *preview_frame, points);
+        iImagePoints.clear();
+        point_extractor->extract_points(*frame, *preview_frame, points, iImagePoints);
         point_count.store(points.size(), std::memory_order_relaxed);
 
         const bool success = points.size() >= PointModel::N_POINTS;
@@ -98,19 +101,67 @@ void Tracker_PT::run()
 
             // TODO: Solve with OpenCV
 
-            std::vector<cv::Point3f> objectPoints;
-            //TODO: Stuff object points in that vector
+            // Construct the points defining the object we want to detect based on settings.
+            // We are converting them from millimeters to meters.
+            // TODO: Need to support clip too. That's cap only for now.
+            std::vector<cv::Point3f> objectPoints;
+            objectPoints.push_back(cv::Point3f(s.cap_x/1000.0,0,0)); // Right
+            objectPoints.push_back(cv::Point3f(-s.cap_x/1000.0, 0, 0)); // Left
+            objectPoints.push_back(cv::Point3f(0, s.cap_y/1000.0, s.cap_z/1000.0)); // Top
 
             std::vector<cv::Point2f> trackedPoints;
-            //TODO: Stuff bitmap point in there making sure they match the order of the object point
+            //TODO: Stuff bitmap point in there making sure they match the order of the object point
+            // Find top most point
+            int topPointIndex = -1;
+            int maxY = 0;
+            for (int i = 0; i < 3; i++)
+            {
+                if (iImagePoints[i][1]>maxY)
+                {
+                    maxY = iImagePoints[i][1];
+                    topPointIndex = i;
+                }
+            }
+
+            int rightPointIndex = -1;
+            int maxX = 0;
+
+            // Find right most point
+            for (int i = 0; i < 3; i++)
+            {
+                // Excluding top most point
+                if (i!=topPointIndex && iImagePoints[i][0] > maxX)
+                {
+                    maxX = iImagePoints[i][0];
+                    rightPointIndex = i;
+                }
+            }
+
+            // Find left most point
+            int leftPointIndex = -1;
+            for (int i = 0; i < 3; i++)
+            {
+                // Excluding top most point
+                if (i != topPointIndex && i != rightPointIndex)
+                {
+                    leftPointIndex = i;
+                    break;
+                }
+            }
+
+            //
+            trackedPoints.push_back(cv::Point2f(iImagePoints[rightPointIndex][0], iImagePoints[rightPointIndex][1]));
+            trackedPoints.push_back(cv::Point2f(iImagePoints[leftPointIndex][0], iImagePoints[leftPointIndex][1]));
+            trackedPoints.push_back(cv::Point2f(iImagePoints[topPointIndex][0], iImagePoints[topPointIndex][1]));
 
             // Create our camera matrix
-            // TODO: Just do that once, use data memeber instead
+            // TODO: Just do that once, use data member instead
             // Double or Float?
             cv::Mat cameraMatrix;
             cameraMatrix.create(3, 3, CV_64FC1);
             cameraMatrix.setTo(cv::Scalar(0));
             cameraMatrix.at<double>(0, 0) = camera->info.focalLengthX;
-            cameraMatrix.at<double>(1, 1) = camera->info.focalLengthX;
+            cameraMatrix.at<double>(1, 1) = camera->info.focalLengthY;
             cameraMatrix.at<double>(0, 2) = camera->info.principalPointX;
             cameraMatrix.at<double>(1, 2) = camera->info.principalPointY;
             cameraMatrix.at<double>(2, 2) = 1;
@@ -123,11 +174,33 @@ void Tracker_PT::run()
                 distCoeffs.at<double>(i, 0) = 0;
             }
 
+            // Define our solution arrays
+            // They will receive up to 4 solutions for our P3P problem
             std::vector<cv::Mat> rvecs, tvecs;
             // TODO: try SOLVEPNP_AP3P too
-            int num_of_solutions = cv::solveP3P(objectPoints, trackedPoints, cameraMatrix, distCoeffs, rvecs, tvecs, cv::SOLVEPNP_P3P);
+            int solutionCount = cv::solveP3P(objectPoints, trackedPoints, cameraMatrix, distCoeffs, rvecs, tvecs, cv::SOLVEPNP_AP3P);
+            if (solutionCount > 0)
+            {
+                std::cout << "Solution count: " << solutionCount << "\n";
+
+                // Find the solution we want
+                for (int i = 0; i < solutionCount; i++)
+                {
+                    std::cout << "Translation:\n";
+                    std::cout << tvecs.at(i);
+                    std::cout << "\n";
+                    std::cout << "Rotation:\n";
+                    std::cout << rvecs.at(i);
+                    std::cout << "\n";
+                }
+
+                std::cout << "\n";
+
+            }
+
+            // TODO: Work out rotation angles
+            // TODO: Choose the one solution that makes sense for us
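The hunk above prints every candidate pose and leaves choosing "the one solution that makes sense for us" as a TODO. With only three point correspondences every P3P candidate fits the measured points equally well, so the usual disambiguation is a physical check plus temporal continuity. The sketch below is not part of the commit; the helper name pick_p3p_solution and the idea of comparing against the previous frame's translation are illustrative assumptions.

    #include <opencv2/core.hpp>

    #include <limits>
    #include <vector>

    // Hypothetical helper: among the candidate poses returned by cv::solveP3P,
    // keep those with the model in front of the camera (positive Z translation)
    // and prefer the one closest to the translation accepted on the previous
    // frame. Returns -1 when no candidate qualifies.
    static int pick_p3p_solution(const std::vector<cv::Mat>& tvecs,
                                 const cv::Vec3d& previousTranslation)
    {
        int bestIndex = -1;
        double bestDistance = std::numeric_limits<double>::max();

        for (size_t i = 0; i < tvecs.size(); i++)
        {
            // solveP3P returns 3x1 CV_64F translation vectors
            const cv::Vec3d t(tvecs[i].at<double>(0),
                              tvecs[i].at<double>(1),
                              tvecs[i].at<double>(2));

            if (t[2] <= 0) // model behind the camera, physically impossible
                continue;

            const double distance = cv::norm(t - previousTranslation);
            if (distance < bestDistance)
            {
                bestDistance = distance;
                bestIndex = int(i);
            }
        }
        return bestIndex;
    }

A caller would keep the translation of the last accepted pose and feed it back in on the next frame.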
diff --git a/tracker-points/ftnoir_tracker_pt.h b/tracker-points/ftnoir_tracker_pt.h
index 210c6a01..9b8da4ae 100644
--- a/tracker-points/ftnoir_tracker_pt.h
+++ b/tracker-points/ftnoir_tracker_pt.h
@@ -62,6 +62,7 @@ private:
     std::unique_ptr<QLayout> layout;
 
     std::vector<vec2> points;
+    std::vector<vec2> iImagePoints;
 
     int preview_width = 320, preview_height = 240;
diff --git a/tracker-points/module/point_extractor.cpp b/tracker-points/module/point_extractor.cpp
index 1a75a3e3..d1975317 100644
--- a/tracker-points/module/point_extractor.cpp
+++ b/tracker-points/module/point_extractor.cpp
@@ -239,7 +239,7 @@ static void draw_blobs(cv::Mat& preview_frame, const blob* blobs, unsigned nblob
     }
 }
 
-void PointExtractor::extract_points(const pt_frame& frame_, pt_preview& preview_frame_, std::vector<vec2>& points)
+void PointExtractor::extract_points(const pt_frame& frame_, pt_preview& preview_frame_, std::vector<vec2>& points, std::vector<vec2>& imagePoints)
 {
     const cv::Mat& frame = frame_.as_const<Frame>()->mat;
 
@@ -375,6 +375,7 @@ end:
         vec2 p;
         std::tie(p[0], p[1]) = to_screen_pos(b.pos[0], b.pos[1], W, H);
         points.push_back(p);
+        imagePoints.push_back(vec2(b.pos[0], b.pos[1]));
     }
 }
diff --git a/tracker-points/module/point_extractor.h b/tracker-points/module/point_extractor.h
index a6103667..2af5c131 100644
--- a/tracker-points/module/point_extractor.h
+++ b/tracker-points/module/point_extractor.h
@@ -33,16 +33,19 @@ class PointExtractor final : public pt_point_extractor
 public:
     // extracts points from frame and draws some processing info into frame, if draw_output is set
     // dt: time since last call in seconds
-    void extract_points(const pt_frame& frame, pt_preview& preview_frame, std::vector<vec2>& points) override;
+    void extract_points(const pt_frame& frame, pt_preview& preview_frame, std::vector<vec2>& points, std::vector<vec2>& imagePoints) override;
     PointExtractor(const QString& module_name);
+
+public:
+    std::vector<blob> blobs;
+
 private:
     static constexpr int max_blobs = 16;
 
     pt_settings s;
 
     cv::Mat1b frame_gray_unmasked, frame_bin, frame_gray;
-    cv::Mat1f hist;
-    std::vector<blob> blobs;
+    cv::Mat1f hist;
     cv::Mat1b ch[3];
 
     void ensure_channel_buffers(const cv::Mat& orig_frame);
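PointExtractor::extract_points() now fills two parallel vectors: points keeps the coordinates produced by to_screen_pos() for the existing 2D tracking path, while imagePoints carries the raw pixel centres of the same blobs, in the same order, which is what cv::solveP3P needs together with the camera intrinsics. Any other implementation of pt_point_extractor has to adopt the widened signature as well; the stub below is only a sketch, with a hypothetical class name, of what such an override looks like.

    #include "pt-api.hpp" // path relative to tracker-points/, adjust as needed

    #include <vector>

    // Hypothetical extractor: only the shape of the override matters here.
    struct DummyExtractor final : pt_point_extractor
    {
        void extract_points(const pt_frame& image, pt_preview& preview_frame,
                            std::vector<vec2>& points,
                            std::vector<vec2>& imagePoints) override
        {
            (void)image;
            (void)preview_frame;

            points.clear();
            imagePoints.clear();

            // For every detected marker, push the mapped screen-space position
            // into `points` and the raw pixel position into `imagePoints`,
            // keeping both vectors index-aligned so the caller can pair them.
        }
    };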
diff --git a/tracker-points/pt-api.hpp b/tracker-points/pt-api.hpp
index a27c7e38..69f74498 100644
--- a/tracker-points/pt-api.hpp
+++ b/tracker-points/pt-api.hpp
@@ -99,7 +99,7 @@ struct pt_point_extractor : pt_pixel_pos_mixin
     pt_point_extractor();
     virtual ~pt_point_extractor();
 
-    virtual void extract_points(const pt_frame& image, pt_preview& preview_frame, std::vector<vec2>& points) = 0;
+    virtual void extract_points(const pt_frame& image, pt_preview& preview_frame, std::vector<vec2>& points, std::vector<vec2>& imagePoints) = 0;
 
     static f threshold_radius_value(int w, int h, int threshold);
 };
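The other TODO left open in ftnoir_tracker_pt.cpp above is "work out rotation angles". A minimal sketch, not part of the commit: once a candidate has been chosen, cv::Rodrigues turns its rotation vector into a 3x3 matrix, from which Euler angles can be read. Which of the three angles maps onto opentrack's yaw, pitch and roll depends on the camera and model axis conventions, so the naming below is an assumption, and the helper name is hypothetical.

    #include <opencv2/calib3d.hpp>
    #include <opencv2/core.hpp>

    #include <cmath>

    // Hypothetical helper: ZYX (yaw-pitch-roll) decomposition of a pose
    // returned by solveP3P. Angles come back in degrees; gimbal-lock
    // handling is omitted for brevity.
    static void rvec_to_euler_degrees(const cv::Mat& rvec,
                                      double& yaw, double& pitch, double& roll)
    {
        cv::Mat R;
        cv::Rodrigues(rvec, R); // 3x3 rotation matrix, CV_64F

        pitch = std::asin(-R.at<double>(2, 0));
        yaw   = std::atan2(R.at<double>(1, 0), R.at<double>(0, 0));
        roll  = std::atan2(R.at<double>(2, 1), R.at<double>(2, 2));

        constexpr double rad2deg = 180.0 / 3.14159265358979323846;
        yaw   *= rad2deg;
        pitch *= rad2deg;
        roll  *= rad2deg;
    }

Since the object points above were converted from millimeters to meters, the translation vector of the chosen candidate is already in meters and can drive the X/Y/Z axes directly.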