diff options
author | Stéphane Lenclud <github@lenclud.com> | 2019-04-13 12:59:22 +0200 |
---|---|---|
committer | Stéphane Lenclud <github@lenclud.com> | 2019-04-24 18:46:12 +0200 |
commit | abaf23d7043c42a07e0d71fc0a17a8264c828d48 (patch) | |
tree | b5e8f14e4730bb45d26d2858a26ac2c6cbfc23a8 /tracker-easy/tracker-easy.cpp | |
parent | ac8f7a3e7b29a5ce60ae751ee3a97e3b344c8f3f (diff) |
EasyTracker: Adding namespace. Reducing number of classes.
Diffstat (limited to 'tracker-easy/tracker-easy.cpp')
-rw-r--r-- | tracker-easy/tracker-easy.cpp | 564 |
1 file changed, 283 insertions, 281 deletions
diff --git a/tracker-easy/tracker-easy.cpp b/tracker-easy/tracker-easy.cpp index 5fd1952c..5fe93b0e 100644 --- a/tracker-easy/tracker-easy.cpp +++ b/tracker-easy/tracker-easy.cpp @@ -11,7 +11,7 @@ #include "video/video-widget.hpp" #include "compat/math-imports.hpp" #include "compat/check-visible.hpp" - +#include "cv-point-extractor.h" #include "tracker-easy-api.h" #include <QHBoxLayout> @@ -26,358 +26,360 @@ using namespace options; - -EasyTracker::EasyTracker(pointer<IEasyTrackerTraits> const& traits) : - traits { traits }, - s { traits->get_module_name() }, - point_extractor { traits->make_point_extractor() }, - iPreview{ preview_width, preview_height } +namespace EasyTracker { - cv::setBreakOnError(true); - cv::setNumThreads(1); - connect(s.b.get(), &bundle_::saving, this, &EasyTracker::maybe_reopen_camera, Qt::DirectConnection); - connect(s.b.get(), &bundle_::reloading, this, &EasyTracker::maybe_reopen_camera, Qt::DirectConnection); - - connect(&s.fov, value_::value_changed<int>(), this, &EasyTracker::set_fov, Qt::DirectConnection); - set_fov(s.fov); -} - -EasyTracker::~EasyTracker() -{ - // - cv::destroyWindow("Preview"); + Tracker::Tracker() : + s{ KModuleName }, + point_extractor{ std::make_unique<CvPointExtractor>() }, + iPreview{ preview_width, preview_height } + { + cv::setBreakOnError(true); + cv::setNumThreads(1); - requestInterruption(); - wait(); + connect(s.b.get(), &bundle_::saving, this, &Tracker::maybe_reopen_camera, Qt::DirectConnection); + connect(s.b.get(), &bundle_::reloading, this, &Tracker::maybe_reopen_camera, Qt::DirectConnection); - QMutexLocker l(&camera_mtx); - camera->stop(); -} + connect(&s.fov, value_::value_changed<int>(), this, &Tracker::set_fov, Qt::DirectConnection); + set_fov(s.fov); + } + Tracker::~Tracker() + { + // + cv::destroyWindow("Preview"); -// Compute Euler angles from ratation matrix -cv::Vec3f EulerAngles(cv::Mat &R) -{ + requestInterruption(); + wait(); - float sy = sqrt(R.at<double>(0, 0) * R.at<double>(0, 0) + 
R.at<double>(1, 0) * R.at<double>(1, 0)); + QMutexLocker l(&camera_mtx); + camera->stop(); + } - bool singular = sy < 1e-6; // If - float x, y, z; - if (!singular) - { - x = atan2(R.at<double>(2, 1), R.at<double>(2, 2)); - y = atan2(-R.at<double>(2, 0), sy); - z = atan2(R.at<double>(1, 0), R.at<double>(0, 0)); - } - else + // Compute Euler angles from ratation matrix + cv::Vec3f EulerAngles(cv::Mat &R) { - x = atan2(-R.at<double>(1, 2), R.at<double>(1, 1)); - y = atan2(-R.at<double>(2, 0), sy); - z = 0; - } - // Convert to degrees - return cv::Vec3f(x* 180 / CV_PI, y* 180 / CV_PI, z* 180 / CV_PI); -} + float sy = sqrt(R.at<double>(0, 0) * R.at<double>(0, 0) + R.at<double>(1, 0) * R.at<double>(1, 0)); + bool singular = sy < 1e-6; // If -void getEulerAngles(cv::Mat &rotCamerMatrix, cv::Vec3d &eulerAngles) -{ + float x, y, z; + if (!singular) + { + x = atan2(R.at<double>(2, 1), R.at<double>(2, 2)); + y = atan2(-R.at<double>(2, 0), sy); + z = atan2(R.at<double>(1, 0), R.at<double>(0, 0)); + } + else + { + x = atan2(-R.at<double>(1, 2), R.at<double>(1, 1)); + y = atan2(-R.at<double>(2, 0), sy); + z = 0; + } - cv::Mat cameraMatrix, rotMatrix, transVect, rotMatrixX, rotMatrixY, rotMatrixZ; - double* _r = rotCamerMatrix.ptr<double>(); - double projMatrix[12] = { _r[0],_r[1],_r[2],0, - _r[3],_r[4],_r[5],0, - _r[6],_r[7],_r[8],0 }; - - cv::decomposeProjectionMatrix(cv::Mat(3, 4, CV_64FC1, projMatrix), - cameraMatrix, - rotMatrix, - transVect, - rotMatrixX, - rotMatrixY, - rotMatrixZ, - eulerAngles); -} + // Convert to degrees + return cv::Vec3f(x * 180 / CV_PI, y * 180 / CV_PI, z * 180 / CV_PI); + } -void EasyTracker::run() -{ - maybe_reopen_camera(); + void getEulerAngles(cv::Mat &rotCamerMatrix, cv::Vec3d &eulerAngles) + { - while(!isInterruptionRequested()) - { - bool new_frame = false; + cv::Mat cameraMatrix, rotMatrix, transVect, rotMatrixX, rotMatrixY, rotMatrixZ; + double* _r = rotCamerMatrix.ptr<double>(); + double projMatrix[12] = { _r[0],_r[1],_r[2],0, + 
_r[3],_r[4],_r[5],0, + _r[6],_r[7],_r[8],0 }; + + cv::decomposeProjectionMatrix(cv::Mat(3, 4, CV_64FC1, projMatrix), + cameraMatrix, + rotMatrix, + transVect, + rotMatrixX, + rotMatrixY, + rotMatrixZ, + eulerAngles); + } - { - QMutexLocker l(&camera_mtx); - if (camera) - std::tie(iFrame, new_frame) = camera->get_frame(); - } + void Tracker::run() + { + maybe_reopen_camera(); - if (new_frame) + while (!isInterruptionRequested()) { - //TODO: We should not assume channel size of 1 byte - iMatFrame = cv::Mat(iFrame.height, iFrame.width, CV_MAKETYPE(CV_8U,iFrame.channels), iFrame.data, iFrame.stride); + bool new_frame = false; - - const bool preview_visible = check_is_visible(); - if (preview_visible) { - iPreview = iMatFrame; - } + QMutexLocker l(&camera_mtx); - iPoints.clear(); - point_extractor->extract_points(iMatFrame, (preview_visible?&iPreview.iFrameRgb:nullptr), iPoints); - point_count.store(iPoints.size(), std::memory_order_relaxed); - - - if (preview_visible) - { - //iPreview = iMatFrame; - cv::imshow("Preview", iPreview.iFrameRgb); - cv::waitKey(1); + if (camera) + std::tie(iFrame, new_frame) = camera->get_frame(); } - else + + if (new_frame) { - cv::destroyWindow("Preview"); - } + //TODO: We should not assume channel size of 1 byte + iMatFrame = cv::Mat(iFrame.height, iFrame.width, CV_MAKETYPE(CV_8U, iFrame.channels), iFrame.data, iFrame.stride); + + + const bool preview_visible = check_is_visible(); + if (preview_visible) + { + iPreview = iMatFrame; + } - const bool success = iPoints.size() >= KPointCount; + iPoints.clear(); + point_extractor->extract_points(iMatFrame, (preview_visible ? 
&iPreview.iFrameRgb : nullptr), iPoints); + point_count.store(iPoints.size(), std::memory_order_relaxed); - int topPointIndex = -1; - { - QMutexLocker l(¢er_lock); + if (preview_visible) + { + //iPreview = iMatFrame; + cv::imshow("Preview", iPreview.iFrameRgb); + cv::waitKey(1); + } + else + { + cv::destroyWindow("Preview"); + } + + const bool success = iPoints.size() >= KPointCount; + + int topPointIndex = -1; - if (success) { - ever_success.store(true, std::memory_order_relaxed); - - // Solve P3P problem with OpenCV - - // Construct the points defining the object we want to detect based on settings. - // We are converting them from millimeters to centimeters. - // TODO: Need to support clip too. That's cap only for now. - // s.active_model_panel != PointModel::Clip - - std::vector<cv::Point3f> objectPoints; - objectPoints.push_back(cv::Point3f(s.cap_x/10.0, s.cap_z / 10.0, -s.cap_y / 10.0)); // Right - objectPoints.push_back(cv::Point3f(-s.cap_x/10.0, s.cap_z / 10.0, -s.cap_y / 10.0)); // Left - objectPoints.push_back(cv::Point3f(0, 0, 0)); // Top - - //Bitmap origin is top left - std::vector<cv::Point2f> trackedPoints; - // Stuff bitmap point in there making sure they match the order of the object point - // Find top most point, that's the one with min Y as we assume our guy's head is not up side down - - int minY = std::numeric_limits<int>::max(); - for (int i = 0; i < 3; i++) + QMutexLocker l(¢er_lock); + + if (success) { - if (iPoints[i][1]<minY) + ever_success.store(true, std::memory_order_relaxed); + + // Solve P3P problem with OpenCV + + // Construct the points defining the object we want to detect based on settings. + // We are converting them from millimeters to centimeters. + // TODO: Need to support clip too. That's cap only for now. 
+ // s.active_model_panel != PointModel::Clip + + std::vector<cv::Point3f> objectPoints; + objectPoints.push_back(cv::Point3f(s.cap_x / 10.0, s.cap_z / 10.0, -s.cap_y / 10.0)); // Right + objectPoints.push_back(cv::Point3f(-s.cap_x / 10.0, s.cap_z / 10.0, -s.cap_y / 10.0)); // Left + objectPoints.push_back(cv::Point3f(0, 0, 0)); // Top + + //Bitmap origin is top left + std::vector<cv::Point2f> trackedPoints; + // Stuff bitmap point in there making sure they match the order of the object point + // Find top most point, that's the one with min Y as we assume our guy's head is not up side down + + int minY = std::numeric_limits<int>::max(); + for (int i = 0; i < 3; i++) { - minY = iPoints[i][1]; - topPointIndex = i; + if (iPoints[i][1] < minY) + { + minY = iPoints[i][1]; + topPointIndex = i; + } } - } - int rightPointIndex = -1; - int maxX = 0; + int rightPointIndex = -1; + int maxX = 0; - // Find right most point - for (int i = 0; i < 3; i++) - { - // Excluding top most point - if (i!=topPointIndex && iPoints[i][0] > maxX) + // Find right most point + for (int i = 0; i < 3; i++) { - maxX = iPoints[i][0]; - rightPointIndex = i; + // Excluding top most point + if (i != topPointIndex && iPoints[i][0] > maxX) + { + maxX = iPoints[i][0]; + rightPointIndex = i; + } } - } - // Find left most point - int leftPointIndex = -1; - for (int i = 0; i < 3; i++) - { - // Excluding top most point - if (i != topPointIndex && i != rightPointIndex) + // Find left most point + int leftPointIndex = -1; + for (int i = 0; i < 3; i++) { - leftPointIndex = i; - break; + // Excluding top most point + if (i != topPointIndex && i != rightPointIndex) + { + leftPointIndex = i; + break; + } } - } - // - trackedPoints.push_back(cv::Point2f(iPoints[rightPointIndex][0], iPoints[rightPointIndex][1])); - trackedPoints.push_back(cv::Point2f(iPoints[leftPointIndex][0], iPoints[leftPointIndex][1])); - trackedPoints.push_back(cv::Point2f(iPoints[topPointIndex][0], iPoints[topPointIndex][1])); - - std::cout 
<< "Object: " << objectPoints << "\n"; - std::cout << "Points: " << trackedPoints << "\n"; - - - // Create our camera matrix - // TODO: Just do that once, use data member instead - // Double or Float? - cv::Mat cameraMatrix; - cameraMatrix.create(3, 3, CV_64FC1); - cameraMatrix.setTo(cv::Scalar(0)); - cameraMatrix.at<double>(0, 0) = iCameraInfo.focalLengthX; - cameraMatrix.at<double>(1, 1) = iCameraInfo.focalLengthY; - cameraMatrix.at<double>(0, 2) = iCameraInfo.principalPointX; - cameraMatrix.at<double>(1, 2) = iCameraInfo.principalPointY; - cameraMatrix.at<double>(2, 2) = 1; - - // Create distortion cooefficients - cv::Mat distCoeffs = cv::Mat::zeros(8, 1, CV_64FC1); - // As per OpenCV docs they should be thus: k1, k2, p1, p2, k3, k4, k5, k6 - distCoeffs.at<double>(0, 0) = 0; // Radial first order - distCoeffs.at<double>(1, 0) = iCameraInfo.radialDistortionSecondOrder; // Radial second order - distCoeffs.at<double>(2, 0) = 0; // Tangential first order - distCoeffs.at<double>(3, 0) = 0; // Tangential second order - distCoeffs.at<double>(4, 0) = 0; // Radial third order - distCoeffs.at<double>(5, 0) = iCameraInfo.radialDistortionFourthOrder; // Radial fourth order - distCoeffs.at<double>(6, 0) = 0; // Radial fith order - distCoeffs.at<double>(7, 0) = iCameraInfo.radialDistortionSixthOrder; // Radial sixth order - - // Define our solution arrays - // They will receive up to 4 solutions for our P3P problem - - - // TODO: try SOLVEPNP_AP3P too - iAngles.clear(); - iBestSolutionIndex = -1; - int solutionCount = cv::solveP3P(objectPoints, trackedPoints, cameraMatrix, distCoeffs, iRotations, iTranslations, cv::SOLVEPNP_P3P); - - if (solutionCount > 0) - { - std::cout << "Solution count: " << solutionCount << "\n"; - int minPitch = std::numeric_limits<int>::max(); - // Find the solution we want - for (int i = 0; i < solutionCount; i++) + // + trackedPoints.push_back(cv::Point2f(iPoints[rightPointIndex][0], iPoints[rightPointIndex][1])); + 
trackedPoints.push_back(cv::Point2f(iPoints[leftPointIndex][0], iPoints[leftPointIndex][1])); + trackedPoints.push_back(cv::Point2f(iPoints[topPointIndex][0], iPoints[topPointIndex][1])); + + std::cout << "Object: " << objectPoints << "\n"; + std::cout << "Points: " << trackedPoints << "\n"; + + + // Create our camera matrix + // TODO: Just do that once, use data member instead + // Double or Float? + cv::Mat cameraMatrix; + cameraMatrix.create(3, 3, CV_64FC1); + cameraMatrix.setTo(cv::Scalar(0)); + cameraMatrix.at<double>(0, 0) = iCameraInfo.focalLengthX; + cameraMatrix.at<double>(1, 1) = iCameraInfo.focalLengthY; + cameraMatrix.at<double>(0, 2) = iCameraInfo.principalPointX; + cameraMatrix.at<double>(1, 2) = iCameraInfo.principalPointY; + cameraMatrix.at<double>(2, 2) = 1; + + // Create distortion cooefficients + cv::Mat distCoeffs = cv::Mat::zeros(8, 1, CV_64FC1); + // As per OpenCV docs they should be thus: k1, k2, p1, p2, k3, k4, k5, k6 + distCoeffs.at<double>(0, 0) = 0; // Radial first order + distCoeffs.at<double>(1, 0) = iCameraInfo.radialDistortionSecondOrder; // Radial second order + distCoeffs.at<double>(2, 0) = 0; // Tangential first order + distCoeffs.at<double>(3, 0) = 0; // Tangential second order + distCoeffs.at<double>(4, 0) = 0; // Radial third order + distCoeffs.at<double>(5, 0) = iCameraInfo.radialDistortionFourthOrder; // Radial fourth order + distCoeffs.at<double>(6, 0) = 0; // Radial fith order + distCoeffs.at<double>(7, 0) = iCameraInfo.radialDistortionSixthOrder; // Radial sixth order + + // Define our solution arrays + // They will receive up to 4 solutions for our P3P problem + + + // TODO: try SOLVEPNP_AP3P too + iAngles.clear(); + iBestSolutionIndex = -1; + int solutionCount = cv::solveP3P(objectPoints, trackedPoints, cameraMatrix, distCoeffs, iRotations, iTranslations, cv::SOLVEPNP_P3P); + + if (solutionCount > 0) { - std::cout << "Translation:\n"; - std::cout << iTranslations.at(i); - std::cout << "\n"; - std::cout << "Rotation:\n"; - 
//std::cout << rvecs.at(i); - cv::Mat rotationCameraMatrix; - cv::Rodrigues(iRotations[i], rotationCameraMatrix); - cv::Vec3d angles; - getEulerAngles(rotationCameraMatrix,angles); - iAngles.push_back(angles); - - // Check if pitch is closest to zero - int absolutePitch = std::abs(angles[0]); - if (minPitch > absolutePitch) + std::cout << "Solution count: " << solutionCount << "\n"; + int minPitch = std::numeric_limits<int>::max(); + // Find the solution we want + for (int i = 0; i < solutionCount; i++) { - minPitch = absolutePitch; - iBestSolutionIndex = i; + std::cout << "Translation:\n"; + std::cout << iTranslations.at(i); + std::cout << "\n"; + std::cout << "Rotation:\n"; + //std::cout << rvecs.at(i); + cv::Mat rotationCameraMatrix; + cv::Rodrigues(iRotations[i], rotationCameraMatrix); + cv::Vec3d angles; + getEulerAngles(rotationCameraMatrix, angles); + iAngles.push_back(angles); + + // Check if pitch is closest to zero + int absolutePitch = std::abs(angles[0]); + if (minPitch > absolutePitch) + { + minPitch = absolutePitch; + iBestSolutionIndex = i; + } + + //cv::Vec3f angles=EulerAngles(quaternion); + std::cout << angles; + std::cout << "\n"; } - //cv::Vec3f angles=EulerAngles(quaternion); - std::cout << angles; std::cout << "\n"; + } - std::cout << "\n"; - } - } + // Send solution data back to main thread + QMutexLocker l2(&data_lock); + if (iBestSolutionIndex != -1) + { + iBestAngles = iAngles[iBestSolutionIndex]; + iBestTranslation = iTranslations[iBestSolutionIndex]; + } - // Send solution data back to main thread - QMutexLocker l2(&data_lock); - if (iBestSolutionIndex != -1) - { - iBestAngles = iAngles[iBestSolutionIndex]; - iBestTranslation = iTranslations[iBestSolutionIndex]; } - } - - if (preview_visible) - { - if (topPointIndex != -1) + if (preview_visible) { - // Render a cross to indicate which point is the head - iPreview.draw_head_center(iPoints[topPointIndex][0], iPoints[topPointIndex][1]); - } - - widget->update_image(iPreview.get_bitmap()); + 
if (topPointIndex != -1) + { + // Render a cross to indicate which point is the head + iPreview.draw_head_center(iPoints[topPointIndex][0], iPoints[topPointIndex][1]); + } - auto [ w, h ] = widget->preview_size(); - if (w != preview_width || h != preview_height) - { - // Resize preivew if widget size has changed - preview_width = w; preview_height = h; - iPreview = Preview(w, h); + widget->update_image(iPreview.get_bitmap()); + + auto[w, h] = widget->preview_size(); + if (w != preview_width || h != preview_height) + { + // Resize preivew if widget size has changed + preview_width = w; preview_height = h; + iPreview = Preview(w, h); + } } } } } -} -bool EasyTracker::maybe_reopen_camera() -{ - QMutexLocker l(&camera_mtx); + bool Tracker::maybe_reopen_camera() + { + QMutexLocker l(&camera_mtx); - return camera->start(iCameraInfo); -} + return camera->start(iCameraInfo); + } -void EasyTracker::set_fov(int value) -{ - QMutexLocker l(&camera_mtx); + void Tracker::set_fov(int value) + { + QMutexLocker l(&camera_mtx); -} + } -module_status EasyTracker::start_tracker(QFrame* video_frame) -{ - //video_frame->setAttribute(Qt::WA_NativeWindow); + module_status Tracker::start_tracker(QFrame* video_frame) + { + //video_frame->setAttribute(Qt::WA_NativeWindow); - widget = std::make_unique<video_widget>(video_frame); - layout = std::make_unique<QHBoxLayout>(video_frame); - layout->setContentsMargins(0, 0, 0, 0); - layout->addWidget(widget.get()); - video_frame->setLayout(layout.get()); - //video_widget->resize(video_frame->width(), video_frame->height()); - video_frame->show(); + widget = std::make_unique<video_widget>(video_frame); + layout = std::make_unique<QHBoxLayout>(video_frame); + layout->setContentsMargins(0, 0, 0, 0); + layout->addWidget(widget.get()); + video_frame->setLayout(layout.get()); + //video_widget->resize(video_frame->width(), video_frame->height()); + video_frame->show(); - // Create our camera - camera = video::make_camera(s.camera_name); + // Create our 
camera + camera = video::make_camera(s.camera_name); - start(QThread::HighPriority); + start(QThread::HighPriority); - return {}; -} + return {}; + } -void EasyTracker::data(double *data) -{ - if (ever_success.load(std::memory_order_relaxed)) + void Tracker::data(double *data) { - // Get data back from tracker thread - QMutexLocker l(&data_lock); - data[Yaw] = iBestAngles[1]; - data[Pitch] = iBestAngles[0]; - data[Roll] = iBestAngles[2]; - data[TX] = iBestTranslation[0]; - data[TY] = iBestTranslation[1]; - data[TZ] = iBestTranslation[2]; + if (ever_success.load(std::memory_order_relaxed)) + { + // Get data back from tracker thread + QMutexLocker l(&data_lock); + data[Yaw] = iBestAngles[1]; + data[Pitch] = iBestAngles[0]; + data[Roll] = iBestAngles[2]; + data[TX] = iBestTranslation[0]; + data[TY] = iBestTranslation[1]; + data[TZ] = iBestTranslation[2]; + } } -} -bool EasyTracker::center() -{ - QMutexLocker l(¢er_lock); - //TODO: Do we need to do anything there? - return false; -} + bool Tracker::center() + { + QMutexLocker l(¢er_lock); + //TODO: Do we need to do anything there? + return false; + } -int EasyTracker::get_n_points() -{ - return (int)point_count.load(std::memory_order_relaxed); -} + int Tracker::get_n_points() + { + return (int)point_count.load(std::memory_order_relaxed); + } +} |