summaryrefslogtreecommitdiffhomepage
diff options
context:
space:
mode:
authorStéphane Lenclud <github@lenclud.com>2019-03-31 09:40:37 +0200
committerStéphane Lenclud <github@lenclud.com>2019-04-24 18:46:12 +0200
commitfeb7026316a4f2ad551b4ea87226c264c5277ca4 (patch)
treea1f76ac4b525a29b016269c94f9f5758c7ddf940
parent8141c4f07b1ddc4555d10a78ea5c3f482c8be04f (diff)
First solveP3P results that are looking consistent.
Translation vector in meters seems to be spot on. Rotation angles still need to be computed. Radial distortion still needs to be taken into account.
-rw-r--r--tracker-kinect-face/camera_kinect_ir.cpp33
-rw-r--r--tracker-kinect-face/camera_kinect_ir.h9
-rw-r--r--tracker-points/CMakeLists.txt2
-rw-r--r--tracker-points/ftnoir_tracker_pt.cpp87
-rw-r--r--tracker-points/ftnoir_tracker_pt.h1
-rw-r--r--tracker-points/module/point_extractor.cpp3
-rw-r--r--tracker-points/module/point_extractor.h9
-rw-r--r--tracker-points/pt-api.hpp2
-rw-r--r--video-opencv/impl-camera.cpp2
-rw-r--r--video-opencv/impl.hpp2
-rw-r--r--video/camera.hpp9
11 files changed, 137 insertions, 22 deletions
diff --git a/tracker-kinect-face/camera_kinect_ir.cpp b/tracker-kinect-face/camera_kinect_ir.cpp
index b1975db9..8499003b 100644
--- a/tracker-kinect-face/camera_kinect_ir.cpp
+++ b/tracker-kinect-face/camera_kinect_ir.cpp
@@ -114,7 +114,7 @@ inline void SafeRelease(Interface *& pInterfaceToRelease)
}
}
-bool CameraKinectIr::start(const info& args)
+bool CameraKinectIr::start(info& aInfo)
{
stop();
@@ -146,6 +146,11 @@ bool CameraKinectIr::start(const info& args)
}
SafeRelease(pInfraredFrameSource);
+
+ if (SUCCEEDED(hr))
+ {
+ iKinectSensor->get_CoordinateMapper(&iCoordinateMapper);
+ }
}
@@ -153,6 +158,27 @@ bool CameraKinectIr::start(const info& args)
{
WaitForFirstFrame();
bool success = iMatFrame.ptr() != nullptr;
+ if (success)
+ {
+ // Provide frame info
+ aInfo.width = width;
+ aInfo.height = height;
+
+ CameraIntrinsics intrinsics;
+ hr = iCoordinateMapper->GetDepthCameraIntrinsics(&intrinsics);
+ if (SUCCEEDED(hr))
+ {
+ aInfo.focalLengthX = intrinsics.FocalLengthX;
+ aInfo.focalLengthY = intrinsics.FocalLengthY;
+ aInfo.principalPointX = intrinsics.PrincipalPointX;
+ aInfo.principalPointY = intrinsics.PrincipalPointY;
+ aInfo.radialDistortionFourthOrder = intrinsics.RadialDistortionFourthOrder;
+ aInfo.radialDistortionSecondOrder = intrinsics.RadialDistortionSecondOrder;
+ aInfo.radialDistortionSixthOrder = intrinsics.RadialDistortionSixthOrder;
+ }
+
+ }
+
return success;
}
@@ -172,6 +198,7 @@ void CameraKinectIr::stop()
iKinectSensor->Close();
}
+ SafeRelease(iCoordinateMapper);
SafeRelease(iKinectSensor);
// Free up our memory buffer if any
@@ -253,9 +280,9 @@ bool CameraKinectIr::get_frame_(cv::Mat& frame)
// For scaling to have more precision in the range we are interested in
min = max - 255;
// See: https://stackoverflow.com/questions/14539498/change-type-of-mat-object-from-cv-32f-to-cv-8u/14539652
- raw.convertTo(raw8, CV_8U, 255.0 / (max - min), -255.0*min / (max - min));
+ raw.convertTo(iRaw8, CV_8U, 255.0 / (max - min), -255.0*min / (max - min));
// Second convert to RGB
- cv::cvtColor(raw8, frame, cv::COLOR_GRAY2BGR);
+ cv::cvtColor(iRaw8, frame, cv::COLOR_GRAY2BGR);
//
success = true;
}
diff --git a/tracker-kinect-face/camera_kinect_ir.h b/tracker-kinect-face/camera_kinect_ir.h
index d9e814a0..a2ddaf76 100644
--- a/tracker-kinect-face/camera_kinect_ir.h
+++ b/tracker-kinect-face/camera_kinect_ir.h
@@ -44,8 +44,8 @@ struct CameraKinectIr final : video::impl::camera
CameraKinectIr();
~CameraKinectIr() override;
-
- [[nodiscard]] bool start(const info& args) override;
+ // From video::impl::camera
+ [[nodiscard]] bool start(info& args) override;
void stop() override;
bool is_open() override;
std::tuple<const video::impl::frame&, bool> get_frame() override;
@@ -65,9 +65,12 @@ private:
// Frame needs to stay alive while we access the data buffer
IInfraredFrame* iInfraredFrame = nullptr;
+ //
+ ICoordinateMapper* iCoordinateMapper = nullptr;
+
video::frame iFrame;
cv::Mat iMatFrame;
- cv::Mat raw8;
+ cv::Mat iRaw8;
float fov = 0;
int width = 0, height = 0;
diff --git a/tracker-points/CMakeLists.txt b/tracker-points/CMakeLists.txt
index b9fcca9e..dd3a0281 100644
--- a/tracker-points/CMakeLists.txt
+++ b/tracker-points/CMakeLists.txt
@@ -2,7 +2,7 @@ find_package(OpenCV QUIET)
if(OpenCV_FOUND)
otr_module(tracker-points-base STATIC)
target_include_directories(${self} SYSTEM PUBLIC ${OpenCV_INCLUDE_DIRS})
- target_link_libraries(${self} opencv_imgproc opentrack-cv opencv_core opentrack-video)
+ target_link_libraries(${self} opencv_imgproc opencv_calib3d opentrack-cv opencv_core opentrack-video)
#set_property(TARGET ${self} PROPERTY OUTPUT_NAME "points-base")
endif()
add_subdirectory(module)
diff --git a/tracker-points/ftnoir_tracker_pt.cpp b/tracker-points/ftnoir_tracker_pt.cpp
index e455a9ed..deef37b2 100644
--- a/tracker-points/ftnoir_tracker_pt.cpp
+++ b/tracker-points/ftnoir_tracker_pt.cpp
@@ -20,6 +20,8 @@
#include <opencv2\calib3d.hpp>
+#include <iostream>
+
using namespace options;
namespace pt_impl {
@@ -74,7 +76,8 @@ void Tracker_PT::run()
if (preview_visible)
*preview_frame = *frame;
- point_extractor->extract_points(*frame, *preview_frame, points);
+ iImagePoints.clear();
+ point_extractor->extract_points(*frame, *preview_frame, points, iImagePoints);
point_count.store(points.size(), std::memory_order_relaxed);
const bool success = points.size() >= PointModel::N_POINTS;
@@ -98,19 +101,67 @@ void Tracker_PT::run()
// TODO: Solve with OpenCV
- std::vector<cv::Point3f> objectPoints;
- //TODO: Stuff object points in that vector
+ // Construct the points defining the object we want to detect based on settings.
+ // We are converting them from millimeters to meters.
+ // TODO: Need to support clip too. That's cap only for now.
+ std::vector<cv::Point3f> objectPoints;
+ objectPoints.push_back(cv::Point3f(s.cap_x/1000.0,0,0)); // Right
+ objectPoints.push_back(cv::Point3f(-s.cap_x/1000.0, 0, 0)); // Left
+ objectPoints.push_back(cv::Point3f(0, s.cap_y/1000.0, s.cap_z/1000.0)); // Top
+
std::vector<cv::Point2f> trackedPoints;
- //TODO: Stuff bitmap point in there making sure they match the order of the object point
+ //TODO: Stuff bitmap point in there making sure they match the order of the object point
+ // Find top most point
+ int topPointIndex = -1;
+ int maxY = 0;
+ for (int i = 0; i < 3; i++)
+ {
+ if (iImagePoints[i][1]>maxY)
+ {
+ maxY = iImagePoints[i][1];
+ topPointIndex = i;
+ }
+ }
+
+ int rightPointIndex = -1;
+ int maxX = 0;
+
+ // Find right most point
+ for (int i = 0; i < 3; i++)
+ {
+ // Excluding top most point
+ if (i!=topPointIndex && iImagePoints[i][0] > maxX)
+ {
+ maxX = iImagePoints[i][0];
+ rightPointIndex = i;
+ }
+ }
+
+ // Find left most point
+ int leftPointIndex = -1;
+ for (int i = 0; i < 3; i++)
+ {
+ // Excluding top most point
+ if (i != topPointIndex && i != rightPointIndex)
+ {
+ leftPointIndex = i;
+ break;
+ }
+ }
+
+ //
+ trackedPoints.push_back(cv::Point2f(iImagePoints[rightPointIndex][0], iImagePoints[rightPointIndex][1]));
+ trackedPoints.push_back(cv::Point2f(iImagePoints[leftPointIndex][0], iImagePoints[leftPointIndex][1]));
+ trackedPoints.push_back(cv::Point2f(iImagePoints[topPointIndex][0], iImagePoints[topPointIndex][1]));
// Create our camera matrix
- // TODO: Just do that once, use data memeber instead
+ // TODO: Just do that once, use data member instead
// Double or Float?
cv::Mat cameraMatrix;
cameraMatrix.create(3, 3, CV_64FC1);
cameraMatrix.setTo(cv::Scalar(0));
cameraMatrix.at<double>(0, 0) = camera->info.focalLengthX;
- cameraMatrix.at<double>(1, 1) = camera->info.focalLengthX;
+ cameraMatrix.at<double>(1, 1) = camera->info.focalLengthY;
cameraMatrix.at<double>(0, 2) = camera->info.principalPointX;
cameraMatrix.at<double>(1, 2) = camera->info.principalPointY;
cameraMatrix.at<double>(2, 2) = 1;
@@ -123,11 +174,33 @@ void Tracker_PT::run()
distCoeffs.at<double>(i, 0) = 0;
}
+ // Define our solution arrays
+ // They will receive up to 4 solutions for our P3P problem
std::vector<cv::Mat> rvecs, tvecs;
// TODO: try SOLVEPNP_AP3P too
- int num_of_solutions = cv::solveP3P(objectPoints, trackedPoints, cameraMatrix, distCoeffs, rvecs, tvecs, cv::SOLVEPNP_P3P);
+ int solutionCount = cv::solveP3P(objectPoints, trackedPoints, cameraMatrix, distCoeffs, rvecs, tvecs, cv::SOLVEPNP_AP3P);
+ if (solutionCount > 0)
+ {
+ std::cout << "Solution count: " << solutionCount << "\n";
+
+ // Find the solution we want
+ for (int i = 0; i < solutionCount; i++)
+ {
+ std::cout << "Translation:\n";
+ std::cout << tvecs.at(i);
+ std::cout << "\n";
+ std::cout << "Rotation:\n";
+ std::cout << rvecs.at(i);
+ std::cout << "\n";
+ }
+
+ std::cout << "\n";
+
+ }
+ // TODO: Work out rotation angles
+ // TODO: Choose the one solution that makes sense for us
diff --git a/tracker-points/ftnoir_tracker_pt.h b/tracker-points/ftnoir_tracker_pt.h
index 210c6a01..9b8da4ae 100644
--- a/tracker-points/ftnoir_tracker_pt.h
+++ b/tracker-points/ftnoir_tracker_pt.h
@@ -62,6 +62,7 @@ private:
std::unique_ptr<QLayout> layout;
std::vector<vec2> points;
+ std::vector<vec2> iImagePoints;
int preview_width = 320, preview_height = 240;
diff --git a/tracker-points/module/point_extractor.cpp b/tracker-points/module/point_extractor.cpp
index 1a75a3e3..d1975317 100644
--- a/tracker-points/module/point_extractor.cpp
+++ b/tracker-points/module/point_extractor.cpp
@@ -239,7 +239,7 @@ static void draw_blobs(cv::Mat& preview_frame, const blob* blobs, unsigned nblob
}
}
-void PointExtractor::extract_points(const pt_frame& frame_, pt_preview& preview_frame_, std::vector<vec2>& points)
+void PointExtractor::extract_points(const pt_frame& frame_, pt_preview& preview_frame_, std::vector<vec2>& points, std::vector<vec2>& imagePoints)
{
const cv::Mat& frame = frame_.as_const<Frame>()->mat;
@@ -375,6 +375,7 @@ end:
vec2 p;
std::tie(p[0], p[1]) = to_screen_pos(b.pos[0], b.pos[1], W, H);
points.push_back(p);
+ imagePoints.push_back(vec2(b.pos[0], b.pos[1]));
}
}
diff --git a/tracker-points/module/point_extractor.h b/tracker-points/module/point_extractor.h
index a6103667..2af5c131 100644
--- a/tracker-points/module/point_extractor.h
+++ b/tracker-points/module/point_extractor.h
@@ -33,16 +33,19 @@ class PointExtractor final : public pt_point_extractor
public:
// extracts points from frame and draws some processing info into frame, if draw_output is set
// dt: time since last call in seconds
- void extract_points(const pt_frame& frame, pt_preview& preview_frame, std::vector<vec2>& points) override;
+ void extract_points(const pt_frame& frame, pt_preview& preview_frame, std::vector<vec2>& points, std::vector<vec2>& imagePoints) override;
PointExtractor(const QString& module_name);
+
+public:
+ std::vector<blob> blobs;
+
private:
static constexpr int max_blobs = 16;
pt_settings s;
cv::Mat1b frame_gray_unmasked, frame_bin, frame_gray;
- cv::Mat1f hist;
- std::vector<blob> blobs;
+ cv::Mat1f hist;
cv::Mat1b ch[3];
void ensure_channel_buffers(const cv::Mat& orig_frame);
diff --git a/tracker-points/pt-api.hpp b/tracker-points/pt-api.hpp
index a27c7e38..69f74498 100644
--- a/tracker-points/pt-api.hpp
+++ b/tracker-points/pt-api.hpp
@@ -99,7 +99,7 @@ struct pt_point_extractor : pt_pixel_pos_mixin
pt_point_extractor();
virtual ~pt_point_extractor();
- virtual void extract_points(const pt_frame& image, pt_preview& preview_frame, std::vector<vec2>& points) = 0;
+ virtual void extract_points(const pt_frame& image, pt_preview& preview_frame, std::vector<vec2>& points, std::vector<vec2>& imagePoints) = 0;
static f threshold_radius_value(int w, int h, int threshold);
};
diff --git a/video-opencv/impl-camera.cpp b/video-opencv/impl-camera.cpp
index dddd463f..8a540134 100644
--- a/video-opencv/impl-camera.cpp
+++ b/video-opencv/impl-camera.cpp
@@ -30,7 +30,7 @@ bool cam::is_open()
return !!cap;
}
-bool cam::start(const info& args)
+bool cam::start(info& args)
{
stop();
cap.emplace(idx);
diff --git a/video-opencv/impl.hpp b/video-opencv/impl.hpp
index f756fb19..3d793490 100644
--- a/video-opencv/impl.hpp
+++ b/video-opencv/impl.hpp
@@ -34,7 +34,7 @@ struct cam final : camera
cam(int idx);
~cam() override;
- bool start(const info& args) override;
+ bool start(info& args) override;
void stop() override;
bool is_open() override;
std::tuple<const frame&, bool> get_frame() override;
diff --git a/video/camera.hpp b/video/camera.hpp
index 4ad8417c..be9ef711 100644
--- a/video/camera.hpp
+++ b/video/camera.hpp
@@ -49,12 +49,19 @@ struct OTR_VIDEO_EXPORT camera
struct info final
{
int width = 0, height = 0, fps = 0;
+ float focalLengthX = 0.0f;
+ float focalLengthY = 0.0f;
+ float principalPointX = 0.0f;
+ float principalPointY = 0.0f;
+ float radialDistortionSecondOrder = 0.0f;
+ float radialDistortionFourthOrder = 0.0f;
+ float radialDistortionSixthOrder = 0.0f;
};
camera();
virtual ~camera();
- [[nodiscard]] virtual bool start(const info& args) = 0;
+ [[nodiscard]] virtual bool start(info& args) = 0;
virtual void stop() = 0;
virtual bool is_open() = 0;