 tracker-pt/ftnoir_tracker_pt.cpp        |  47
 tracker-pt/ftnoir_tracker_pt.h          |   6
 tracker-pt/ftnoir_tracker_pt_settings.h |  28
 tracker-pt/point_extractor.cpp          |  10
 tracker-pt/point_extractor.h            |   6
 tracker-pt/point_tracker.cpp            | 152
 tracker-pt/point_tracker.h              |  92
 7 files changed, 183 insertions(+), 158 deletions(-)
diff --git a/tracker-pt/ftnoir_tracker_pt.cpp b/tracker-pt/ftnoir_tracker_pt.cpp
index d680698f..de137bea 100644
--- a/tracker-pt/ftnoir_tracker_pt.cpp
+++ b/tracker-pt/ftnoir_tracker_pt.cpp
@@ -56,7 +56,7 @@ void Tracker_PT::reset_command(Command command)
commands &= ~command;
}
-bool Tracker_PT::get_focal_length(float& ret)
+bool Tracker_PT::get_focal_length(f& ret)
{
QMutexLocker l(&camera_mtx);
CamInfo info;
@@ -104,7 +104,7 @@ void Tracker_PT::run()
{
const auto& points = point_extractor.extract_points(frame_);
- float fx;
+ f fx;
if (!get_focal_length(fx))
continue;
@@ -118,7 +118,7 @@ void Tracker_PT::run()
Affine X_CM = pose();
- std::function<void(const cv::Vec2f&, const cv::Scalar)> fun = [&](const cv::Vec2f& p, const cv::Scalar color)
+ std::function<void(const vec2&, const cv::Scalar)> fun = [&](const vec2& p, const cv::Scalar color)
{
using std::round;
cv::Point p2(round(p[0] * frame_.cols + frame_.cols/2),
@@ -141,10 +141,10 @@ void Tracker_PT::run()
}
{
- Affine X_MH(cv::Matx33f::eye(), cv::Vec3f(s.t_MH_x, s.t_MH_y, s.t_MH_z)); // just copy pasted these lines from below
+ Affine X_MH(mat33::eye(), vec3(s.t_MH_x, s.t_MH_y, s.t_MH_z)); // just copy pasted these lines from below
Affine X_GH = X_CM * X_MH;
- cv::Vec3f p = X_GH.t; // head (center?) position in global space
- cv::Vec2f p_(p[0] / p[2] * fx, p[1] / p[2] * fx); // projected to screen
+ vec3 p = X_GH.t; // head (center?) position in global space
+ vec2 p_(p[0] / p[2] * fx, p[1] / p[2] * fx); // projected to screen
fun(p_, cv::Scalar(0, 0, 255));
}
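The overlay in run() projects the head position with p_ = (x/z·fx, y/z·fx) and then maps those normalized coordinates back to pixels; only the x component of the lambda is visible in the hunk above. A minimal sketch of the round-trip, assuming the pixel-to-normalized mapping used by point_extractor.cpp further down; the overlay's y sign is an assumption here:

```cpp
#include <opencv2/core.hpp>
#include <cmath>

using vec2 = cv::Vec<double, 2>;

// pixel -> normalized, as in point_extractor.cpp below:
// origin at the image center, scaled by width, y pointing up
static vec2 to_normalized(double x_px, double y_px, int W, int H)
{
    return vec2((x_px - W/2) / W, -(y_px - H/2) / W);
}

// normalized -> pixel, the inverse mapping the overlay lambda needs
// (y convention assumed; only the x term appears in the hunk above)
static cv::Point to_pixel(const vec2& p, int W, int H)
{
    return cv::Point(int(std::lround(p[0] * W + W/2)),
                     int(std::lround(-p[1] * W + H/2)));
}
```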
@@ -201,33 +201,34 @@ void Tracker_PT::data(double *data)
{
Affine X_CM = pose();
- Affine X_MH(cv::Matx33f::eye(), cv::Vec3f(s.t_MH_x, s.t_MH_y, s.t_MH_z));
+ Affine X_MH(mat33::eye(), vec3(s.t_MH_x, s.t_MH_y, s.t_MH_z));
Affine X_GH = X_CM * X_MH;
- cv::Matx33f R = X_GH.R;
- cv::Vec3f t = X_GH.t;
-
// translate rotation matrix from opengl (G) to roll-pitch-yaw (E) frame
// -z -> x, y -> z, x -> -y
- cv::Matx33f R_EG(0, 0,-1,
- -1, 0, 0,
- 0, 1, 0);
- R = R_EG * R * R_EG.t();
+ mat33 R_EG(0, 0,-1,
+ -1, 0, 0,
+ 0, 1, 0);
+ mat33 R = R_EG * X_GH.R * R_EG.t();
using std::atan2;
using std::sqrt;
// extract rotation angles
- float alpha, beta, gamma;
- beta = atan2( -R(2,0), sqrt(R(2,1)*R(2,1) + R(2,2)*R(2,2)) );
- alpha = atan2( R(1,0), R(0,0));
- gamma = atan2( R(2,1), R(2,2));
-
- // extract rotation angles
- data[Yaw] = rad2deg * alpha;
- data[Pitch] = -rad2deg * beta;
- data[Roll] = rad2deg * gamma;
+ {
+ f alpha, beta, gamma;
+ beta = atan2( -R(2,0), sqrt(R(2,1)*R(2,1) + R(2,2)*R(2,2)) );
+ alpha = atan2( R(1,0), R(0,0));
+ gamma = atan2( R(2,1), R(2,2));
+
+ data[Yaw] = rad2deg * alpha;
+ data[Pitch] = -rad2deg * beta;
+ data[Roll] = rad2deg * gamma;
+ }
// get translation(s)
+
+ const vec3& t = X_GH.t;
+
// convert to cm
data[TX] = t[0] / 10;
data[TY] = t[1] / 10;
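The data() hunk recovers the rotation angles from the matrix with atan2. The same decomposition in isolation, assuming the ZYX (yaw-pitch-roll) Tait-Bryan convention the formulas imply and cv::Matx row-major indexing:

```cpp
#include <opencv2/core.hpp>
#include <cmath>

using mat33 = cv::Matx<double, 3, 3>;

// ZYX Tait-Bryan angles, same atan2 formulas as Tracker_PT::data()
static void euler_zyx(const mat33& R, double& alpha, double& beta, double& gamma)
{
    using std::atan2;
    using std::sqrt;
    beta  = atan2(-R(2,0), sqrt(R(2,1)*R(2,1) + R(2,2)*R(2,2))); // pitch
    alpha = atan2(R(1,0), R(0,0));                               // yaw
    gamma = atan2(R(2,1), R(2,2));                               // roll
}

// sanity check: a pure yaw rotation round-trips
int main()
{
    const double a = 0.3; // radians
    mat33 R(std::cos(a), -std::sin(a), 0,
            std::sin(a),  std::cos(a), 0,
            0,            0,           1);
    double alpha, beta, gamma;
    euler_zyx(R, alpha, beta, gamma); // alpha == 0.3, beta == gamma == 0
}
```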
diff --git a/tracker-pt/ftnoir_tracker_pt.h b/tracker-pt/ftnoir_tracker_pt.h
index d3b96f1d..c9497df5 100644
--- a/tracker-pt/ftnoir_tracker_pt.h
+++ b/tracker-pt/ftnoir_tracker_pt.h
@@ -29,7 +29,7 @@ class TrackerDialog_PT;
//-----------------------------------------------------------------------------
// Constantly processes the tracking chain in a separate thread
-class Tracker_PT : public QThread, public ITracker
+class Tracker_PT : public QThread, public ITracker, private pt_types
{
static constexpr double pi = 3.14159265359;
@@ -58,7 +58,7 @@ private:
void set_command(Command command);
void reset_command(Command command);
- bool get_focal_length(float &ret);
+ bool get_focal_length(f& ret);
QMutex camera_mtx;
CVCamera camera;
@@ -75,7 +75,7 @@ private:
volatile bool ever_success;
volatile unsigned char commands;
- static constexpr float rad2deg = float(180/3.14159265);
+ static constexpr f rad2deg = f(180/3.14159265);
//static constexpr float deg2rad = float(3.14159265/180);
};
diff --git a/tracker-pt/ftnoir_tracker_pt_settings.h b/tracker-pt/ftnoir_tracker_pt_settings.h
index e4bfa371..830bf92e 100644
--- a/tracker-pt/ftnoir_tracker_pt_settings.h
+++ b/tracker-pt/ftnoir_tracker_pt_settings.h
@@ -6,8 +6,26 @@
* copyright notice and this permission notice appear in all copies.
*/
-#ifndef FTNOIR_TRACKER_PT_SETTINGS_H
-#define FTNOIR_TRACKER_PT_SETTINGS_H
+#pragma once
+
+#include <limits>
+#include <opencv2/core.hpp>
+
+struct pt_types
+{
+ using f = double;
+
+ static constexpr f eps = std::numeric_limits<f>::epsilon();
+ static constexpr f pi = f(3.14159265358979323846);
+
+ template<int n> using vec = cv::Vec<f, n>;
+ using vec2 = vec<2>;
+ using vec3 = vec<3>;
+
+ template<int y, int x> using mat = cv::Matx<f, y, x>;
+ using mat33 = mat<3, 3>;
+ using mat22 = mat<2, 2>;
+};
#include "opentrack-compat/options.hpp"
using namespace options;
@@ -28,9 +46,9 @@ struct settings_pt : opts
value<int> clip_ty, clip_tz, clip_by, clip_bz;
value<int> active_model_panel, cap_x, cap_y, cap_z;
-
+
value<int> fov;
-
+
value<bool> dynamic_pose;
value<int> init_phase_timeout;
value<bool> auto_threshold;
@@ -67,5 +85,3 @@ struct settings_pt : opts
auto_threshold(b, "automatic-threshold", false)
{}
};
-
-#endif //FTNOIR_TRACKER_PT_SETTINGS_H
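The new pt_types struct makes the float-to-double switch a one-line change: each tracker class inherits it privately and picks up the f/vec/mat aliases. A minimal sketch of the pattern (struct renamed here to avoid clashing with the real one):

```cpp
#include <opencv2/core.hpp>

struct pt_types_sketch
{
    using f = double;                                  // flip precision here
    template<int n> using vec = cv::Vec<f, n>;
    template<int y, int x> using mat = cv::Matx<f, y, x>;
    using vec3 = vec<3>;
    using mat33 = mat<3, 3>;
};

// private inheritance imports the aliases without an is-a relationship
class example final : private pt_types_sketch
{
public:
    vec3 rotate(const mat33& R, const vec3& v) const { return R * v; }
};
```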
diff --git a/tracker-pt/point_extractor.cpp b/tracker-pt/point_extractor.cpp
index fb547ef6..f52ab424 100644
--- a/tracker-pt/point_extractor.cpp
+++ b/tracker-pt/point_extractor.cpp
@@ -21,7 +21,9 @@ PointExtractor::PointExtractor()
points.reserve(max_blobs);
}
-const std::vector<cv::Vec2f>& PointExtractor::extract_points(cv::Mat& frame)
+using vec2 = pt_types::vec2;
+
+const std::vector<vec2>& PointExtractor::extract_points(cv::Mat& frame)
{
const int W = frame.cols;
const int H = frame.rows;
@@ -53,8 +55,8 @@ const std::vector<cv::Vec2f>& PointExtractor::extract_points(cv::Mat& frame)
std::vector<int> { 0 },
cv::Mat(),
hist,
- std::vector<int> { 256/hist_c },
- std::vector<float> { 0, 256/hist_c },
+ std::vector<int> { 256 },
+ std::vector<float> { 0, 256 },
false);
const int sz = hist.cols * hist.rows;
int val = 0;
@@ -145,7 +147,7 @@ const std::vector<cv::Vec2f>& PointExtractor::extract_points(cv::Mat& frame)
for (auto& b : blobs)
{
- cv::Vec2f p((b.pos[0] - W/2)/W, -(b.pos[1] - H/2)/W);
+ vec2 p((b.pos[0] - W/2)/W, -(b.pos[1] - H/2)/W);
points.push_back(p);
}
return points;
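The histogram hunk replaces 256/hist_c with a literal 256; since hist_c == 1, behavior is unchanged and the intent is just explicit. The same calcHist overload in isolation, for a CV_8U grayscale frame:

```cpp
#include <opencv2/imgproc.hpp>
#include <vector>

// full-range 256-bin histogram, matching the calcHist call above
static cv::Mat gray_histogram(const cv::Mat& frame_gray)
{
    cv::Mat hist;
    cv::calcHist(std::vector<cv::Mat>{ frame_gray },
                 std::vector<int>{ 0 },        // channel 0
                 cv::Mat(),                    // no mask
                 hist,
                 std::vector<int>{ 256 },      // one bin per gray level
                 std::vector<float>{ 0, 256 },
                 false);                       // don't accumulate
    return hist;                               // 256x1, CV_32F counts
}
```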
diff --git a/tracker-pt/point_extractor.h b/tracker-pt/point_extractor.h
index 94948d34..67b2b8ea 100644
--- a/tracker-pt/point_extractor.h
+++ b/tracker-pt/point_extractor.h
@@ -16,13 +16,13 @@
#include <vector>
-class PointExtractor
+class PointExtractor final : private pt_types
{
public:
// extracts points from frame and draws some processing info into frame, if draw_output is set
// dt: time since last call in seconds
// WARNING: returned reference is valid as long as object
- const std::vector<cv::Vec2f> &extract_points(cv::Mat &frame);
+ const std::vector<vec2>& extract_points(cv::Mat &frame);
int get_n_points() { QMutexLocker l(&mtx); return points.size(); }
PointExtractor();
@@ -31,7 +31,7 @@ private:
static constexpr double pi = 3.14159265359;
static constexpr int hist_c = 1;
- std::vector<cv::Vec2f> points;
+ std::vector<vec2> points;
QMutex mtx;
cv::Mat frame_gray;
cv::Mat frame_bin;
diff --git a/tracker-pt/point_tracker.cpp b/tracker-pt/point_tracker.cpp
index 51f10470..4c1e177f 100644
--- a/tracker-pt/point_tracker.cpp
+++ b/tracker-pt/point_tracker.cpp
@@ -13,32 +13,69 @@
#include <QDebug>
-static void get_row(const cv::Matx33f& m, int i, cv::Vec3f& v)
+using mat33 = pt_types::mat33;
+using vec3 = pt_types::vec3;
+using f = pt_types::f;
+
+static void get_row(const mat33& m, int i, vec3& v)
{
v[0] = m(i,0);
v[1] = m(i,1);
v[2] = m(i,2);
}
-static void set_row(cv::Matx33f& m, int i, const cv::Vec3f& v)
+static void set_row(mat33& m, int i, const vec3& v)
{
m(i,0) = v[0];
m(i,1) = v[1];
m(i,2) = v[2];
}
-static bool d_vals_sort(const std::pair<float,int> a, const std::pair<float,int> b)
+static bool d_vals_sort(const std::pair<f,int> a, const std::pair<f,int> b)
{
return a.first < b.first;
}
-void PointModel::get_d_order(const std::vector<cv::Vec2f>& points, int d_order[], cv::Vec2f d) const
+PointModel::PointModel(settings_pt& s)
+{
+ set_model(s);
+ // calculate u
+ u = M01.cross(M02);
+ u /= norm(u);
+
+ // calculate projection matrix on M01,M02 plane
+ f s11 = M01.dot(M01);
+ f s12 = M01.dot(M02);
+ f s22 = M02.dot(M02);
+ P = 1/(s11*s22-s12*s12) * mat22(s22, -s12, -s12, s11);
+}
+
+void PointModel::set_model(settings_pt& s)
+{
+ switch (s.active_model_panel)
+ {
+ case Clip:
+ M01 = vec3(0, static_cast<f>(s.clip_ty), -static_cast<f>(s.clip_tz));
+ M02 = vec3(0, -static_cast<f>(s.clip_by), -static_cast<f>(s.clip_bz));
+ break;
+ case Cap:
+ M01 = vec3(-static_cast<f>(s.cap_x), -static_cast<f>(s.cap_y), -static_cast<f>(s.cap_z));
+ M02 = vec3(static_cast<f>(s.cap_x), -static_cast<f>(s.cap_y), -static_cast<f>(s.cap_z));
+ break;
+ case Custom:
+ M01 = vec3(s.m01_x, s.m01_y, s.m01_z);
+ M02 = vec3(s.m02_x, s.m02_y, s.m02_z);
+ break;
+ }
+}
+
+void PointModel::get_d_order(const std::vector<vec2>& points, int* d_order, vec2 d) const
{
// fit line to orthographically projected points
- std::vector<std::pair<float,int>> d_vals;
+ std::vector<std::pair<f,int>> d_vals;
// get sort indices with respect to d scalar product
for (unsigned i = 0; i < PointModel::N_POINTS; ++i)
- d_vals.push_back(std::pair<float, int>(d.dot(points[i]), i));
+ d_vals.push_back(std::pair<f, int>(d.dot(points[i]), i));
std::sort(d_vals.begin(),
d_vals.end(),
@@ -54,12 +91,12 @@ PointTracker::PointTracker() : init_phase(true)
{
}
-PointTracker::PointOrder PointTracker::find_correspondences_previous(const std::vector<cv::Vec2f>& points, const PointModel& model, float f)
+PointTracker::PointOrder PointTracker::find_correspondences_previous(const std::vector<vec2>& points, const PointModel& model, f focal_length)
{
PointTracker::PointOrder p;
- p.points[0] = project(cv::Vec3f(0,0,0), f);
- p.points[1] = project(model.M01, f);
- p.points[2] = project(model.M02, f);
+ p.points[0] = project(vec3(0,0,0), focal_length);
+ p.points[1] = project(model.M01, focal_length);
+ p.points[2] = project(model.M02, focal_length);
// set correspondences by minimum distance to projected model point
bool point_taken[PointModel::N_POINTS];
@@ -68,13 +105,13 @@ PointTracker::PointOrder PointTracker::find_correspondences_previous(const std::
for (unsigned i=0; i<PointModel::N_POINTS; ++i)
{
- float min_sdist = 0;
+ f min_sdist = 0;
unsigned min_idx = 0;
// find closest point to projected model point i
for (unsigned j=0; j<PointModel::N_POINTS; ++j)
{
- cv::Vec2f d = p.points[i]-points[j];
- float sdist = d.dot(d);
+ vec2 d = p.points[i]-points[j];
+ f sdist = d.dot(d);
if (sdist < min_sdist || j==0)
{
min_idx = j;
@@ -93,7 +130,7 @@ PointTracker::PointOrder PointTracker::find_correspondences_previous(const std::
return p;
}
-void PointTracker::track(const std::vector<cv::Vec2f>& points, const PointModel& model, float f, bool dynamic_pose, int init_phase_timeout)
+void PointTracker::track(const std::vector<vec2>& points, const PointModel& model, f focal_length, bool dynamic_pose, int init_phase_timeout)
{
PointOrder order;
@@ -106,26 +143,26 @@ void PointTracker::track(const std::vector<cv::Vec2f>& points, const PointModel&
if (!dynamic_pose || init_phase)
order = find_correspondences(points, model);
else
- order = find_correspondences_previous(points, model, f);
+ order = find_correspondences_previous(points, model, focal_length);
- POSIT(model, order, f);
+ POSIT(model, order, focal_length);
init_phase = false;
t.start();
}
-PointTracker::PointOrder PointTracker::find_correspondences(const std::vector<cv::Vec2f>& points, const PointModel& model)
+PointTracker::PointOrder PointTracker::find_correspondences(const std::vector<vec2>& points, const PointModel& model)
{
// We do a simple freetrack-like sorting in the init phase...
// sort points
int point_d_order[PointModel::N_POINTS];
int model_d_order[PointModel::N_POINTS];
- cv::Vec2f d(model.M01[0]-model.M02[0], model.M01[1]-model.M02[1]);
+ vec2 d(model.M01[0]-model.M02[0], model.M01[1]-model.M02[1]);
model.get_d_order(points, point_d_order, d);
// calculate d and d_order for simple freetrack-like point correspondence
- model.get_d_order(std::vector<cv::Vec2f> {
- cv::Vec2f{0,0},
- cv::Vec2f(model.M01[0], model.M01[1]),
- cv::Vec2f(model.M02[0], model.M02[1])
+ model.get_d_order(std::vector<vec2> {
+ vec2{0,0},
+ vec2(model.M01[0], model.M01[1]),
+ vec2(model.M02[0], model.M02[1])
},
model_d_order,
d);
@@ -137,7 +174,7 @@ PointTracker::PointOrder PointTracker::find_correspondences(const std::vector<cv
return p;
}
-int PointTracker::POSIT(const PointModel& model, const PointOrder& order_, float focal_length)
+int PointTracker::POSIT(const PointModel& model, const PointOrder& order_, f focal_length)
{
// POSIT algorithm for coplanar points as presented in
// [Denis Oberkampf, Daniel F. DeMenthon, Larry S. Davis: "Iterative Pose Estimation Using Coplanar Feature Points"]
@@ -145,45 +182,45 @@ int PointTracker::POSIT(const PointModel& model, const PointOrder& order_, float
// The expected rotation used for resolving the ambiguity in POSIT:
// In every iteration step the rotation closer to R_expected is taken
- cv::Matx33f R_expected = cv::Matx33f::eye();
+ mat33 R_expected = mat33::eye();
// initial pose = last (predicted) pose
- cv::Vec3f k;
+ vec3 k;
get_row(R_expected, 2, k);
- float Z0 = 1000.f;
+ f Z0 = f(1000);
- float old_epsilon_1 = 0;
- float old_epsilon_2 = 0;
- float epsilon_1 = 1;
- float epsilon_2 = 1;
+ f old_epsilon_1 = 0;
+ f old_epsilon_2 = 0;
+ f epsilon_1 = 1;
+ f epsilon_2 = 1;
- cv::Vec3f I0, J0;
- cv::Vec2f I0_coeff, J0_coeff;
+ vec3 I0, J0;
+ vec2 I0_coeff, J0_coeff;
- cv::Vec3f I_1, J_1, I_2, J_2;
- cv::Matx33f R_1, R_2;
- cv::Matx33f* R_current;
+ vec3 I_1, J_1, I_2, J_2;
+ mat33 R_1, R_2;
+ mat33* R_current = &R_1;
- constexpr int MAX_ITER = 100;
- const float EPS_THRESHOLD = 1e-4f;
+ static constexpr int max_iter = 100;
- const cv::Vec2f* order = order_.points;
+ const vec2* order = order_.points;
using std::sqrt;
using std::atan;
using std::cos;
using std::sin;
+ using std::fabs;
int i=1;
- for (; i<MAX_ITER; ++i)
+ for (; i<max_iter; ++i)
{
epsilon_1 = k.dot(model.M01)/Z0;
epsilon_2 = k.dot(model.M02)/Z0;
// vector of scalar products <I0, M0i> and <J0, M0i>
- cv::Vec2f I0_M0i(order[1][0]*(1 + epsilon_1) - order[0][0],
+ vec2 I0_M0i(order[1][0]*(1 + epsilon_1) - order[0][0],
order[2][0]*(1 + epsilon_2) - order[0][0]);
- cv::Vec2f J0_M0i(order[1][1]*(1 + epsilon_1) - order[0][1],
+ vec2 J0_M0i(order[1][1]*(1 + epsilon_1) - order[0][1],
order[2][1]*(1 + epsilon_2) - order[0][1]);
// construct projection of I, J onto M0i plane: I0 and J0
@@ -193,22 +230,22 @@ int PointTracker::POSIT(const PointModel& model, const PointOrder& order_, float
J0 = J0_coeff[0]*model.M01 + J0_coeff[1]*model.M02;
// calculate u component of I, J
- float II0 = I0.dot(I0);
- float IJ0 = I0.dot(J0);
- float JJ0 = J0.dot(J0);
- float rho, theta;
+ f II0 = I0.dot(I0);
+ f IJ0 = I0.dot(J0);
+ f JJ0 = J0.dot(J0);
+ f rho, theta;
// CAVEAT don't change to comparison with an epsilon -sh 20160423
if (JJ0 == II0) {
- rho = std::sqrt(std::abs(2*IJ0));
- theta = -PI/4;
+ rho = sqrt(fabs(2*IJ0));
+ theta = -pi/4;
if (IJ0<0) theta *= -1;
}
else {
rho = sqrt(sqrt( (JJ0-II0)*(JJ0-II0) + 4*IJ0*IJ0 ));
theta = atan( -2*IJ0 / (JJ0-II0) );
// avoid branch misprediction
- theta += (JJ0 - II0 < 0) * PI;
- theta /= 2;
+ theta += (JJ0 - II0 < 0) * pi;
+ theta *= f(.5);
}
// construct the two solutions
@@ -218,7 +255,7 @@ int PointTracker::POSIT(const PointModel& model, const PointOrder& order_, float
J_1 = J0 + rho*sin(theta)*model.u;
J_2 = J0 - rho*sin(theta)*model.u;
- float norm_const = 1/cv::norm(I_1); // all have the same norm
+ f norm_const = 1/cv::norm(I_1); // all have the same norm
// create rotation matrices
I_1 *= norm_const; J_1 *= norm_const;
@@ -237,8 +274,8 @@ int PointTracker::POSIT(const PointModel& model, const PointOrder& order_, float
// pick the rotation solution closer to the expected one
// in simple metric d(A,B) = || I - A * B^T ||
- float R_1_deviation = cv::norm(cv::Matx33f::eye() - R_expected * R_1.t());
- float R_2_deviation = cv::norm(cv::Matx33f::eye() - R_expected * R_2.t());
+ f R_1_deviation = cv::norm(mat33::eye() - R_expected * R_1.t());
+ f R_2_deviation = cv::norm(mat33::eye() - R_expected * R_2.t());
if (R_1_deviation < R_2_deviation)
R_current = &R_1;
@@ -248,8 +285,11 @@ int PointTracker::POSIT(const PointModel& model, const PointOrder& order_, float
get_row(*R_current, 2, k);
// check for convergence condition
- if (std::abs(epsilon_1 - old_epsilon_1) + std::abs(epsilon_2 - old_epsilon_2) < EPS_THRESHOLD)
+ const f delta = fabs(epsilon_1 - old_epsilon_1) + fabs(epsilon_2 - old_epsilon_2);
+
+ if (!(delta > eps))
break;
+
old_epsilon_1 = epsilon_1;
old_epsilon_2 = epsilon_2;
}
@@ -266,8 +306,8 @@ int PointTracker::POSIT(const PointModel& model, const PointOrder& order_, float
return i;
}
-cv::Vec2f PointTracker::project(const cv::Vec3f& v_M, float f)
+pt_types::vec2 PointTracker::project(const vec3& v_M, f focal_length)
{
- cv::Vec3f v_C = X_CM * v_M;
- return cv::Vec2f(f*v_C[0]/v_C[2], f*v_C[1]/v_C[2]);
+ vec3 v_C = X_CM * v_M;
+ return vec2(focal_length*v_C[0]/v_C[2], focal_length*v_C[1]/v_C[2]);
}
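The POSIT convergence test changes from a fixed 1e-4f threshold to !(delta > eps) against machine epsilon. Besides suiting double precision, the negated comparison also terminates the loop if delta becomes NaN, where a delta < threshold test would keep iterating. A sketch of the guard:

```cpp
#include <limits>

static constexpr double eps = std::numeric_limits<double>::epsilon();

// true when the POSIT loop should stop
static bool converged(double delta)
{
    // every comparison with NaN is false, so !(delta > eps) is also
    // true for NaN -- the loop can't spin until max_iter on bad input
    return !(delta > eps);
}
```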
diff --git a/tracker-pt/point_tracker.h b/tracker-pt/point_tracker.h
index e3f6cdb9..559f7963 100644
--- a/tracker-pt/point_tracker.h
+++ b/tracker-pt/point_tracker.h
@@ -8,23 +8,22 @@
#ifndef POINTTRACKER_H
#define POINTTRACKER_H
+#include "opentrack-compat/timer.hpp"
+#include "ftnoir_tracker_pt_settings.h"
#include <opencv2/core/core.hpp>
#include <memory>
#include <vector>
-#include "opentrack-compat/timer.hpp"
-#include "ftnoir_tracker_pt_settings.h"
-
#include <QObject>
#include <QMutex>
-class Affine
+class Affine final : private pt_types
{
public:
- Affine() : R(cv::Matx33f::eye()), t(0,0,0) {}
- Affine(const cv::Matx33f& R, const cv::Vec3f& t) : R(R),t(t) {}
+ Affine() : R(mat33::eye()), t(0,0,0) {}
+ Affine(const mat33& R, const vec3& t) : R(R),t(t) {}
- cv::Matx33f R;
- cv::Vec3f t;
+ mat33 R;
+ vec3 t;
};
inline Affine operator*(const Affine& X, const Affine& Y)
@@ -32,114 +31,81 @@ inline Affine operator*(const Affine& X, const Affine& Y)
return Affine(X.R*Y.R, X.R*Y.t + X.t);
}
-inline Affine operator*(const cv::Matx33f& X, const Affine& Y)
+inline Affine operator*(const pt_types::mat33& X, const Affine& Y)
{
return Affine(X*Y.R, X*Y.t);
}
-inline Affine operator*(const Affine& X, const cv::Matx33f& Y)
+inline Affine operator*(const Affine& X, const pt_types::mat33& Y)
{
return Affine(X.R*Y, X.t);
}
-inline cv::Vec3f operator*(const Affine& X, const cv::Vec3f& v)
+inline pt_types::vec3 operator*(const Affine& X, const pt_types::vec3& v)
{
return X.R*v + X.t;
}
-
// ----------------------------------------------------------------------------
// Describes a 3-point model
// nomenclature as in
// [Denis Oberkampf, Daniel F. DeMenthon, Larry S. Davis: "Iterative Pose Estimation Using Coplanar Feature Points"]
-class PointModel
+class PointModel final : private pt_types
{
friend class PointTracker;
public:
static constexpr unsigned N_POINTS = 3;
- cv::Vec3f M01; // M01 in model frame
- cv::Vec3f M02; // M02 in model frame
+ vec3 M01; // M01 in model frame
+ vec3 M02; // M02 in model frame
- cv::Vec3f u; // unit vector perpendicular to M01,M02-plane
+ vec3 u; // unit vector perpendicular to M01,M02-plane
- cv::Matx22f P;
+ mat22 P;
- enum Model { Clip = 0, Cap = 1, Custom = 2 };
+ enum Model { Clip, Cap, Custom };
- PointModel(settings_pt& s)
- {
- set_model(s);
- // calculate u
- u = M01.cross(M02);
- u /= norm(u);
-
- // calculate projection matrix on M01,M02 plane
- float s11 = M01.dot(M01);
- float s12 = M01.dot(M02);
- float s22 = M02.dot(M02);
- P = 1/(s11*s22-s12*s12) * cv::Matx22f(s22, -s12, -s12, s11);
- }
-
- void set_model(settings_pt& s)
- {
- switch (s.active_model_panel)
- {
- case Clip:
- M01 = cv::Vec3f(0, static_cast<double>(s.clip_ty), -static_cast<double>(s.clip_tz));
- M02 = cv::Vec3f(0, -static_cast<double>(s.clip_by), -static_cast<double>(s.clip_bz));
- break;
- case Cap:
- M01 = cv::Vec3f(-static_cast<double>(s.cap_x), -static_cast<double>(s.cap_y), -static_cast<double>(s.cap_z));
- M02 = cv::Vec3f(static_cast<double>(s.cap_x), -static_cast<double>(s.cap_y), -static_cast<double>(s.cap_z));
- break;
- case Custom:
- M01 = cv::Vec3f(s.m01_x, s.m01_y, s.m01_z);
- M02 = cv::Vec3f(s.m02_x, s.m02_y, s.m02_z);
- break;
- }
- }
-
- void get_d_order(const std::vector<cv::Vec2f>& points, int* d_order, cv::Vec2f d) const;
+ PointModel(settings_pt& s);
+ void set_model(settings_pt& s);
+ void get_d_order(const std::vector<vec2>& points, int* d_order, vec2 d) const;
};
// ----------------------------------------------------------------------------
// Tracks a 3-point model
// implementing the POSIT algorithm for coplanar points as presented in
// [Denis Oberkampf, Daniel F. DeMenthon, Larry S. Davis: "Iterative Pose Estimation Using Coplanar Feature Points"]
-class PointTracker
+class PointTracker final : private pt_types
{
public:
PointTracker();
// track the pose using the set of normalized point coordinates (x pos in range -0.5:0.5)
// f : (focal length)/(sensor width)
// dt : time since last call
- void track(const std::vector<cv::Vec2f>& projected_points, const PointModel& model, float f, bool dynamic_pose, int init_phase_timeout);
+ void track(const std::vector<vec2>& projected_points, const PointModel& model, f focal_length, bool dynamic_pose, int init_phase_timeout);
Affine pose() { QMutexLocker l(&mtx); return X_CM; }
- cv::Vec2f project(const cv::Vec3f& v_M, float f);
-private:
- static constexpr float PI = 3.14159265358979323846f;
+ vec2 project(const vec3& v_M, PointTracker::f focal_length);
+private:
// the points in model order
struct PointOrder
{
- cv::Vec2f points[PointModel::N_POINTS];
+ vec2 points[PointModel::N_POINTS];
PointOrder()
{
for (unsigned i = 0; i < PointModel::N_POINTS; i++)
- points[i] = cv::Vec2f(0, 0);
+ points[i] = vec2(0, 0);
}
};
- PointOrder find_correspondences(const std::vector<cv::Vec2f>& projected_points, const PointModel &model);
- PointOrder find_correspondences_previous(const std::vector<cv::Vec2f>& points, const PointModel &model, float f);
- int POSIT(const PointModel& point_model, const PointOrder& order, float focal_length); // The POSIT algorithm, returns the number of iterations
+ PointOrder find_correspondences(const std::vector<vec2>& projected_points, const PointModel &model);
+ PointOrder find_correspondences_previous(const std::vector<vec2>& points, const PointModel &model, f focal_length);
+ int POSIT(const PointModel& point_model, const PointOrder& order, f focal_length); // The POSIT algorithm, returns the number of iterations
Affine X_CM; // trafo from model to camera
Timer t;
- bool init_phase;
QMutex mtx;
+ bool init_phase;
};
#endif //POINTTRACKER_H
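The Affine helper composes rigid transforms with X.R*Y.R and X.R*Y.t + X.t; the composite applied to a point equals applying Y and then X. A quick consistency sketch under the same type aliases:

```cpp
#include <opencv2/core.hpp>

using mat33 = cv::Matx<double, 3, 3>;
using vec3 = cv::Vec<double, 3>;

struct affine_sketch
{
    mat33 R; vec3 t;
    vec3 operator()(const vec3& v) const { return R*v + t; }
};

// same rule as operator*(Affine, Affine) above:
// compose(X, Y)(v) == X(Y(v)) for every v, since
// X.R*(Y.R*v + Y.t) + X.t == (X.R*Y.R)*v + (X.R*Y.t + X.t)
static affine_sketch compose(const affine_sketch& X, const affine_sketch& Y)
{
    return { X.R * Y.R, X.R * Y.t + X.t };
}
```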