author    Stanislaw Halik <sthalik@misaki.pl>  2016-09-20 22:43:50 +0200
committer Stanislaw Halik <sthalik@misaki.pl>  2016-09-20 23:24:13 +0200
commit    8faba81716c5d4283a31262dea795888cdb372b0
tree      6814b0261264d6f1b3996f96e45e12cbc548b1e4 /tracker-pt
parent    c28efa181dc302489ff5bd1bb2f7d2d5d88bfd92
tracker/pt: use doubles
We don't have the pt_types namespace on this branch, so hardcode double instead.
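For context, a pt_types-style namespace (present on other branches, not this one) would centralize the precision switch in a single alias. A purely hypothetical sketch; all names besides pt_types are assumptions:

#include <opencv2/core.hpp>

// Hypothetical sketch only -- this branch hardcodes double throughout instead.
namespace pt_types
{
    using f = double;                  // single switch for float/double precision
    using vec2 = cv::Vec<f, 2>;
    using vec3 = cv::Vec<f, 3>;
    using mat33 = cv::Matx<f, 3, 3>;
}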
Diffstat (limited to 'tracker-pt')
-rw-r--r--   tracker-pt/camera.cpp             |  10
-rw-r--r--   tracker-pt/camera.h               |   5
-rwxr-xr-x   tracker-pt/ftnoir_tracker_pt.cpp  |  47
-rwxr-xr-x   tracker-pt/ftnoir_tracker_pt.h    |   8
-rw-r--r--   tracker-pt/point_tracker.cpp      | 106
-rw-r--r--   tracker-pt/point_tracker.h        |  50
6 files changed, 115 insertions, 111 deletions
diff --git a/tracker-pt/camera.cpp b/tracker-pt/camera.cpp
index ee021d4c..1aceb783 100644
--- a/tracker-pt/camera.cpp
+++ b/tracker-pt/camera.cpp
@@ -53,16 +53,16 @@ bool Camera::get_info(CamInfo& ret)
return true;
}
-bool Camera::get_frame(float dt, cv::Mat* frame)
+bool Camera::get_frame(double dt, cv::Mat* frame)
{
bool new_frame = _get_frame(frame);
// measure fps of valid frames
- const float dt_smoothing_const = 0.95;
+ const double dt_smoothing_const = 0.95;
dt_valid += dt;
if (new_frame)
{
- dt_mean = dt_smoothing_const * dt_mean + (1.0 - dt_smoothing_const) * dt_valid;
- cam_info.fps = dt_mean > 1e-3 ? 1.0 / dt_mean : 0;
+ dt_mean = dt_smoothing_const * dt_mean + (1 - dt_smoothing_const) * dt_valid;
+ cam_info.fps = int(std::round(dt_mean > 1e-3 ? 1 / dt_mean : 0));
dt_valid = 0;
}
else
@@ -102,7 +102,7 @@ void CVCamera::stop()
// give opencv time to exit camera threads, etc.
if (opened)
portable::sleep(500);
- qDebug() << "pt camera: assuming stopped";
+ qDebug() << "pt camera: stopped";
}
}
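A standalone sketch of the frame-rate smoothing changed above: dt_valid accumulates the time since the last valid frame and dt_mean is an exponential moving average of it. The struct wrapper and member layout here are assumptions for illustration, not the camera class itself:

#include <cmath>

struct FpsEstimator
{
    double dt_valid = 0, dt_mean = 0;
    int fps = 0;

    // dt: seconds since the previous call; new_frame: whether a valid frame arrived
    void feed(double dt, bool new_frame)
    {
        const double dt_smoothing_const = 0.95;   // constant from the hunk above
        dt_valid += dt;
        if (new_frame)
        {
            dt_mean = dt_smoothing_const * dt_mean + (1 - dt_smoothing_const) * dt_valid;
            fps = int(std::round(dt_mean > 1e-3 ? 1 / dt_mean : 0));
            dt_valid = 0;
        }
    }
};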
diff --git a/tracker-pt/camera.h b/tracker-pt/camera.h
index d0dfdd84..f82c74a6 100644
--- a/tracker-pt/camera.h
+++ b/tracker-pt/camera.h
@@ -40,7 +40,7 @@ public:
void set_res(int x_res, int y_res);
// gets a frame from the camera, dt: time since last call in seconds
- bool get_frame(float dt, cv::Mat* frame);
+ bool get_frame(double dt, cv::Mat* frame);
// WARNING: returned references are valid as long as object
bool get_info(CamInfo &ret);
@@ -55,8 +55,7 @@ protected:
virtual void _set_fps() = 0;
virtual void _set_res() = 0;
private:
- float dt_valid;
- float dt_mean;
+ double dt_valid, dt_mean;
protected:
int desired_index;
int active_index;
diff --git a/tracker-pt/ftnoir_tracker_pt.cpp b/tracker-pt/ftnoir_tracker_pt.cpp
index 1bf581a7..135a8ccf 100755
--- a/tracker-pt/ftnoir_tracker_pt.cpp
+++ b/tracker-pt/ftnoir_tracker_pt.cpp
@@ -19,11 +19,11 @@
//#define PT_PERF_LOG //log performance
//-----------------------------------------------------------------------------
-Tracker_PT::Tracker_PT()
- : commands(0),
- video_widget(NULL),
- video_frame(NULL),
- ever_success(false)
+Tracker_PT::Tracker_PT() :
+ video_widget(NULL),
+ video_frame(NULL),
+ commands(0),
+ ever_success(false)
{
connect(s.b.get(), SIGNAL(saving()), this, SLOT(apply_settings()));
}
@@ -56,7 +56,7 @@ void Tracker_PT::reset_command(Command command)
commands &= ~command;
}
-bool Tracker_PT::get_focal_length(float& ret)
+bool Tracker_PT::get_focal_length(double& ret)
{
static constexpr float pi = 3.1415926;
float fov_;
@@ -115,7 +115,7 @@ void Tracker_PT::run()
{
const auto& points = point_extractor.extract_points(frame_);
- float fx;
+ double fx;
if (!get_focal_length(fx))
continue;
@@ -126,12 +126,12 @@ void Tracker_PT::run()
point_tracker.track(points, PointModel(s), fx, s.dynamic_pose, s.init_phase_timeout);
ever_success = true;
}
-
+
Affine X_CM = pose();
- std::function<void(const cv::Vec2f&, const cv::Scalar)> fun = [&](const cv::Vec2f& p, const cv::Scalar color)
+ std::function<void(const cv::Vec2d&, const cv::Scalar&)> fun = [&](const cv::Vec2d& p, const cv::Scalar& color)
{
- auto p2 = cv::Point(p[0] * frame_.cols + frame_.cols/2, -p[1] * frame_.cols + frame_.rows/2);
+ cv::Point p2(p[0] * frame_.cols + frame_.cols/2, -p[1] * frame_.cols + frame_.rows/2);
cv::line(frame_,
cv::Point(p2.x - 20, p2.y),
cv::Point(p2.x + 20, p2.y),
@@ -141,19 +141,19 @@ void Tracker_PT::run()
cv::Point(p2.x, p2.y - 20),
cv::Point(p2.x, p2.y + 20),
color,
- 4);
+ 4);
};
for (unsigned i = 0; i < points.size(); i++)
{
fun(points[i], cv::Scalar(0, 255, 0));
}
-
+
{
- Affine X_MH(cv::Matx33f::eye(), get_model_offset()); // just copy pasted these lines from below
+ Affine X_MH(cv::Matx33d::eye(), get_model_offset()); // just copy pasted these lines from below
Affine X_GH = X_CM * X_MH;
- cv::Vec3f p = X_GH.t; // head (center?) position in global space
- cv::Vec2f p_(p[0] / p[2] * fx, p[1] / p[2] * fx); // projected to screen
+ cv::Vec3d p = X_GH.t; // head (center?) position in global space
+ cv::Vec2d p_(p[0] / p[2] * fx, p[1] / p[2] * fx); // projected to screen
fun(p_, cv::Scalar(0, 0, 255));
}
@@ -163,9 +163,9 @@ void Tracker_PT::run()
qDebug()<<"Tracker:: Thread stopping";
}
-cv::Vec3f Tracker_PT::get_model_offset()
+cv::Vec3d Tracker_PT::get_model_offset()
{
- cv::Vec3f offset(s.t_MH_x, s.t_MH_y, s.t_MH_z);
+ cv::Vec3d offset(s.t_MH_x, s.t_MH_y, s.t_MH_z);
if (offset[0] == 0 && offset[1] == 0 && offset[2] == 0)
{
int m = s.model_used;
@@ -242,21 +242,24 @@ void Tracker_PT::data(double *data)
{
Affine X_CM = pose();
- Affine X_MH(cv::Matx33f::eye(), get_model_offset());
+ Affine X_MH(cv::Matx33d::eye(), get_model_offset());
Affine X_GH = X_CM * X_MH;
- cv::Matx33f R = X_GH.R;
- cv::Vec3f t = X_GH.t;
+ cv::Matx33d R = X_GH.R;
+ cv::Vec3d t = X_GH.t;
// translate rotation matrix from opengl (G) to roll-pitch-yaw (E) frame
// -z -> x, y -> z, x -> -y
- cv::Matx33f R_EG(0, 0,-1,
+ cv::Matx33d R_EG(0, 0,-1,
-1, 0, 0,
0, 1, 0);
R = R_EG * R * R_EG.t();
+ using std::atan2;
+ using std::sqrt;
+
// extract rotation angles
- float alpha, beta, gamma;
+ double alpha, beta, gamma;
beta = atan2( -R(2,0), sqrt(R(2,1)*R(2,1) + R(2,2)*R(2,2)) );
alpha = atan2( R(1,0), R(0,0));
gamma = atan2( R(2,1), R(2,2));
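The three atan2 calls above are the standard yaw/pitch/roll decomposition of a rotation matrix expressed in the roll-pitch-yaw (E) frame. A self-contained sketch of the same extraction (the function name is assumed):

#include <cmath>
#include <opencv2/core.hpp>

// Returns (alpha, beta, gamma) = (yaw, pitch, roll), matching the formulas above.
static cv::Vec3d euler_angles(const cv::Matx33d& R)
{
    using std::atan2;
    using std::sqrt;

    const double beta  = atan2(-R(2,0), sqrt(R(2,1)*R(2,1) + R(2,2)*R(2,2)));  // pitch
    const double alpha = atan2( R(1,0), R(0,0));                               // yaw
    const double gamma = atan2( R(2,1), R(2,2));                               // roll
    return cv::Vec3d(alpha, beta, gamma);
}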
diff --git a/tracker-pt/ftnoir_tracker_pt.h b/tracker-pt/ftnoir_tracker_pt.h
index 46615cc3..613d9e82 100755
--- a/tracker-pt/ftnoir_tracker_pt.h
+++ b/tracker-pt/ftnoir_tracker_pt.h
@@ -54,13 +54,11 @@ private:
};
void set_command(Command command);
void reset_command(Command command);
- cv::Vec3f get_model_offset();
-
- bool get_focal_length(float &ret);
-
- volatile int commands;
+ cv::Vec3d get_model_offset();
QMutex camera_mtx;
+ bool get_focal_length(double& ret);
+
CVCamera camera;
PointExtractor point_extractor;
PointTracker point_tracker;
diff --git a/tracker-pt/point_tracker.cpp b/tracker-pt/point_tracker.cpp
index 599ce2d3..bc5bf3cd 100644
--- a/tracker-pt/point_tracker.cpp
+++ b/tracker-pt/point_tracker.cpp
@@ -15,32 +15,32 @@
const float PI = 3.14159265358979323846f;
-static void get_row(const cv::Matx33f& m, int i, cv::Vec3f& v)
+static void get_row(const cv::Matx33d& m, int i, cv::Vec3d& v)
{
v[0] = m(i,0);
v[1] = m(i,1);
v[2] = m(i,2);
}
-static void set_row(cv::Matx33f& m, int i, const cv::Vec3f& v)
+static void set_row(cv::Matx33d& m, int i, const cv::Vec3d& v)
{
m(i,0) = v[0];
m(i,1) = v[1];
m(i,2) = v[2];
}
-static bool d_vals_sort(const std::pair<float,int> a, const std::pair<float,int> b)
+static bool d_vals_sort(const std::pair<double,int> a, const std::pair<double,int> b)
{
return a.first < b.first;
}
-void PointModel::get_d_order(const std::vector<cv::Vec2f>& points, int d_order[], cv::Vec2f d) const
+void PointModel::get_d_order(const std::vector<cv::Vec2d>& points, int* d_order, const cv::Vec2d& d) const
{
// fit line to orthographically projected points
- std::vector<std::pair<float,int>> d_vals;
+ std::vector<std::pair<double,int>> d_vals;
// get sort indices with respect to d scalar product
for (unsigned i = 0; i<points.size(); ++i)
- d_vals.push_back(std::pair<float, int>(d.dot(points[i]), i));
+ d_vals.push_back(std::pair<double, int>(d.dot(points[i]), i));
std::sort(d_vals.begin(),
d_vals.end(),
@@ -70,13 +70,13 @@ PointTracker::PointOrder PointTracker::find_correspondences_previous(const std::
for (int i=0; i<PointModel::N_POINTS; ++i)
{
- float min_sdist = 0;
- int min_idx = 0;
+ double min_sdist = 0;
+ unsigned min_idx = 0;
// find closest point to projected model point i
for (int j=0; j<PointModel::N_POINTS; ++j)
{
- cv::Vec2f d = p.points[i]-points[j];
- float sdist = d.dot(d);
+ cv::Vec2d d = p.points[i]-points[j];
+ double sdist = d.dot(d);
if (sdist < min_sdist || j==0)
{
min_idx = j;
@@ -115,19 +115,19 @@ void PointTracker::track(const std::vector<cv::Vec2f>& points, const PointModel&
t.start();
}
-PointTracker::PointOrder PointTracker::find_correspondences(const std::vector<cv::Vec2f>& points, const PointModel& model)
+PointTracker::PointOrder PointTracker::find_correspondences(const std::vector<cv::Vec2d>& points, const PointModel& model)
{
// We do a simple freetrack-like sorting in the init phase...
// sort points
int point_d_order[PointModel::N_POINTS];
int model_d_order[PointModel::N_POINTS];
- cv::Vec2f d(model.M01[0]-model.M02[0], model.M01[1]-model.M02[1]);
+ cv::Vec2d d(model.M01[0]-model.M02[0], model.M01[1]-model.M02[1]);
model.get_d_order(points, point_d_order, d);
// calculate d and d_order for simple freetrack-like point correspondence
- model.get_d_order(std::vector<cv::Vec2f> {
- cv::Vec2f{0,0},
- cv::Vec2f(model.M01[0], model.M01[1]),
- cv::Vec2f(model.M02[0], model.M02[1])
+ model.get_d_order(std::vector<cv::Vec2d> {
+ cv::Vec2d{0,0},
+ cv::Vec2d(model.M01[0], model.M01[1]),
+ cv::Vec2d(model.M02[0], model.M02[1])
},
model_d_order,
d);
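The two get_d_order calls above implement the freetrack-like init-phase matching: both the detected points and the model reference points are sorted by their scalar product with a common direction d, then paired up by rank. A sketch of that idea under assumed names (match_by_rank and the final pairing step are illustrations, not tracker code):

#include <algorithm>
#include <cstddef>
#include <numeric>
#include <utility>
#include <vector>
#include <opencv2/core.hpp>

static std::vector<std::pair<int,int>>
match_by_rank(const std::vector<cv::Vec2d>& observed,
              const std::vector<cv::Vec2d>& model2d,
              const cv::Vec2d& d)
{
    // sort indices of a point set by projection onto d
    auto order = [&d](const std::vector<cv::Vec2d>& pts)
    {
        std::vector<int> idx(pts.size());
        std::iota(idx.begin(), idx.end(), 0);
        std::sort(idx.begin(), idx.end(),
                  [&](int a, int b) { return d.dot(pts[a]) < d.dot(pts[b]); });
        return idx;
    };

    const std::vector<int> obs = order(observed), mdl = order(model2d);
    std::vector<std::pair<int,int>> pairs;
    // pair the model point and the detected point of equal rank along d
    for (std::size_t i = 0; i < obs.size() && i < mdl.size(); i++)
        pairs.emplace_back(mdl[i], obs[i]);
    return pairs;
}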
@@ -140,6 +140,7 @@ PointTracker::PointOrder PointTracker::find_correspondences(const std::vector<cv
}
-int PointTracker::POSIT(const PointModel& model, const PointOrder& order_, float focal_length)
+bool PointTracker::POSIT(const PointModel& model, const PointOrder& order_, double focal_length)
{
// POSIT algorithm for coplanar points as presented in
// [Denis Oberkampf, Daniel F. DeMenthon, Larry S. Davis: "Iterative Pose Estimation Using Coplanar Feature Points"]
@@ -147,29 +148,29 @@ int PointTracker::POSIT(const PointModel& model, const PointOrder& order_, float
// The expected rotation used for resolving the ambiguity in POSIT:
// In every iteration step the rotation closer to R_expected is taken
- cv::Matx33f R_expected = cv::Matx33f::eye();
+ cv::Matx33d R_expected = cv::Matx33d::eye();
// initial pose = last (predicted) pose
- cv::Vec3f k;
+ cv::Vec3d k;
get_row(R_expected, 2, k);
- float Z0 = 1000.f;
- float old_epsilon_1 = 0;
- float old_epsilon_2 = 0;
- float epsilon_1 = 1;
- float epsilon_2 = 1;
+ double Z0 = 1000;
+ double old_epsilon_1 = 0;
+ double old_epsilon_2 = 0;
+ double epsilon_1 = 1;
+ double epsilon_2 = 1;
- cv::Vec3f I0, J0;
- cv::Vec2f I0_coeff, J0_coeff;
+ cv::Vec3d I0, J0;
+ cv::Vec2d I0_coeff, J0_coeff;
- cv::Vec3f I_1, J_1, I_2, J_2;
- cv::Matx33f R_1, R_2;
- cv::Matx33f* R_current;
+ cv::Vec3d I_1, J_1, I_2, J_2;
+ cv::Matx33d R_1, R_2;
+ cv::Matx33d& R_current = R_1;
- const int MAX_ITER = 100;
- const float EPS_THRESHOLD = 1e-4;
+ const int MAX_ITER = 500;
+ static constexpr double eps = 1e-6;
- const cv::Vec2f* order = order_.points;
+ const cv::Vec2d* order = order_.points;
int i=1;
for (; i<MAX_ITER; ++i)
@@ -178,9 +179,9 @@ int PointTracker::POSIT(const PointModel& model, const PointOrder& order_, float
epsilon_2 = k.dot(model.M02)/Z0;
// vector of scalar products <I0, M0i> and <J0, M0i>
- cv::Vec2f I0_M0i(order[1][0]*(1.0 + epsilon_1) - order[0][0],
+ cv::Vec2d I0_M0i(order[1][0]*(1.0 + epsilon_1) - order[0][0],
order[2][0]*(1.0 + epsilon_2) - order[0][0]);
- cv::Vec2f J0_M0i(order[1][1]*(1.0 + epsilon_1) - order[0][1],
+ cv::Vec2d J0_M0i(order[1][1]*(1.0 + epsilon_1) - order[0][1],
order[2][1]*(1.0 + epsilon_2) - order[0][1]);
// construct projection of I, J onto M0i plane: I0 and J0
@@ -190,20 +191,20 @@ int PointTracker::POSIT(const PointModel& model, const PointOrder& order_, float
J0 = J0_coeff[0]*model.M01 + J0_coeff[1]*model.M02;
// calculate u component of I, J
- float II0 = I0.dot(I0);
- float IJ0 = I0.dot(J0);
- float JJ0 = J0.dot(J0);
- float rho, theta;
+ double II0 = I0.dot(I0);
+ double IJ0 = I0.dot(J0);
+ double JJ0 = J0.dot(J0);
+ double rho, theta;
if (JJ0 == II0) {
- rho = std::sqrt(std::abs(2*IJ0));
- theta = -PI/4;
+ rho = std::sqrt(std::fabs(2*IJ0));
+ theta = -M_PI/4;
if (IJ0<0) theta *= -1;
}
else {
rho = sqrt(sqrt( (JJ0-II0)*(JJ0-II0) + 4*IJ0*IJ0 ));
theta = atan( -2*IJ0 / (JJ0-II0) );
// avoid branch misprediction
- theta += (JJ0 - II0 < 0) * PI;
+ theta += (JJ0 - II0 < 0) * M_PI;
theta /= 2;
}
@@ -214,7 +215,7 @@ int PointTracker::POSIT(const PointModel& model, const PointOrder& order_, float
J_1 = J0 + rho*sin(theta)*model.u;
J_2 = J0 - rho*sin(theta)*model.u;
- float norm_const = 1.0/cv::norm(I_1); // all have the same norm
+ double norm_const = 1/cv::norm(I_1); // all have the same norm
// create rotation matrices
I_1 *= norm_const; J_1 *= norm_const;
@@ -233,26 +234,29 @@ int PointTracker::POSIT(const PointModel& model, const PointOrder& order_, float
// pick the rotation solution closer to the expected one
// in simple metric d(A,B) = || I - A * B^T ||
- float R_1_deviation = cv::norm(cv::Matx33f::eye() - R_expected * R_1.t());
- float R_2_deviation = cv::norm(cv::Matx33f::eye() - R_expected * R_2.t());
+ double R_1_deviation = cv::norm(cv::Matx33d::eye() - R_expected * R_1.t());
+ double R_2_deviation = cv::norm(cv::Matx33d::eye() - R_expected * R_2.t());
if (R_1_deviation < R_2_deviation)
- R_current = &R_1;
+ R_current = R_1;
else
- R_current = &R_2;
+ R_current = R_2;
- get_row(*R_current, 2, k);
+ get_row(R_current, 2, k);
// check for convergence condition
- if (std::abs(epsilon_1 - old_epsilon_1) + std::abs(epsilon_2 - old_epsilon_2) < EPS_THRESHOLD)
+ const double delta = fabs(epsilon_1 - old_epsilon_1) + fabs(epsilon_2 - old_epsilon_2);
+
+ if (!(delta > eps))
break;
+
old_epsilon_1 = epsilon_1;
old_epsilon_2 = epsilon_2;
}
QMutexLocker l(&mtx);
// apply results
- X_CM.R = *R_current;
+ X_CM.R = R_current;
X_CM.t[0] = order[0][0] * Z0/focal_length;
X_CM.t[1] = order[0][1] * Z0/focal_length;
X_CM.t[2] = Z0;
@@ -262,8 +266,8 @@ int PointTracker::POSIT(const PointModel& model, const PointOrder& order_, float
return i;
}
-cv::Vec2f PointTracker::project(const cv::Vec3f& v_M, float f)
+cv::Vec2d PointTracker::project(const cv::Vec3d& v_M, double f)
{
- cv::Vec3f v_C = X_CM * v_M;
- return cv::Vec2f(f*v_C[0]/v_C[2], f*v_C[1]/v_C[2]);
+ cv::Vec3d v_C = X_CM * v_M;
+ return cv::Vec2d(f*v_C[0]/v_C[2], f*v_C[1]/v_C[2]);
}
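On the ambiguity resolution in the POSIT hunks above: coplanar POSIT yields two candidate rotations per iteration, and the one closer to R_expected under the metric d(A,B) = || I - A * B^T || is kept. A minimal sketch of that selection (the function name is assumed):

#include <opencv2/core.hpp>

// Frobenius-norm distance to the expected rotation; identical rotations give 0.
static const cv::Matx33d& pick_closer_rotation(const cv::Matx33d& R_1,
                                               const cv::Matx33d& R_2,
                                               const cv::Matx33d& R_expected)
{
    const double d_1 = cv::norm(cv::Matx33d::eye() - R_expected * R_1.t());
    const double d_2 = cv::norm(cv::Matx33d::eye() - R_expected * R_2.t());
    return d_1 < d_2 ? R_1 : R_2;
}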
diff --git a/tracker-pt/point_tracker.h b/tracker-pt/point_tracker.h
index 48c7617e..00e9278c 100644
--- a/tracker-pt/point_tracker.h
+++ b/tracker-pt/point_tracker.h
@@ -20,11 +20,11 @@
class Affine
{
public:
- Affine() : R(cv::Matx33f::eye()), t(0,0,0) {}
- Affine(const cv::Matx33f& R, const cv::Vec3f& t) : R(R),t(t) {}
+ Affine() : R(cv::Matx33d::eye()), t(0,0,0) {}
+ Affine(const cv::Matx33d& R, const cv::Vec3d& t) : R(R),t(t) {}
- cv::Matx33f R;
- cv::Vec3f t;
+ cv::Matx33d R;
+ cv::Vec3d t;
};
inline Affine operator*(const Affine& X, const Affine& Y)
@@ -32,17 +32,17 @@ inline Affine operator*(const Affine& X, const Affine& Y)
return Affine(X.R*Y.R, X.R*Y.t + X.t);
}
-inline Affine operator*(const cv::Matx33f& X, const Affine& Y)
+inline Affine operator*(const cv::Matx33d& X, const Affine& Y)
{
return Affine(X*Y.R, X*Y.t);
}
-inline Affine operator*(const Affine& X, const cv::Matx33f& Y)
+inline Affine operator*(const Affine& X, const cv::Matx33d& Y)
{
return Affine(X.R*Y, X.t);
}
-inline cv::Vec3f operator*(const Affine& X, const cv::Vec3f& v)
+inline cv::Vec3d operator*(const Affine& X, const cv::Vec3d& v)
{
return X.R*v + X.t;
}
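A minimal usage sketch of the Affine helpers above, mirroring how the tracker composes the camera-from-model pose with a model-to-head offset. It assumes the Affine class defined above; the numeric values are made up:

#include <opencv2/core.hpp>

static cv::Vec3d head_position_in_camera()
{
    Affine X_CM(cv::Matx33d::eye(), cv::Vec3d(0, 0, 500));   // model pose in camera frame
    Affine X_MH(cv::Matx33d::eye(), cv::Vec3d(0, -30, 50));  // head offset in model frame
    Affine X_GH = X_CM * X_MH;            // composed as (X.R*Y.R, X.R*Y.t + X.t)
    return X_GH * cv::Vec3d(0, 0, 0);     // equals X_GH.t
}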
@@ -58,12 +58,12 @@ class PointModel
public:
static constexpr int N_POINTS = 3;
- cv::Vec3f M01; // M01 in model frame
- cv::Vec3f M02; // M02 in model frame
+ cv::Vec3d M01; // M01 in model frame
+ cv::Vec3d M02; // M02 in model frame
- cv::Vec3f u; // unit vector perpendicular to M01,M02-plane
+ cv::Vec3d u; // unit vector perpendicular to M01,M02-plane
- cv::Matx22f P;
+ cv::Matx22d P;
PointModel(settings_pt& s)
{
@@ -73,10 +73,10 @@ public:
u /= norm(u);
// calculate projection matrix on M01,M02 plane
- float s11 = M01.dot(M01);
- float s12 = M01.dot(M02);
- float s22 = M02.dot(M02);
- P = 1.0/(s11*s22-s12*s12) * cv::Matx22f(s22, -s12, -s12, s11);
+ double s11 = M01.dot(M01);
+ double s12 = M01.dot(M02);
+ double s22 = M02.dot(M02);
+ P = 1.0/(s11*s22-s12*s12) * cv::Matx22d(s22, -s12, -s12, s11);
}
void set_model(settings_pt& s)
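P, computed in the constructor above, is the inverse of the 2x2 Gram matrix of (M01, M02): multiplying it by the dot products of a vector with M01 and M02 gives the coefficients of that vector's orthogonal projection onto the M01/M02 plane, which is how I0 and J0 are assembled in POSIT. A sketch under assumed names:

#include <opencv2/core.hpp>

// Projects v orthogonally onto the plane spanned by M01 and M02,
// using the precomputed Gram-matrix inverse P from the constructor above.
static cv::Vec3d project_onto_plane(const cv::Vec3d& v,
                                    const cv::Vec3d& M01, const cv::Vec3d& M02,
                                    const cv::Matx22d& P)
{
    const cv::Vec2d coeff = P * cv::Vec2d(v.dot(M01), v.dot(M02));
    return coeff[0]*M01 + coeff[1]*M02;
}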
@@ -89,29 +89,29 @@ public:
case Cap:
{
const double x = 60, y = 90, z = 95;
- M01 = cv::Vec3f(-x, -y, z);
- M02 = cv::Vec3f(x, -y, z);
+ M01 = cv::Vec3d(-x, -y, z);
+ M02 = cv::Vec3d(x, -y, z);
break;
}
case ClipLeft:
case ClipRight:
{
const double a = 27, b = 43, c = 62, d = 74;
- M01 = cv::Vec3f(0, b, -a);
- M02 = cv::Vec3f(0, -c, -d);
+ M01 = cv::Vec3d(0, b, -a);
+ M02 = cv::Vec3d(0, -c, -d);
break;
}
}
}
- void get_d_order(const std::vector<cv::Vec2f>& points, int* d_order, cv::Vec2f d) const;
+ void get_d_order(const std::vector<cv::Vec2d>& points, int* d_order, const cv::Vec2d& d) const;
};
// ----------------------------------------------------------------------------
// Tracks a 3-point model
// implementing the POSIT algorithm for coplanar points as presented in
// [Denis Oberkampf, Daniel F. DeMenthon, Larry S. Davis: "Iterative Pose Estimation Using Coplanar Feature Points"]
-class PointTracker
+class PointTracker final
{
public:
PointTracker();
@@ -125,17 +125,17 @@ private:
// the points in model order
struct PointOrder
{
- cv::Vec2f points[PointModel::N_POINTS];
+ cv::Vec2d points[PointModel::N_POINTS];
PointOrder()
{
for (int i = 0; i < PointModel::N_POINTS; i++)
- points[i] = cv::Vec2f(0, 0);
+ points[i] = cv::Vec2d(0, 0);
}
};
- PointOrder find_correspondences(const std::vector<cv::Vec2f>& projected_points, const PointModel &model);
PointOrder find_correspondences_previous(const std::vector<cv::Vec2f>& points, const PointModel &model, float f);
- int POSIT(const PointModel& point_model, const PointOrder& order, float focal_length); // The POSIT algorithm, returns the number of iterations
+ PointOrder find_correspondences(const std::vector<cv::Vec2d>& projected_points, const PointModel &model);
+ bool POSIT(const PointModel& point_model, const PointOrder& order, double focal_length); // The POSIT algorithm, returns the number of iterations
Affine X_CM; // trafo from model to camera