-rw-r--r--  FTNoIR_Tracker_PT/FTNoIR_PT_Controls.ui        |  13
-rw-r--r--  FTNoIR_Tracker_PT/camera.cpp                   |   8
-rw-r--r--  FTNoIR_Tracker_PT/camera.h                     |   6
-rw-r--r--  FTNoIR_Tracker_PT/ftnoir_tracker_pt.cpp        |   4
-rw-r--r--  FTNoIR_Tracker_PT/ftnoir_tracker_pt_dialog.cpp |   2
-rw-r--r--  FTNoIR_Tracker_PT/ftnoir_tracker_pt_settings.h |   4
-rw-r--r--  FTNoIR_Tracker_PT/point_tracker.cpp            | 191
-rw-r--r--  FTNoIR_Tracker_PT/point_tracker.h              |  33
8 files changed, 169 insertions(+), 92 deletions(-)
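
Note: this commit replaces the FOV-based solvePnP pose estimation with a coplanar POSIT implementation and reparameterizes the camera by f = (focal length) / (sensor width) instead of a field-of-view angle. A before/after sketch of the affected interfaces, with signatures taken from the hunks below:

    // before
    void Camera::set_fov(float f);
    bool PointTracker::track(const std::vector<cv::Vec2f>& points,
                             float fov, float dt, int w, int h,
                             const cv::Vec3f& headpos);
    void PointTracker::POSIT(float fov, int w, int h, const cv::Vec3f& headpos);

    // after
    void Camera::set_f(float f);   // f = (focal length) / (sensor width)
    bool PointTracker::track(const std::vector<cv::Vec2f>& points,
                             float f, float dt);
    int  PointTracker::POSIT(float f);  // returns the iteration count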
diff --git a/FTNoIR_Tracker_PT/FTNoIR_PT_Controls.ui b/FTNoIR_Tracker_PT/FTNoIR_PT_Controls.ui
index 461253cf..bdbed955 100644
--- a/FTNoIR_Tracker_PT/FTNoIR_PT_Controls.ui
+++ b/FTNoIR_Tracker_PT/FTNoIR_PT_Controls.ui
@@ -515,26 +515,23 @@
<item row="1" column="0">
<widget class="QLabel" name="label_34">
<property name="text">
- <string>FOV</string>
+ <string>F/W</string>
</property>
<property name="buddy">
- <cstring>fov_dspin</cstring>
+ <cstring>f_dspin</cstring>
</property>
</widget>
</item>
<item row="1" column="1">
- <widget class="QDoubleSpinBox" name="fov_dspin">
+ <widget class="QDoubleSpinBox" name="f_dspin">
<property name="toolTip">
<string>The camera's focal length divided by its sensor width</string>
</property>
<property name="decimals">
<number>2</number>
</property>
- <property name="minimum">
- <double>1.000000000000000</double>
- </property>
<property name="singleStep">
- <double>1.000000000000000</double>
+ <double>0.100000000000000</double>
</property>
</widget>
</item>
@@ -1736,7 +1733,7 @@
<tabstop>res_x_spin</tabstop>
<tabstop>res_y_spin</tabstop>
<tabstop>fps_spin</tabstop>
- <tabstop>fov_dspin</tabstop>
+ <tabstop>f_dspin</tabstop>
<tabstop>camroll_combo</tabstop>
<tabstop>campitch_spin</tabstop>
<tabstop>camyaw_spin</tabstop>
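
Note: the spin box now takes f = (focal length) / (sensor width) rather than a FOV in degrees. For converting an old setting, a hypothetical helper (not part of this change), assuming a simple pinhole camera and a horizontal FOV:

    #include <cmath>

    // f = 0.5 / tan(hfov/2), since tan(hfov/2) = (sensor_width/2) / focal_length
    static float fov_deg_to_f(float fov_deg)
    {
        const float PI = 3.14159265358979323846f;
        return 0.5f / std::tan(fov_deg * PI / 360.0f);
    }

The old default of 56 degrees maps to fov_deg_to_f(56) ≈ 0.94, consistent with the new default of f = 1.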
diff --git a/FTNoIR_Tracker_PT/camera.cpp b/FTNoIR_Tracker_PT/camera.cpp
index 8986be60..754533c5 100644
--- a/FTNoIR_Tracker_PT/camera.cpp
+++ b/FTNoIR_Tracker_PT/camera.cpp
@@ -108,11 +108,11 @@ void Camera::set_device_index(int index)
}
}
-void Camera::set_fov(float f)
+void Camera::set_f(float f)
{
- if (cam_desired.fov != f)
+ if (cam_desired.f != f)
{
- cam_desired.fov = f;
+ cam_desired.f = f;
_set_f();
}
}
@@ -208,7 +208,7 @@ void CVCamera::_set_index()
void CVCamera::_set_f()
{
- cam_info.fov = cam_desired.fov;
+ cam_info.f = cam_desired.f;
}
void CVCamera::_set_fps()
diff --git a/FTNoIR_Tracker_PT/camera.h b/FTNoIR_Tracker_PT/camera.h
index 6768e419..ea68c387 100644
--- a/FTNoIR_Tracker_PT/camera.h
+++ b/FTNoIR_Tracker_PT/camera.h
@@ -25,12 +25,12 @@ void get_camera_device_names(std::vector<std::string>& device_names);
// ----------------------------------------------------------------------------
struct CamInfo
{
- CamInfo() : res_x(0), res_y(0), fps(0), fov(56) {}
+ CamInfo() : res_x(0), res_y(0), fps(0), f(1) {}
int res_x;
int res_y;
int fps;
- float fov;
+ float f; // (focal length) / (sensor width)
};
// ----------------------------------------------------------------------------
@@ -48,7 +48,7 @@ public:
// calls corresponding template methods and reinitializes frame rate calculation
void set_device_index(int index);
- void set_fov(float f);
+ void set_f(float f);
void set_fps(int fps);
void set_res(int x_res, int y_res);
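
Note: where pixel-based code still needs a focal length, it can be recovered from the normalized f stored in CamInfo. A small sketch (this helper is not part of the diff):

    // focal length expressed in pixels, from f = focal / sensor_width:
    // focal_px = focal_mm / (sensor_width_mm / res_x) = f * res_x
    static float focal_length_px(const CamInfo& info)
    {
        return info.f * info.res_x;
    }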
diff --git a/FTNoIR_Tracker_PT/ftnoir_tracker_pt.cpp b/FTNoIR_Tracker_PT/ftnoir_tracker_pt.cpp
index 740cf3da..53d9c9e4 100644
--- a/FTNoIR_Tracker_PT/ftnoir_tracker_pt.cpp
+++ b/FTNoIR_Tracker_PT/ftnoir_tracker_pt.cpp
@@ -83,7 +83,7 @@ void Tracker::run()
{
frame = frame_rotation.rotate_frame(frame);
const std::vector<cv::Vec2f>& points = point_extractor.extract_points(frame, dt, false);
- tracking_valid = point_tracker.track(points, camera.get_info().fov, dt, frame.cols, frame.rows, t_MH);
+ tracking_valid = point_tracker.track(points, camera.get_info().f, dt);
video_widget->update_image(frame);
}
#ifdef PT_PERF_LOG
@@ -103,7 +103,7 @@ void Tracker::apply(settings& s)
camera.set_device_index(s.cam_index);
camera.set_res(s.cam_res_x, s.cam_res_y);
camera.set_fps(s.cam_fps);
- camera.set_fov(s.cam_fov);
+ camera.set_f(s.cam_f);
frame_rotation.rotation = static_cast<RotationType>(static_cast<int>(s.cam_roll));
point_extractor.threshold_val = s.threshold;
point_extractor.threshold_secondary_val = s.threshold_secondary;
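
Note: track() now consumes only normalized point coordinates plus f; the w/h/headpos arguments are gone. Per the comment in point_tracker.h, x is expected in the range -0.5..0.5. A sketch of one plausible pixel-to-normalized mapping consistent with that comment (an assumption; the actual convention lives in the point extractor):

    // map a pixel coordinate to normalized units, x in -0.5..0.5,
    // scaling both axes by the frame width so f stays isotropic
    static cv::Vec2f to_normalized(const cv::Vec2f& px, int w, int h)
    {
        return cv::Vec2f((px[0] - 0.5f*w) / w, (px[1] - 0.5f*h) / w);
    }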
diff --git a/FTNoIR_Tracker_PT/ftnoir_tracker_pt_dialog.cpp b/FTNoIR_Tracker_PT/ftnoir_tracker_pt_dialog.cpp
index 4ae20f48..c103b78c 100644
--- a/FTNoIR_Tracker_PT/ftnoir_tracker_pt_dialog.cpp
+++ b/FTNoIR_Tracker_PT/ftnoir_tracker_pt_dialog.cpp
@@ -46,7 +46,7 @@ TrackerDialog::TrackerDialog()
tie_setting(s.reset_time, ui.reset_spin);
tie_setting(s.cam_index, ui.camdevice_combo);
- tie_setting(s.cam_fov, ui.fov_dspin);
+ tie_setting(s.cam_f, ui.f_dspin);
tie_setting(s.cam_res_x, ui.res_x_spin);
tie_setting(s.cam_res_y, ui.res_y_spin);
tie_setting(s.cam_fps, ui.fps_spin);
diff --git a/FTNoIR_Tracker_PT/ftnoir_tracker_pt_settings.h b/FTNoIR_Tracker_PT/ftnoir_tracker_pt_settings.h
index b2ab8854..109090b3 100644
--- a/FTNoIR_Tracker_PT/ftnoir_tracker_pt_settings.h
+++ b/FTNoIR_Tracker_PT/ftnoir_tracker_pt_settings.h
@@ -28,7 +28,7 @@ struct settings
threshold_secondary,
min_point_size,
max_point_size;
- value<double> cam_fov;
+ value<double> cam_f;
value<int> m01_x, m01_y, m01_z;
value<int> m02_x, m02_y, m02_z;
@@ -57,7 +57,7 @@ struct settings
threshold_secondary(b, "threshold-secondary", 128),
min_point_size(b, "min-point-size", 10),
max_point_size(b, "max-point-size", 50),
- cam_fov(b, "camera-fov", 56),
+ cam_f(b, "camera-focal-length", 1),
m01_x(b, "m_01-x", 0),
m01_y(b, "m_01-y", 0),
m01_z(b, "m_01-z", 0),
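
Note: the stored key changes from "camera-fov" (default 56) to "camera-focal-length" (default 1), so an existing FOV setting is not picked up automatically. A hypothetical one-shot migration could reuse the conversion sketched above:

    // illustrative only; not part of this commit:
    // if an old "camera-fov" value exists, seed the new key from it,
    // e.g. cam_f = 0.5 / tan(old_fov_deg * PI / 360.0);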
diff --git a/FTNoIR_Tracker_PT/point_tracker.cpp b/FTNoIR_Tracker_PT/point_tracker.cpp
index 1df70b17..dfefdaf8 100644
--- a/FTNoIR_Tracker_PT/point_tracker.cpp
+++ b/FTNoIR_Tracker_PT/point_tracker.cpp
@@ -17,6 +17,23 @@ using namespace cv;
using namespace boost;
using namespace std;
+const float PI = 3.14159265358979323846f;
+
+// ----------------------------------------------------------------------------
+static void get_row(const Matx33f& m, int i, Vec3f& v)
+{
+ v[0] = m(i,0);
+ v[1] = m(i,1);
+ v[2] = m(i,2);
+}
+
+static void set_row(Matx33f& m, int i, const Vec3f& v)
+{
+ m(i,0) = v[0];
+ m(i,1) = v[1];
+ m(i,2) = v[2];
+}
+
// ----------------------------------------------------------------------------
PointModel::PointModel(Vec3f M01, Vec3f M02)
: M01(M01),
@@ -91,11 +108,9 @@ PointTracker::PointTracker()
dt_reset(1),
v_t(0,0,0),
v_r(0,0,0),
- dynamic_pose_resolution(true),
- fov(0),
- _w(0),
- _h(0)
+ dynamic_pose_resolution(true)
{
+ X_CM.t[2] = 1000; // default position: 1 m away from cam;
}
void PointTracker::reset()
@@ -113,7 +128,7 @@ void PointTracker::reset_velocities()
}
-bool PointTracker::track(const vector<Vec2f>& points, float fov, float dt, int w, int h, const cv::Vec3f& headpos)
+bool PointTracker::track(const vector<Vec2f>& points, float f, float dt)
{
if (!dynamic_pose_resolution) init_phase = true;
@@ -125,7 +140,12 @@ bool PointTracker::track(const vector<Vec2f>& points, float fov, float dt, int w
reset();
}
- bool no_model = !point_model;
+ bool no_model =
+#ifdef OPENTRACK_API
+ point_model.get() == NULL;
+#else
+ !point_model;
+#endif
// if there is a pointtracking problem, reset the velocities
if (no_model || points.size() != PointModel::N_POINTS)
@@ -141,7 +161,7 @@ bool PointTracker::track(const vector<Vec2f>& points, float fov, float dt, int w
predict(dt_valid);
// if there is a point correspondence problem something has gone wrong, do a reset
- if (!find_correspondences(points))
+ if (!find_correspondences(points, f))
{
//qDebug()<<"Error in finding point correspondences!";
X_CM = X_CM_old; // undo prediction
@@ -149,8 +169,7 @@ bool PointTracker::track(const vector<Vec2f>& points, float fov, float dt, int w
return false;
}
- // XXX TODO fov
- POSIT(fov, w, h, headpos);
+ int n_iter = POSIT(f);
//qDebug()<<"Number of POSIT iterations: "<<n_iter;
if (!init_phase)
@@ -179,7 +198,7 @@ void PointTracker::update_velocities(float dt)
v_t = (X_CM.t - X_CM_old.t)/dt;
}
-bool PointTracker::find_correspondences(const vector<Vec2f>& points)
+bool PointTracker::find_correspondences(const vector<Vec2f>& points, float f)
{
if (init_phase) {
// We do a simple freetrack-like sorting in the init phase...
@@ -196,9 +215,9 @@ bool PointTracker::find_correspondences(const vector<Vec2f>& points)
else {
// ... otherwise we look at the distance to the projection of the expected model points
// project model points under current pose
- p_exp[0] = project(Vec3f(0,0,0));
- p_exp[1] = project(point_model->M01);
- p_exp[2] = project(point_model->M02);
+ p_exp[0] = project(Vec3f(0,0,0), f);
+ p_exp[1] = project(point_model->M01, f);
+ p_exp[2] = project(point_model->M02, f);
// set correspondences by minimum distance to projected model point
bool point_taken[PointModel::N_POINTS];
@@ -232,48 +251,130 @@ bool PointTracker::find_correspondences(const vector<Vec2f>& points)
-void PointTracker::POSIT(float fov, int w, int h, const cv::Vec3f& headpos)
+int PointTracker::POSIT(float f)
{
- // XXX hack
- this->fov = fov;
- _w = w;
- _h = h;
- std::vector<cv::Point3f> obj_points;
- std::vector<cv::Point2f> img_points;
+ // POSIT algorithm for coplanar points as presented in
+ // [Denis Oberkampf, Daniel F. DeMenthon, Larry S. Davis: "Iterative Pose Estimation Using Coplanar Feature Points"]
+ // we use the same notation as in the paper here
+
+ // The expected rotation used for resolving the ambiguity in POSIT:
+ // In every iteration step the rotation closer to R_expected is taken
+ Matx33f R_expected;
+ if (init_phase)
+ R_expected = Matx33f::eye(); // in the init phase, we want to be close to the default pose = no rotation
+ else
+ R_expected = X_CM.R; // later we want to be close to the last (predicted) rotation
+
+ // initial pose = last (predicted) pose
+ Vec3f k;
+ get_row(R_expected, 2, k);
+ float Z0 = init_phase ? 1000 : X_CM.t[2];
+
+ float old_epsilon_1 = 0;
+ float old_epsilon_2 = 0;
+ float epsilon_1 = 1;
+ float epsilon_2 = 1;
+
+ Vec3f I0, J0;
+ Vec2f I0_coeff, J0_coeff;
+
+ Vec3f I_1, J_1, I_2, J_2;
+ Matx33f R_1, R_2;
+ Matx33f* R_current;
+
+ const int MAX_ITER = 100;
+ const float EPS_THRESHOLD = 1e-4;
+
+ int i=1;
+ for (; i<MAX_ITER; ++i)
+ {
+ epsilon_1 = k.dot(point_model->M01)/Z0;
+ epsilon_2 = k.dot(point_model->M02)/Z0;
+
+ // vector of scalar products <I0, M0i> and <J0, M0i>
+ Vec2f I0_M0i(p[1][0]*(1.0 + epsilon_1) - p[0][0],
+ p[2][0]*(1.0 + epsilon_2) - p[0][0]);
+ Vec2f J0_M0i(p[1][1]*(1.0 + epsilon_1) - p[0][1],
+ p[2][1]*(1.0 + epsilon_2) - p[0][1]);
+
+ // construct projection of I, J onto M0i plane: I0 and J0
+ I0_coeff = point_model->P * I0_M0i;
+ J0_coeff = point_model->P * J0_M0i;
+ I0 = I0_coeff[0]*point_model->M01 + I0_coeff[1]*point_model->M02;
+ J0 = J0_coeff[0]*point_model->M01 + J0_coeff[1]*point_model->M02;
+
+ // calculate u component of I, J
+ float II0 = I0.dot(I0);
+ float IJ0 = I0.dot(J0);
+ float JJ0 = J0.dot(J0);
+ float rho, theta;
+ if (JJ0 == II0) {
+ rho = sqrt(abs(2*IJ0));
+ theta = -PI/4;
+ if (IJ0<0) theta *= -1;
+ }
+ else {
+ rho = sqrt(sqrt( (JJ0-II0)*(JJ0-II0) + 4*IJ0*IJ0 ));
+ theta = atan( -2*IJ0 / (JJ0-II0) );
+ if (JJ0 - II0 < 0) theta += PI;
+ theta /= 2;
+ }
- obj_points.push_back(headpos);
- obj_points.push_back(point_model->M01 + headpos);
- obj_points.push_back(point_model->M02 + headpos);
+ // construct the two solutions
+ I_1 = I0 + rho*cos(theta)*point_model->u;
+ I_2 = I0 - rho*cos(theta)*point_model->u;
- img_points.push_back(p[0]);
- img_points.push_back(p[1]);
- img_points.push_back(p[2]);
+ J_1 = J0 + rho*sin(theta)*point_model->u;
+ J_2 = J0 - rho*sin(theta)*point_model->u;
- const float HT_PI = 3.1415926535;
+ float norm_const = 1.0/norm(I_1); // all have the same norm
+
+ // create rotation matrices
+ I_1 *= norm_const; J_1 *= norm_const;
+ I_2 *= norm_const; J_2 *= norm_const;
- const float focal_length_w = 0.5 * w / tan(fov * HT_PI / 180);
- const float focal_length_h = 0.5 * h / tan(fov * h / w * HT_PI / 180.0);
+ set_row(R_1, 0, I_1);
+ set_row(R_1, 1, J_1);
+ set_row(R_1, 2, I_1.cross(J_1));
+
+ set_row(R_2, 0, I_2);
+ set_row(R_2, 1, J_2);
+ set_row(R_2, 2, I_2.cross(J_2));
- cv::Mat intrinsics = cv::Mat::eye(3, 3, CV_32FC1);
- intrinsics.at<float> (0, 0) = focal_length_w;
- intrinsics.at<float> (1, 1) = focal_length_h;
- intrinsics.at<float> (0, 2) = w/2;
- intrinsics.at<float> (1, 2) = h/2;
+ // the single translation solution
+ Z0 = norm_const * f;
- cv::Mat dist_coeffs = cv::Mat::zeros(5, 1, CV_32FC1);
+ // pick the rotation solution closer to the expected one
+ // in simple metric d(A,B) = || I - A * B^T ||
+ float R_1_deviation = norm(Matx33f::eye() - R_expected * R_1.t());
+ float R_2_deviation = norm(Matx33f::eye() - R_expected * R_2.t());
- bool lastp = !rvec.empty() && !tvec.empty() && !init_phase;
+ if (R_1_deviation < R_2_deviation)
+ R_current = &R_1;
+ else
+ R_current = &R_2;
- cv::solvePnP(obj_points, img_points, intrinsics, dist_coeffs, rvec, tvec, lastp, cv::ITERATIVE);
+ get_row(*R_current, 2, k);
- cv::Mat rmat;
- cv::Rodrigues(rvec, rmat);
+ // check for convergence condition
+ if (abs(epsilon_1 - old_epsilon_1) + abs(epsilon_2 - old_epsilon_2) < EPS_THRESHOLD)
+ break;
+ old_epsilon_1 = epsilon_1;
+ old_epsilon_2 = epsilon_2;
+ }
// apply results
- for (int i = 0; i < 3; i++)
- {
- X_CM.t[i] = tvec.at<double>(i);
- for (int j = 0; j < 3; j++)
- X_CM.R(i, j) = rmat.at<double>(i, j);
- }
+ X_CM.R = *R_current;
+ X_CM.t[0] = p[0][0] * Z0/f;
+ X_CM.t[1] = p[0][1] * Z0/f;
+ X_CM.t[2] = Z0;
+
+ return i;
+
+	//Vec3f r;
+	//Rodrigues(X_CM.R, r);
+	//
+	//qDebug()<<"iter: "<<i;
+	//qDebug()<<"t: "<<X_CM.t[0]<<' '<<X_CM.t[1]<<' '<<X_CM.t[2];
+	//qDebug()<<"r: "<<r[0]<<' '<<r[1]<<' '<<r[2]<<'\n';
}
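
Note: a minimal usage sketch of the reworked tracker (illustrative values throughout; the model offsets, input points and timestep below are hypothetical and do not appear in this commit):

    #include <vector>
    #include <boost/shared_ptr.hpp>
    #include "point_tracker.h"

    void example()
    {
        PointTracker tracker;
        // three-point model: origin plus two offsets M01/M02, in mm
        tracker.point_model = boost::shared_ptr<PointModel>(
            new PointModel(cv::Vec3f(10, 40, -30), cv::Vec3f(10, -40, -30)));

        // three points in normalized image coordinates (x in -0.5..0.5)
        std::vector<cv::Vec2f> points;
        points.push_back(cv::Vec2f( 0.00f, 0.04f));
        points.push_back(cv::Vec2f( 0.01f, 0.00f));
        points.push_back(cv::Vec2f(-0.01f, 0.00f));

        bool valid = tracker.track(points, 1.0f /* f */, 0.01f /* dt, s */);
        if (valid)
        {
            FrameTrafo X_CM = tracker.get_pose(); // model -> camera, t in mm
            (void)X_CM;
        }
    }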
diff --git a/FTNoIR_Tracker_PT/point_tracker.h b/FTNoIR_Tracker_PT/point_tracker.h
index e05e8f98..11034100 100644
--- a/FTNoIR_Tracker_PT/point_tracker.h
+++ b/FTNoIR_Tracker_PT/point_tracker.h
@@ -90,7 +90,7 @@ public:
// track the pose using the set of normalized point coordinates (x pos in range -0.5:0.5)
// f : (focal length)/(sensor width)
// dt : time since last call
- bool track(const std::vector<cv::Vec2f>& points, float fov, float dt, int w, int h, const cv::Vec3f &headpos);
+ bool track(const std::vector<cv::Vec2f>& points, float f, float dt);
boost::shared_ptr<PointModel> point_model;
bool dynamic_pose_resolution;
@@ -98,35 +98,15 @@ public:
FrameTrafo get_pose() const { return X_CM; }
void reset();
- float fov;
- int _w, _h;
protected:
- cv::Vec2f project(const cv::Vec3f& v_M)
+ inline cv::Vec2f project(const cv::Vec3f& v_M, float f)
{
- if (!rvec.empty() && !tvec.empty() && fov > 0)
- {
- const float HT_PI = 3.1415926535;
- const int w = _w, h = _h;
- const float focal_length_w = 0.5 * w / tan(fov * HT_PI / 180);
- const float focal_length_h = 0.5 * h / tan(fov * h / w * HT_PI / 180.0);
-
- cv::Mat intrinsics = cv::Mat::eye(3, 3, CV_32FC1);
- intrinsics.at<float> (0, 0) = focal_length_w;
- intrinsics.at<float> (1, 1) = focal_length_h;
- intrinsics.at<float> (0, 2) = w/2;
- intrinsics.at<float> (1, 2) = h/2;
- std::vector<cv::Point3f> xs;
- xs.push_back(v_M);
- cv::Mat dist_coeffs = cv::Mat::zeros(5, 1, CV_32FC1);
- std::vector<cv::Point2f> rets(1);
- cv::projectPoints(xs, rvec, tvec, intrinsics, dist_coeffs, rets);
- return rets[0];
- }
- return cv::Vec2f();
+ cv::Vec3f v_C = X_CM * v_M;
+ return cv::Vec2f(f*v_C[0]/v_C[2], f*v_C[1]/v_C[2]);
}
- bool find_correspondences(const std::vector<cv::Vec2f>& points);
+ bool find_correspondences(const std::vector<cv::Vec2f>& points, float f);
cv::Vec2f p[PointModel::N_POINTS]; // the points in model order
cv::Vec2f p_exp[PointModel::N_POINTS]; // the expected point positions
@@ -136,7 +116,7 @@ protected:
void reset_velocities();
- void POSIT(float fov, int w, int h, const cv::Vec3f &headpos); // The POSIT algorithm, returns the number of iterations
+ int POSIT(float f); // The POSIT algorithm, returns the number of iterations
bool init_phase;
float dt_valid; // time since last valid tracking result
@@ -144,7 +124,6 @@ protected:
cv::Vec3f v_r;
FrameTrafo X_CM; // trafo from model to camera
FrameTrafo X_CM_old;
- cv::Mat rvec, tvec;
};
#endif //POINTTRACKER_H
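
Note: the new project() is a plain pinhole projection in the same normalized units. A standalone re-statement for checking by hand (a sketch; assumes FrameTrafo applies v_C = R*v_M + t):

    #include <opencv2/core/core.hpp>

    static cv::Vec2f project_pinhole(const cv::Matx33f& R, const cv::Vec3f& t,
                                     const cv::Vec3f& v_M, float f)
    {
        cv::Vec3f v_C = R * v_M + t;                          // model -> camera
        return cv::Vec2f(f*v_C[0]/v_C[2], f*v_C[1]/v_C[2]);   // divide by depth
    }

With the tracker's default pose (R = I, t = (0, 0, 1000) mm) and f = 1, a model point 100 mm to the right projects to (0.1, 0): one tenth of the sensor width off centre, matching the -0.5..0.5 normalized range.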