/* Copyright (c) 2012 Patrick Ruoff
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 */

#include "point_tracker.h"

#include <vector>
#include <algorithm>
#include <cmath>

#include <QDebug>

using namespace cv;
using namespace boost;
using namespace std;

// ----------------------------------------------------------------------------
PointModel::PointModel(Vec3f M01, Vec3f M02)
    : M01(M01), M02(M02)
{
    // calculate u
    u = M01.cross(M02);
    u /= norm(u);

    // calculate projection matrix on M01,M02 plane
    float s11 = M01.dot(M01);
    float s12 = M01.dot(M02);
    float s22 = M02.dot(M02);
    P = 1.0/(s11*s22-s12*s12) * Matx22f(s22, -s12,
                                        -s12,  s11);

    // calculate d and d_order for simple freetrack-like point correspondence
    vector<Vec2f> points;
    points.push_back(Vec2f(0, 0));
    points.push_back(Vec2f(M01[0], M01[1]));
    points.push_back(Vec2f(M02[0], M02[1]));

    // fit line to orthographically projected points
    // ERROR: yields wrong results with collinear points?!
    /*
    Vec4f line;
    fitLine(points, line, CV_DIST_L2, 0, 0.01, 0.01);
    d[0] = line[0];
    d[1] = line[1];
    */
    // TODO: fix this
    d = Vec2f(M01[0]-M02[0], M01[1]-M02[1]);

    // sort model points
    get_d_order(points, d_order);
}

#ifdef OPENTRACK_API
static bool d_vals_sort(const pair<float, int> a, const pair<float, int> b)
{
    return a.first < b.first;
}
#endif

void PointModel::get_d_order(const std::vector<cv::Vec2f>& points, int d_order[]) const
{
    // get sort indices with respect to d scalar product
    vector< pair<float, int> > d_vals;
    for (int i = 0; i < (int)points.size(); ++i)
        d_vals.push_back(pair<float, int>(d.dot(points[i]), i));

    struct
    {
        bool operator()(const pair<float, int>& a, const pair<float, int>& b)
        {
            return a.first < b.first;
        }
    } comp;

    std::sort(d_vals.begin(),
              d_vals.end(),
#ifdef OPENTRACK_API
              d_vals_sort
#else
              comp
#endif
              );

    // write out the index permutation that sorts the points along d
    for (int i = 0; i < PointModel::N_POINTS; ++i)
        d_order[i] = d_vals[i].second;
}

// ----------------------------------------------------------------------------
void PointTracker::reset()
{
    // enter the init phase again; correspondences are re-established by the
    // freetrack-like sorting on the next frame
    init_phase = true;
    dt_valid = 0;
}

bool PointTracker::track(const vector<Vec2f>& points, float fov, float dt, int w, int h)
{
    if (!dynamic_pose_resolution) init_phase = true;

    dt_valid += dt;
    // if there was no valid tracking result for too long, do a reset
    if (dt_valid > dt_reset)
    {
        //qDebug()<<"dt_valid "<<dt_valid<<" > dt_reset "<<dt_reset;
        reset();
    }

    // if there is a point-tracking problem, bail out
    if (!point_model || points.size() != PointModel::N_POINTS)
        return false;

    // if the point correspondence problem cannot be solved, something has gone wrong; do a reset
    if (!find_correspondences(points))
    {
        reset();
        return false;
    }

    POSIT(fov, w, h);

    // we have a valid tracking result: leave the init phase and reset the time since the last valid result
    init_phase = false;
    dt_valid = 0;
    return true;
}
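// Minimal sketch of the projection helper used by find_correspondences() below:
// it maps a model point into image (pixel) coordinates under the current pose
// estimate X_CM, using the same pinhole intrinsics that POSIT() derives from the
// stored fov, _w and _h members. This is an assumed reconstruction, not a
// verbatim implementation; the declaration in point_tracker.h is authoritative.
Vec2f PointTracker::project(const Vec3f& v_M)
{
    // rotate and translate the model point into camera coordinates
    double x = X_CM.R(0, 0)*v_M[0] + X_CM.R(0, 1)*v_M[1] + X_CM.R(0, 2)*v_M[2] + X_CM.t[0];
    double y = X_CM.R(1, 0)*v_M[0] + X_CM.R(1, 1)*v_M[1] + X_CM.R(1, 2)*v_M[2] + X_CM.t[1];
    double z = X_CM.R(2, 0)*v_M[0] + X_CM.R(2, 1)*v_M[1] + X_CM.R(2, 2)*v_M[2] + X_CM.t[2];

    // assumption: same focal lengths and principal point as the intrinsics built in POSIT()
    const float HT_PI = 3.1415926535;
    const double focal_length_w = 0.5 * _w / tan(fov * HT_PI / 180);
    const double focal_length_h = 0.5 * _h / tan(fov * _h / _w * HT_PI / 180.0);

    return Vec2f(focal_length_w * x / z + _w / 2,
                 focal_length_h * y / z + _h / 2);
}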
bool PointTracker::find_correspondences(const vector<Vec2f>& points)
{
    if (init_phase)
    {
        // We do a simple freetrack-like sorting in the init phase...
        // sort points
        int point_d_order[PointModel::N_POINTS];
        point_model->get_d_order(points, point_d_order);

        // set correspondences
        for (int i = 0; i < PointModel::N_POINTS; ++i)
        {
            p[point_model->d_order[i]] = points[point_d_order[i]];
        }
    }
    else
    {
        // ... otherwise we look at the distance to the projection of the expected model points
        // project model points under current pose
        p_exp[0] = project(Vec3f(0, 0, 0));
        p_exp[1] = project(point_model->M01);
        p_exp[2] = project(point_model->M02);

        // set correspondences by minimum distance to projected model point
        bool point_taken[PointModel::N_POINTS];
        for (int i = 0; i < PointModel::N_POINTS; ++i)
            point_taken[i] = false;

        for (int i = 0; i < PointModel::N_POINTS; ++i)
        {
            // find the detected point closest to projected model point i
            float min_sdist = 0;
            int min_idx = 0;
            for (int j = 0; j < PointModel::N_POINTS; ++j)
            {
                Vec2f d = p_exp[i] - points[j];
                float sdist = d.dot(d);
                if (sdist < min_sdist || j == 0)
                {
                    min_idx = j;
                    min_sdist = sdist;
                }
            }
            // if one detected point is the closest match for more than one model point, give up
            if (point_taken[min_idx]) return false;
            point_taken[min_idx] = true;
            p[i] = points[min_idx];
        }
    }
    return true;
}

void PointTracker::POSIT(float fov, int w, int h)
{
    // XXX hack
    this->fov = fov;
    _w = w;
    _h = h;

    std::vector<cv::Point3f> obj_points;
    std::vector<cv::Point2f> img_points;

    obj_points.push_back(cv::Vec3f(0, 0, 0));
    obj_points.push_back(point_model->M01);
    obj_points.push_back(point_model->M02);

    img_points.push_back(p[0]);
    img_points.push_back(p[1]);
    img_points.push_back(p[2]);

    const float HT_PI = 3.1415926535;

    const float focal_length_w = 0.5 * w / tan(fov * HT_PI / 180);
    const float focal_length_h = 0.5 * h / tan(fov * h / w * HT_PI / 180.0);

    cv::Mat intrinsics = cv::Mat::eye(3, 3, CV_32FC1);
    intrinsics.at<float>(0, 0) = focal_length_w;
    intrinsics.at<float>(1, 1) = focal_length_h;
    intrinsics.at<float>(0, 2) = w/2;
    intrinsics.at<float>(1, 2) = h/2;

    cv::Mat dist_coeffs = cv::Mat::zeros(5, 1, CV_32FC1);

    bool lastp = !rvec.empty() && !tvec.empty();

    cv::solvePnP(obj_points, img_points, intrinsics, dist_coeffs, rvec, tvec, lastp, cv::ITERATIVE);

    cv::Mat rmat;
    cv::Rodrigues(rvec, rmat);

    // finally, pick the solution closer to the expected (previous) rotation
    cv::Mat expected = cv::Mat(X_CM.R);
    cv::Mat eye = cv::Mat::eye(3, 3, CV_64FC1);
    double dev1 = norm(eye - expected * rmat.t());
    double dev2 = norm(eye - expected * rmat);

    if (dev1 > dev2)
    {
        rmat = rmat.t();
        cv::Rodrigues(rmat, rvec);
    }

    // apply results
    for (int i = 0; i < 3; i++)
    {
        X_CM.t[i] = tvec.at<double>(i) * 1e-2;
        for (int j = 0; j < 3; j++)
            X_CM.R(i, j) = rmat.at<double>(i, j);
    }
}
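// Usage sketch (illustrative only, not part of the tracker): a capture loop
// feeds the extracted blob centers to track() once per frame and reads the pose
// back afterwards. Names such as `extractor`, `frame`, `fov_degrees` and the way
// the pose is accessed are hypothetical and depend on the declarations in
// point_tracker.h.
//
//   PointTracker tracker;
//   // ... assign a three-point PointModel describing the cap/clip geometry ...
//   std::vector<cv::Vec2f> points = extractor.extract_points(frame);
//   if (tracker.track(points, fov_degrees, dt_seconds, frame.cols, frame.rows))
//   {
//       // the pose is now available as the rigid transform X_CM
//       // (rotation R, translation t) from the model frame to the camera frame
//   }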