path: root/ftnoir_tracker_pt/point_tracker.h
/* Copyright (c) 2012 Patrick Ruoff
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 */

#ifndef POINTTRACKER_H
#define POINTTRACKER_H

#include <opencv2/opencv.hpp>
#ifndef OPENTRACK_API
#   include <boost/shared_ptr.hpp>
#endif
#include <memory>	// std::shared_ptr<PointModel> below is used in both configurations
#include <vector>	// std::vector parameters in the tracking interface
#include <list>

// ----------------------------------------------------------------------------
// Affine frame trafo
class FrameTrafo
{
public:
	FrameTrafo() : R(cv::Matx33f::eye()), t(0,0,0) {}
	FrameTrafo(const cv::Matx33f& R, const cv::Vec3f& t) : R(R),t(t) {}

	cv::Matx33f R;
	cv::Vec3f t;
};

inline FrameTrafo operator*(const FrameTrafo& X, const FrameTrafo& Y)
{
	return FrameTrafo(X.R*Y.R, X.R*Y.t + X.t);
}

inline FrameTrafo operator*(const cv::Matx33f& X, const FrameTrafo& Y)
{
	return FrameTrafo(X*Y.R, X*Y.t);
}

inline FrameTrafo operator*(const FrameTrafo& X, const cv::Matx33f& Y)
{
	return FrameTrafo(X.R*Y, X.t);
}

inline cv::Vec3f operator*(const FrameTrafo& X, const cv::Vec3f& v)
{
	return X.R*v + X.t;
}
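
// Minimal usage sketch (not part of the original header): composing affine
// transforms and applying one to a point. R_ab, t_ab and the frame names are
// hypothetical values chosen for illustration only.
//
//   cv::Matx33f R_ab = cv::Matx33f::eye();
//   cv::Vec3f   t_ab(0.f, 0.f, 1.f);
//   FrameTrafo X_AB(R_ab, t_ab);             // maps frame B coordinates into frame A
//   FrameTrafo X_BC;                          // identity by default
//   FrameTrafo X_AC = X_AB * X_BC;            // composition: R = R_ab*R_bc, t = R_ab*t_bc + t_ab
//   cv::Vec3f p_A = X_AC * cv::Vec3f(1,2,3);  // application: p_A = R*p + t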


// ----------------------------------------------------------------------------
// Describes a 3-point model
// nomenclature as in
// [Denis Oberkampf, Daniel F. DeMenthon, Larry S. Davis: "Iterative Pose Estimation Using Coplanar Feature Points"]
class PointModel
{
	friend class PointTracker;
public:
	static const int N_POINTS = 3;

	PointModel(cv::Vec3f M01, cv::Vec3f M02);

	const cv::Vec3f& get_M01() const { return M01; }
	const cv::Vec3f& get_M02() const { return M02; }

protected:
	cv::Vec3f M01;	// vector from model point 0 to point 1, in the model frame
	cv::Vec3f M02;	// vector from model point 0 to point 2, in the model frame

	cv::Vec3f u;	// unit vector perpendicular to the M01,M02 plane

	cv::Matx22f P;	// precomputed projection matrix onto the M01,M02 plane (used by POSIT)

	cv::Vec2f d;	// discriminant vector for point correspondence
	int d_order[3];	// ordering of the projected model points by their scalar product with d

	void get_d_order(const std::vector<cv::Vec2f>& points, int d_order[]) const;
};
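
// Construction sketch (illustrative only, not part of the original header):
// the model is defined by the two vectors from point 0 to points 1 and 2,
// e.g. for a hypothetical three-point rig with coordinates in millimetres:
//
//   cv::Vec3f M01( 40.f, 60.f, -100.f);   // hypothetical geometry
//   cv::Vec3f M02(-40.f, 60.f, -100.f);
//   PointModel model(M01, M02);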

// ----------------------------------------------------------------------------
// Tracks a 3-point model
// implementing the POSIT algorithm for coplanar points as presented in
// [Denis Oberkampf, Daniel F. DeMenthon, Larry S. Davis: "Iterative Pose Estimation Using Coplanar Feature Points"]
class PointTracker
{
public:
	PointTracker();

	// track the pose using the set of normalized point coordinates (x coordinate in the range [-0.5, 0.5])
	// f : (focal length)/(sensor width)
	// dt : time since last call
	bool track(const std::vector<cv::Vec2f>& points, float f, float dt);
	std::shared_ptr<PointModel> point_model;

	bool dynamic_pose_resolution;	// if set, use the predicted pose to resolve point correspondences
	float dt_reset;	// seconds without a valid result before the tracker state is reset

	FrameTrafo get_pose() const { return X_CM; }
	void reset();

protected:
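	// project() applies the normalized pinhole model: a camera-frame point
	// (X,Y,Z) maps to (f*X/Z, f*Y/Z); with f = focal_length/sensor_width the
	// horizontal image coordinate spans roughly [-0.5, 0.5] across the sensor.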
	inline cv::Vec2f project(const cv::Vec3f& v_M, float f)
	{
		cv::Vec3f v_C = X_CM * v_M;
		return cv::Vec2f(f*v_C[0]/v_C[2], f*v_C[1]/v_C[2]);
	}

	bool find_correspondences(const std::vector<cv::Vec2f>& points, float f);

	cv::Vec2f p[PointModel::N_POINTS];	// the points in model order
	cv::Vec2f p_exp[PointModel::N_POINTS];	// the expected point positions

	void predict(float dt);
	void update_velocities(float dt);
	void reset_velocities();

	
	int POSIT(float f);  // The POSIT algorithm, returns the number of iterations

	bool init_phase;
	float dt_valid;	// time since last valid tracking result
	cv::Vec3f v_t;	// translational velocity
	cv::Vec3f v_r;	// rotational velocity
	FrameTrafo X_CM; // trafo from model to camera
	FrameTrafo X_CM_old;	// previous pose, used for velocity estimation and prediction
};
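
// Typical per-frame usage sketch (illustrative only, not part of the original
// header; point extraction, f and dt are assumed to come from the caller):
//
//   PointTracker tracker;
//   tracker.point_model = std::make_shared<PointModel>(M01, M02);
//   // each frame: points are the normalized blob centers, f = focal_length/sensor_width
//   if (tracker.track(points, f, dt))
//   {
//       FrameTrafo pose = tracker.get_pose();   // rotation pose.R, translation pose.t
//   }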

#endif //POINTTRACKER_H