author     Stanislaw Halik <sthalik@misaki.pl>    2014-09-23 02:12:20 +0200
committer  Stanislaw Halik <sthalik@misaki.pl>    2014-09-23 02:12:20 +0200
commit     d74b99391bbdfb25f9559834082ae7ee6d30720d (patch)
tree       26d035c1c7680728f1c93cba42f8b121e1d40679
parent     cf84c354b30b39fe04a79f457947f7f778bc8fc7 (diff)
decruft PT more, so it doesn't crash finally
-rw-r--r--  ftnoir_tracker_pt/camera.cpp                      5
-rw-r--r--  ftnoir_tracker_pt/camera.h                       20
-rw-r--r--  ftnoir_tracker_pt/frame_observer.cpp             18
-rw-r--r--  ftnoir_tracker_pt/frame_observer.h               76
-rw-r--r--  ftnoir_tracker_pt/ftnoir_tracker_pt.cpp          44
-rw-r--r--  ftnoir_tracker_pt/ftnoir_tracker_pt.h            23
-rw-r--r--  ftnoir_tracker_pt/ftnoir_tracker_pt_dialog.cpp  108
-rw-r--r--  ftnoir_tracker_pt/ftnoir_tracker_pt_dialog.h     14
-rw-r--r--  ftnoir_tracker_pt/point_extractor.cpp            36
-rw-r--r--  ftnoir_tracker_pt/point_extractor.h               2
-rw-r--r--  ftnoir_tracker_pt/point_tracker.cpp              75
-rw-r--r--  ftnoir_tracker_pt/point_tracker.h                25
-rw-r--r--  ftnoir_tracker_pt/pt_video_widget.cpp            18
-rw-r--r--  ftnoir_tracker_pt/pt_video_widget.h              25
14 files changed, 150 insertions(+), 339 deletions(-)
diff --git a/ftnoir_tracker_pt/camera.cpp b/ftnoir_tracker_pt/camera.cpp
index 74b24093..861c83cc 100644
--- a/ftnoir_tracker_pt/camera.cpp
+++ b/ftnoir_tracker_pt/camera.cpp
@@ -188,11 +188,6 @@ bool CVCamera::_get_frame(Mat* frame)
return false;
}
-void CVCamera::_set_index()
-{
- if (active) restart();
-}
-
void CVCamera::_set_fps()
{
if (cap) cap->set(CV_CAP_PROP_FPS, cam_desired.fps);
diff --git a/ftnoir_tracker_pt/camera.h b/ftnoir_tracker_pt/camera.h
index e2ba56c4..86cafd42 100644
--- a/ftnoir_tracker_pt/camera.h
+++ b/ftnoir_tracker_pt/camera.h
@@ -25,12 +25,11 @@ void get_camera_device_names(std::vector<std::string>& device_names);
// ----------------------------------------------------------------------------
struct CamInfo
{
- CamInfo() : res_x(0), res_y(0), fps(0), f(1) {}
+ CamInfo() : res_x(0), res_y(0), fps(0) {}
int res_x;
int res_y;
int fps;
- float f; // (focal length) / (sensor width)
};
// ----------------------------------------------------------------------------
@@ -39,7 +38,7 @@ class Camera
{
public:
Camera() : dt_valid(0), dt_mean(0), desired_index(0), active_index(-1), active(false) {}
- virtual ~Camera() {}
+ virtual ~Camera() = 0;
// start/stop capturing
virtual void start() = 0;
@@ -75,7 +74,7 @@ protected:
CamInfo cam_info;
CamInfo cam_desired;
};
-
+inline Camera::~Camera() {}
// ----------------------------------------------------------------------------
// camera based on OpenCV's videoCapture
@@ -86,15 +85,14 @@ public:
CVCamera() : cap(NULL) {}
~CVCamera() { stop(); }
- virtual void start();
- virtual void stop();
+ void start() override;
+ void stop() override;
protected:
- virtual bool _get_frame(cv::Mat* frame);
- virtual void _set_index();
- virtual void _set_fps();
- virtual void _set_res();
- virtual void _set_device_index();
+ bool _get_frame(cv::Mat* frame) override;
+ void _set_fps() override;
+ void _set_res() override;
+ void _set_device_index() override;
cv::VideoCapture* cap;
};
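The camera.h hunk above turns the base destructor into a pure virtual with an out-of-line inline definition, which keeps Camera abstract while still giving derived destructors something to chain to. A minimal standalone sketch of that idiom (hypothetical Base/Derived names, not code from this tree):

#include <iostream>

struct Base {
    virtual ~Base() = 0;      // pure virtual: Base can no longer be instantiated
};
inline Base::~Base() {}       // a definition is still required; every derived
                              // destructor implicitly calls it

struct Derived : Base {
    ~Derived() override { std::cout << "~Derived\n"; }
};

int main() {
    Base* b = new Derived();
    delete b;                 // prints "~Derived", then runs Base::~Base()
    return 0;
}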
diff --git a/ftnoir_tracker_pt/frame_observer.cpp b/ftnoir_tracker_pt/frame_observer.cpp
deleted file mode 100644
index 76dee351..00000000
--- a/ftnoir_tracker_pt/frame_observer.cpp
+++ /dev/null
@@ -1,18 +0,0 @@
-/* Copyright (c) 2013 Patrick Ruoff
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- */
-
-#include "frame_observer.h"
-
-//-----------------------------------------------------------------------------
-FrameProvider::~FrameProvider()
-{
- QMutexLocker lock(&observer_mutex);
- for (std::set<FrameObserver*>::iterator iter=frame_observers.begin(); iter!=frame_observers.end(); ++iter)
- {
- (*iter)->on_frame_provider_destroy();
- }
-}
diff --git a/ftnoir_tracker_pt/frame_observer.h b/ftnoir_tracker_pt/frame_observer.h
deleted file mode 100644
index ca8ffb46..00000000
--- a/ftnoir_tracker_pt/frame_observer.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/* Copyright (c) 2013 Patrick Ruoff
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- */
-
-#ifndef FRAME_OBSERVER_H
-#define FRAME_OBSERVER_H
-
-#include <QMutex>
-#include <opencv2/opencv.hpp>
-#ifndef OPENTRACK_API
-# include <boost/shared_ptr.hpp>
-#else
-# include <memory>
-#endif
-#include <set>
-
-//-----------------------------------------------------------------------------
-// Forward declarations
-class FrameObserver;
-
-//-----------------------------------------------------------------------------
-// Provides means to copy frame and point information if it has observers
-// Instantiate a FrameObserver to get the information
-class FrameProvider
-{
- friend class FrameObserver;
-public:
- ~FrameProvider();
-
-protected:
- virtual bool get_frame_and_points(cv::Mat& frame, std::shared_ptr< std::vector<cv::Vec2f> >& points) = 0;
-
- bool has_observers() const { QMutexLocker lock(&observer_mutex); return !frame_observers.empty(); }
-
-private:
- mutable QMutex observer_mutex;
- void add_observer(FrameObserver* obs) { QMutexLocker lock(&observer_mutex); frame_observers.insert(obs); }
- void remove_observer(FrameObserver* obs) { QMutexLocker lock(&observer_mutex); frame_observers.erase(obs); }
- std::set<FrameObserver*> frame_observers;
-};
-
-//-----------------------------------------------------------------------------
-// Used to get frame and point information from MutexedFrameProvider
-// Destroy instance if not interested anymore since a living
-// FrameObserver instance causes MutexedFrameProvider to provide the information,
-// potentially reducing its performance
-class FrameObserver
-{
-public:
- FrameObserver(FrameProvider* provider) : provider(provider) {
- provider->add_observer(this);
- }
-
- ~FrameObserver() {
- if (provider) provider->remove_observer(this);
- }
-
- bool get_frame_and_points(cv::Mat& frame, std::shared_ptr< std::vector<cv::Vec2f> >& points) {
- return provider ? provider->get_frame_and_points(frame, points) : false;
- }
-
- void on_frame_provider_destroy() {
- provider = NULL;
- }
-
-protected:
- FrameProvider* provider;
-
-private:
- FrameObserver(const FrameObserver&);
-};
-
-#endif //FRAME_OBSERVER_H
diff --git a/ftnoir_tracker_pt/ftnoir_tracker_pt.cpp b/ftnoir_tracker_pt/ftnoir_tracker_pt.cpp
index 23da97ca..1136ad4a 100644
--- a/ftnoir_tracker_pt/ftnoir_tracker_pt.cpp
+++ b/ftnoir_tracker_pt/ftnoir_tracker_pt.cpp
@@ -17,9 +17,6 @@ using namespace cv;
//#define PT_PERF_LOG //log performance
-const float rad2deg = 180.0/3.14159265;
-const float deg2rad = 1.0/rad2deg;
-
//-----------------------------------------------------------------------------
Tracker::Tracker()
: mutex(QMutex::Recursive),
@@ -27,7 +24,6 @@ Tracker::Tracker()
video_widget(NULL),
video_frame(NULL),
new_settings(nullptr)
-
{
}
@@ -54,30 +50,28 @@ void Tracker::reset_command(Command command)
void Tracker::run()
{
- qDebug()<<"Tracker:: Thread started";
+ qDebug()<< "pt: thread started";
#ifdef PT_PERF_LOG
QFile log_file(QCoreApplication::applicationDirPath() + "/PointTrackerPerformance.txt");
if (!log_file.open(QIODevice::WriteOnly | QIODevice::Text)) return;
QTextStream log_stream(&log_file);
#endif
-
time.start();
- bool new_frame;
forever
{
if (commands & ABORT) break;
commands = 0;
apply_inner();
const double dt = time.start() * 1e-9;
- new_frame = camera.get_frame(dt, &frame);
+ const bool new_frame = camera.get_frame(dt, &frame);
if (new_frame && !frame.empty())
{
QMutexLocker lock(&mutex);
frame = frame_rotation.rotate_frame(frame);
- const std::vector<cv::Vec2f>& points = point_extractor.extract_points(frame, dt, true);
+ const std::vector<cv::Vec2f>& points = point_extractor.extract_points(frame);
for (auto p : points)
{
auto p2 = cv::Point(p[0] * frame.cols + frame.cols/2, -p[1] * frame.cols + frame.rows/2);
@@ -93,7 +87,8 @@ void Tracker::run()
color,
4);
}
- point_tracker.track(points, camera.get_info().f);
+ if (points.size() == PointModel::N_POINTS)
+ point_tracker.track(points, model);
video_widget->update_image(frame);
}
#ifdef PT_PERF_LOG
@@ -119,6 +114,12 @@ void Tracker::apply_inner()
reset();
auto& s = *tmp;
qDebug()<<"Tracker:: Applying settings";
+
+ {
+ cv::Vec3f M01(s.m01_x, s.m01_y, s.m01_z);
+ cv::Vec3f M02(s.m02_x, s.m02_y, s.m02_z);
+ model = PointModel(M01, M02);
+ }
camera.set_device_index(s.cam_index);
camera.set_res(s.cam_res_x, s.cam_res_y);
camera.set_fps(s.cam_fps);
@@ -127,11 +128,6 @@ void Tracker::apply_inner()
point_extractor.threshold_secondary_val = s.threshold_secondary;
point_extractor.min_size = s.min_point_size;
point_extractor.max_size = s.max_point_size;
- {
- cv::Vec3f M01(s.m01_x, s.m01_y, s.m01_z);
- cv::Vec3f M02(s.m02_x, s.m02_y, s.m02_z);
- point_tracker.point_model = std::shared_ptr<PointModel>(new PointModel(M01, M02));
- }
t_MH = cv::Vec3f(s.t_MH_x, s.t_MH_y, s.t_MH_z);
R_GC = Matx33f( cos(deg2rad*s.cam_yaw), 0, sin(deg2rad*s.cam_yaw),
0, 1, 0,
@@ -160,28 +156,12 @@ void Tracker::center()
X_GH_0 = R_GC * X_CM_0 * X_MH;
}
-bool Tracker::get_frame_and_points(cv::Mat& frame_copy, std::shared_ptr< std::vector<Vec2f> >& points)
-{
- QMutexLocker lock(&mutex);
- if (frame.empty()) return false;
-
- // copy the frame and points from the tracker thread
- frame_copy = frame.clone();
- points = std::shared_ptr< vector<Vec2f> >(new vector<Vec2f>(point_extractor.get_points()));
- return true;
-}
-
-void Tracker::refreshVideo()
-{
- if (video_widget) video_widget->update_frame_and_points();
-}
-
void Tracker::StartTracker(QFrame *parent_window)
{
this->video_frame = parent_window;
video_frame->setAttribute(Qt::WA_NativeWindow);
video_frame->show();
- video_widget = new PTVideoWidget(video_frame, this);
+ video_widget = new PTVideoWidget(video_frame);
QHBoxLayout* video_layout = new QHBoxLayout(parent_window);
video_layout->setContentsMargins(0, 0, 0, 0);
video_layout->addWidget(video_widget);
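With frame_observer.* gone, the tracker no longer exposes a pull-style FrameProvider interface; it pushes the already-annotated frame straight into the widget, and pose estimation only runs when exactly PointModel::N_POINTS detections are present. A hedged sketch of that push-style handoff, with a simplified stand-in for PTVideoWidget::update_image (the real one appears in pt_video_widget.cpp below):

#include <opencv2/opencv.hpp>
#include <QMutex>
#include <QMutexLocker>

// Simplified frame sink: the tracker thread deep-copies its frame under a
// mutex, and a GUI-side timer repaints whenever the fresh flag is set.
class FrameSink {
public:
    void update_image(const cv::Mat& frame)
    {
        QMutexLocker lock(&mtx);
        frame.copyTo(buffer);   // deep copy; the tracker keeps reusing its own Mat
        fresh = true;
    }
private:
    QMutex mtx;
    cv::Mat buffer;
    bool fresh = false;
};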
diff --git a/ftnoir_tracker_pt/ftnoir_tracker_pt.h b/ftnoir_tracker_pt/ftnoir_tracker_pt.h
index 6ddfbe5d..5bcfd37d 100644
--- a/ftnoir_tracker_pt/ftnoir_tracker_pt.h
+++ b/ftnoir_tracker_pt/ftnoir_tracker_pt.h
@@ -12,7 +12,6 @@
# include "facetracknoir/plugin-api.hpp"
#endif
#include "ftnoir_tracker_pt_settings.h"
-#include "frame_observer.h"
#include "camera.h"
#include "point_extractor.h"
#include "point_tracker.h"
@@ -34,30 +33,25 @@
//-----------------------------------------------------------------------------
// Constantly processes the tracking chain in a separate thread
-class Tracker : public ITracker, QThread, public FrameProvider
+class Tracker : public ITracker, protected QThread
{
public:
Tracker();
- virtual ~Tracker();
- virtual void StartTracker(QFrame* parent_window);
- virtual void GetHeadPoseData(double* data);
- virtual void refreshVideo();
+ ~Tracker() override;
+ void StartTracker(QFrame* parent_window) override;
+ void GetHeadPoseData(double* data) override;
void apply(settings& s);
void apply_inner();
void center();
void reset(); // reset the trackers internal state variables
- void run();
void get_pose(FrameTrafo* X_CM) { QMutexLocker lock(&mutex); *X_CM = point_tracker.get_pose(); }
int get_n_points() { QMutexLocker lock(&mutex); return point_extractor.get_points().size(); }
void get_cam_info(CamInfo* info) { QMutexLocker lock(&mutex); *info = camera.get_info(); }
-
+protected:
+ void run() override;
private:
- // --- MutexedFrameProvider interface ---
- virtual bool get_frame_and_points(cv::Mat& frame, std::shared_ptr< std::vector<cv::Vec2f> >& points);
-
- // --- thread ---
QMutex mutex;
// thread commands
enum Command {
@@ -85,6 +79,11 @@ private:
settings s;
std::atomic<settings*> new_settings;
Timer time;
+
+ static constexpr double rad2deg = 180.0/3.14159265;
+ static constexpr double deg2rad = 3.14159265/180.0;
+
+ PointModel model;
};
#undef VideoWidget
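The header also replaces the file-scope rad2deg/deg2rad globals removed from ftnoir_tracker_pt.cpp with private static constexpr members, and the Tracker now owns a PointModel by value instead of PointTracker holding one through a shared_ptr. A tiny sketch of the constant idiom, with a hypothetical class name and the values copied from the diff:

class AngleUnits {
public:
    static constexpr double rad2deg = 180.0 / 3.14159265;
    static constexpr double deg2rad = 3.14159265 / 180.0;
};

// usage is by value only, so no out-of-class definition is needed pre-C++17:
// double yaw_rad = AngleUnits::deg2rad * 30.0;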
diff --git a/ftnoir_tracker_pt/ftnoir_tracker_pt_dialog.cpp b/ftnoir_tracker_pt/ftnoir_tracker_pt_dialog.cpp
index 9529e268..6cd6135c 100644
--- a/ftnoir_tracker_pt/ftnoir_tracker_pt_dialog.cpp
+++ b/ftnoir_tracker_pt/ftnoir_tracker_pt_dialog.cpp
@@ -22,13 +22,9 @@ using namespace std;
//-----------------------------------------------------------------------------
TrackerDialog::TrackerDialog()
: tracker(NULL),
- video_widget_dialog(NULL),
- timer(this),
- trans_calib_running(false)
+ timer(this),
+ trans_calib_running(false)
{
- qDebug()<<"TrackerDialog::TrackerDialog";
- setAttribute(Qt::WA_DeleteOnClose, false);
-
ui.setupUi( this );
vector<string> device_names;
@@ -147,6 +143,38 @@ void TrackerDialog::startstop_trans_calib(bool start)
}
}
+void TrackerDialog::poll_tracker_info()
+{
+ if (tracker)
+ {
+ QString to_print;
+
+ // display caminfo
+ CamInfo info;
+ tracker->get_cam_info(&info);
+ to_print = QString::number(info.res_x)+"x"+QString::number(info.res_y)+" @ "+QString::number(info.fps)+" FPS";
+ ui.caminfo_label->setText(to_print);
+
+ // display pointinfo
+ int n_points = tracker->get_n_points();
+ to_print = QString::number(n_points);
+ if (n_points == 3)
+ to_print += " OK!";
+ else
+ to_print += " BAD!";
+ ui.pointinfo_label->setText(to_print);
+
+ // update calibration
+ if (trans_calib_running) trans_calib_step();
+ }
+ else
+ {
+ QString to_print = "Tracker offline";
+ ui.caminfo_label->setText(to_print);
+ ui.pointinfo_label->setText(to_print);
+ }
+}
+
void TrackerDialog::trans_calib_step()
{
if (tracker)
@@ -197,7 +225,7 @@ void TrackerDialog::do_apply_without_saving(QAbstractButton*)
void TrackerDialog::doApply()
{
- save();
+ save();
}
void TrackerDialog::doCancel()
@@ -206,71 +234,6 @@ void TrackerDialog::doCancel()
close();
}
-void TrackerDialog::widget_destroyed(QObject* obj)
-{
- if (obj == video_widget_dialog) {
- // widget was / will be already deleted by Qt
- destroy_video_widget(false);
- }
-}
-
-void TrackerDialog::create_video_widget()
-{
- // this should not happen but better be sure
- if (video_widget_dialog) destroy_video_widget();
- if (!tracker) return;
-
- video_widget_dialog = new VideoWidgetDialog(this, tracker);
- video_widget_dialog->setAttribute( Qt::WA_DeleteOnClose );
- connect( video_widget_dialog, SIGNAL(destroyed(QObject*)), this, SLOT(widget_destroyed(QObject*)) );
- video_widget_dialog->show();
-}
-
-void TrackerDialog::destroy_video_widget(bool do_delete /*= true*/)
-{
- if (video_widget_dialog) {
- if (do_delete) delete video_widget_dialog;
- video_widget_dialog = NULL;
- }
-}
-
-void TrackerDialog::poll_tracker_info()
-{
- if (tracker)
- {
- QString to_print;
-
- // display caminfo
- CamInfo info;
- tracker->get_cam_info(&info);
- to_print = QString::number(info.res_x)+"x"+QString::number(info.res_y)+" @ "+QString::number(info.fps)+" FPS";
- ui.caminfo_label->setText(to_print);
-
- // display pointinfo
- int n_points = tracker->get_n_points();
- to_print = QString::number(n_points);
- if (n_points == 3)
- to_print += " OK!";
- else
- to_print += " BAD!";
- ui.pointinfo_label->setText(to_print);
-
- // update calibration
- if (trans_calib_running) trans_calib_step();
-
- // update videowidget
- if (video_widget_dialog) {
- video_widget_dialog->get_video_widget()->update_frame_and_points();
- }
- }
- else
- {
- QString to_print = "Tracker offline";
- ui.caminfo_label->setText(to_print);
- ui.pointinfo_label->setText(to_print);
- }
-}
-
void TrackerDialog::registerTracker(ITracker *t)
{
qDebug()<<"TrackerDialog:: Tracker registered";
@@ -285,7 +248,6 @@ void TrackerDialog::unRegisterTracker()
{
qDebug()<<"TrackerDialog:: Tracker un-registered";
tracker = NULL;
- destroy_video_widget();
ui.tcalib_button->setEnabled(false);
//ui.center_button->setEnabled(false);
}
diff --git a/ftnoir_tracker_pt/ftnoir_tracker_pt_dialog.h b/ftnoir_tracker_pt/ftnoir_tracker_pt_dialog.h
index dbb93f30..bff12dd0 100644
--- a/ftnoir_tracker_pt/ftnoir_tracker_pt_dialog.h
+++ b/ftnoir_tracker_pt/ftnoir_tracker_pt_dialog.h
@@ -23,13 +23,13 @@
//-----------------------------------------------------------------------------
// The dialog that shows up when the user presses "Settings"
-class TrackerDialog : public QWidget, Ui::UICPTClientControls, public ITrackerDialog
+class TrackerDialog : public QWidget, public ITrackerDialog
{
Q_OBJECT
public:
TrackerDialog();
- void registerTracker(ITracker *tracker);
- void unRegisterTracker();
+ void registerTracker(ITracker *tracker) override;
+ void unRegisterTracker() override;
void save();
void trans_calib_step();
@@ -40,14 +40,9 @@ public slots:
void do_apply_without_saving(QAbstractButton *);
void startstop_trans_calib(bool start);
- void widget_destroyed(QObject* obj);
- void create_video_widget();
void poll_tracker_info();
void set_model(int idx);
-
-protected:
- void destroy_video_widget(bool do_delete = true);
-
+private:
void set_model_clip();
void set_model_cap();
void set_model_custom();
@@ -56,7 +51,6 @@ protected:
settings s;
Tracker* tracker;
- VideoWidgetDialog* video_widget_dialog;
QTimer timer;
TranslationCalibrator trans_calib;
diff --git a/ftnoir_tracker_pt/point_extractor.cpp b/ftnoir_tracker_pt/point_extractor.cpp
index b0e29270..819bf5e8 100644
--- a/ftnoir_tracker_pt/point_extractor.cpp
+++ b/ftnoir_tracker_pt/point_extractor.cpp
@@ -20,7 +20,7 @@ PointExtractor::PointExtractor(){
//freopen("CON", "w", stderr);
}
// ----------------------------------------------------------------------------
-const vector<Vec2f>& PointExtractor::extract_points(Mat frame, float /*dt*/, bool draw_output)
+const vector<Vec2f>& PointExtractor::extract_points(Mat& frame)
{
const int W = frame.cols;
const int H = frame.rows;
@@ -60,7 +60,7 @@ const vector<Vec2f>& PointExtractor::extract_points(Mat frame, float /*dt*/, boo
threshold(frame_gray, frame_bin, t, 255, THRESH_BINARY);
threshold(frame_gray, frame_bin_low,std::max(float(1), t - (t*hyst)), 255, THRESH_BINARY);
- if(draw_output) frame_bin.copyTo(frame_bin_copy);
+ frame_bin.copyTo(frame_bin_copy);
if(frame_last.empty()){
frame_bin.copyTo(frame_last);
}else{
@@ -141,23 +141,21 @@ const vector<Vec2f>& PointExtractor::extract_points(Mat frame, float /*dt*/, boo
}
// draw output image
- if (draw_output) {
- vector<Mat> channels;
- if(secondary==0){
- frame_bin.setTo(170, frame_bin);
- channels.push_back(frame_gray + frame_bin);
- channels.push_back(frame_gray - frame_bin);
- channels.push_back(frame_gray - frame_bin);
- }else{
- frame_bin_copy.setTo(120, frame_bin_copy);
- frame_bin_low.setTo(90, frame_bin_low);
- channels.push_back(frame_gray + frame_bin_copy);
- channels.push_back(frame_gray + frame_last_and_low);
- channels.push_back(frame_gray + frame_bin_low);
- //channels.push_back(frame_gray + frame_bin);
- }
- merge(channels, frame);
- }
+ vector<Mat> channels;
+ if(secondary==0){
+ frame_bin.setTo(170, frame_bin);
+ channels.push_back(frame_gray + frame_bin);
+ channels.push_back(frame_gray - frame_bin);
+ channels.push_back(frame_gray - frame_bin);
+ }else{
+ frame_bin_copy.setTo(120, frame_bin_copy);
+ frame_bin_low.setTo(90, frame_bin_low);
+ channels.push_back(frame_gray + frame_bin_copy);
+ channels.push_back(frame_gray + frame_last_and_low);
+ channels.push_back(frame_gray + frame_bin_low);
+ //channels.push_back(frame_gray + frame_bin);
+ }
+ merge(channels, frame);
return points;
}
diff --git a/ftnoir_tracker_pt/point_extractor.h b/ftnoir_tracker_pt/point_extractor.h
index 8a76747b..21d548af 100644
--- a/ftnoir_tracker_pt/point_extractor.h
+++ b/ftnoir_tracker_pt/point_extractor.h
@@ -19,7 +19,7 @@ public:
// extracts points from frame and draws some processing info into frame, if draw_output is set
// dt: time since last call in seconds
// WARNING: returned reference is valid as long as object
- const std::vector<cv::Vec2f>& extract_points(cv::Mat frame, float dt, bool draw_output);
+ const std::vector<cv::Vec2f>& extract_points(cv::Mat &frame);
const std::vector<cv::Vec2f>& get_points() { return points; }
PointExtractor();
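Note that the comment above extract_points still mentions dt and draw_output even though the new signature drops both; the frame is now taken by reference and the diagnostic overlay is always merged back into it. A hypothetical call site under that assumption:

#include <opencv2/opencv.hpp>
#include <vector>
#include "point_extractor.h"

// The extractor draws its visualization channels into the caller's frame and
// returns a reference to its internal point list (normalized coordinates).
static void process_frame(PointExtractor& extractor, cv::Mat& frame)
{
    const std::vector<cv::Vec2f>& points = extractor.extract_points(frame);
    // frame now contains the merged 3-channel debug image;
    // the returned reference stays valid while the extractor does.
    (void) points;
}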
diff --git a/ftnoir_tracker_pt/point_tracker.cpp b/ftnoir_tracker_pt/point_tracker.cpp
index 5f57baf5..8a633c5d 100644
--- a/ftnoir_tracker_pt/point_tracker.cpp
+++ b/ftnoir_tracker_pt/point_tracker.cpp
@@ -33,10 +33,14 @@ static void set_row(Matx33f& m, int i, const Vec3f& v)
m(i,2) = v[2];
}
-// ----------------------------------------------------------------------------
+PointModel::PointModel() :
+ M01 { 0, 0, 0 },
+ M02 { 0, 0, 0 }
+{
+}
+
PointModel::PointModel(Vec3f M01, Vec3f M02)
- : M01(M01),
- M02(M02)
+ : M01(M01), M02(M02)
{
// calculate u
u = M01.cross(M02);
@@ -107,27 +111,31 @@ void PointTracker::reset()
X_CM = FrameTrafo();
}
-void PointTracker::track(const vector<Vec2f>& points, float f)
+void PointTracker::track(const vector<Vec2f>& projected_points, const PointModel& model)
{
- find_correspondences(points, f);
- (void) POSIT(f);
- //qDebug()<<"Number of POSIT iterations: "<<n_iter;
+ const PointOrder& order = find_correspondences(projected_points, model);
+ int iters = POSIT(model, order);
+ qDebug()<<"POSIT iterations:"<<iters;
}
-void PointTracker::find_correspondences(const std::vector<cv::Vec2f>& points, float f)
+PointTracker::PointOrder PointTracker::find_correspondences(const std::vector<cv::Vec2f>& projected_points, const PointModel& model)
{
// ... otherwise we look at the distance to the projection of the expected model points
// project model points under current pose
Vec2f p_exp[3];
- p_exp[0] = project(Vec3f(0,0,0), f);
- p_exp[1] = project(point_model->M01, f);
- p_exp[2] = project(point_model->M02, f);
+ p_exp[0] = project(Vec3f(0,0,0));
+ p_exp[1] = project(model.get_M01());
+ p_exp[2] = project(model.get_M02());
// set correspondences by minimum distance to projected model point
bool point_taken[PointModel::N_POINTS];
for (int i=0; i<PointModel::N_POINTS; ++i)
point_taken[i] = false;
+ PointOrder p;
+ for (int i=0; i<PointModel::N_POINTS; ++i)
+ p.points[i] = Vec2f(0, 0);
+
for (int i=0; i<PointModel::N_POINTS; ++i)
{
float min_sdist = 1e4;
@@ -135,7 +143,7 @@ void PointTracker::find_correspondences(const std::vector<cv::Vec2f>& points, fl
// find closest point to projected model point i
for (int j=0; j<PointModel::N_POINTS; ++j)
{
- Vec2f d = p_exp[i]-points[j];
+ Vec2f d = p_exp[i]-projected_points[j];
float sdist = d.dot(d);
if (sdist < min_sdist)
{
@@ -144,15 +152,16 @@ void PointTracker::find_correspondences(const std::vector<cv::Vec2f>& points, fl
}
}
// if one point is closest to more than one model point, abort
- if (point_taken[min_idx]) return;
+ if (point_taken[min_idx]) return p;
point_taken[min_idx] = true;
- p[i] = points[min_idx];
+ p.points[i] = projected_points[min_idx];
}
+ return p;
}
-int PointTracker::POSIT(float f)
+int PointTracker::POSIT(const PointModel& model, const PointOrder& order_)
{
// POSIT algorithm for coplanar points as presented in
// [Denis Oberkampf, Daniel F. DeMenthon, Larry S. Davis: "Iterative Pose Estimation Using Coplanar Feature Points"]
@@ -182,24 +191,26 @@ int PointTracker::POSIT(float f)
const int MAX_ITER = 100;
const float EPS_THRESHOLD = 1e-4;
+
+ const cv::Vec2f* order = order_.points;
int i=1;
for (; i<MAX_ITER; ++i)
{
- epsilon_1 = k.dot(point_model->M01)/Z0;
- epsilon_2 = k.dot(point_model->M02)/Z0;
+ epsilon_1 = k.dot(model.M01)/Z0;
+ epsilon_2 = k.dot(model.M02)/Z0;
// vector of scalar products <I0, M0i> and <J0, M0i>
- Vec2f I0_M0i(p[1][0]*(1.0 + epsilon_1) - p[0][0],
- p[2][0]*(1.0 + epsilon_2) - p[0][0]);
- Vec2f J0_M0i(p[1][1]*(1.0 + epsilon_1) - p[0][1],
- p[2][1]*(1.0 + epsilon_2) - p[0][1]);
+ Vec2f I0_M0i(order[1][0]*(1.0 + epsilon_1) - order[0][0],
+ order[2][0]*(1.0 + epsilon_2) - order[0][0]);
+ Vec2f J0_M0i(order[1][1]*(1.0 + epsilon_1) - order[0][1],
+ order[2][1]*(1.0 + epsilon_2) - order[0][1]);
// construct projection of I, J onto M0i plane: I0 and J0
- I0_coeff = point_model->P * I0_M0i;
- J0_coeff = point_model->P * J0_M0i;
- I0 = I0_coeff[0]*point_model->M01 + I0_coeff[1]*point_model->M02;
- J0 = J0_coeff[0]*point_model->M01 + J0_coeff[1]*point_model->M02;
+ I0_coeff = model.P * I0_M0i;
+ J0_coeff = model.P * J0_M0i;
+ I0 = I0_coeff[0]*model.M01 + I0_coeff[1]*model.M02;
+ J0 = J0_coeff[0]*model.M01 + J0_coeff[1]*model.M02;
// calculate u component of I, J
float II0 = I0.dot(I0);
@@ -219,11 +230,11 @@ int PointTracker::POSIT(float f)
}
// construct the two solutions
- I_1 = I0 + rho*cos(theta)*point_model->u;
- I_2 = I0 - rho*cos(theta)*point_model->u;
+ I_1 = I0 + rho*cos(theta)*model.u;
+ I_2 = I0 - rho*cos(theta)*model.u;
- J_1 = J0 + rho*sin(theta)*point_model->u;
- J_2 = J0 - rho*sin(theta)*point_model->u;
+ J_1 = J0 + rho*sin(theta)*model.u;
+ J_2 = J0 - rho*sin(theta)*model.u;
float norm_const = 1.0/norm(I_1); // all have the same norm
@@ -240,7 +251,7 @@ int PointTracker::POSIT(float f)
set_row(R_2, 2, I_2.cross(J_2));
// the single translation solution
- Z0 = norm_const * f;
+ Z0 = norm_const * focal_length;
// pick the rotation solution closer to the expected one
// in simple metric d(A,B) = || I - A * B^T ||
@@ -263,8 +274,8 @@ int PointTracker::POSIT(float f)
// apply results
X_CM.R = *R_current;
- X_CM.t[0] = p[0][0] * Z0/f;
- X_CM.t[1] = p[0][1] * Z0/f;
+ X_CM.t[0] = order[0][0] * Z0/focal_length;
+ X_CM.t[1] = order[0][1] * Z0/focal_length;
X_CM.t[2] = Z0;
return i;
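find_correspondences and POSIT now take the PointModel explicitly and exchange the ordered points through a PointOrder returned by value, instead of both reading and writing the shared member array p[]. A simplified sketch of the ordering step, assuming exactly three detections are passed in (which track() now guarantees by checking points.size() == PointModel::N_POINTS):

#include <opencv2/core/core.hpp>
#include <vector>

struct PointOrder { cv::Vec2f points[3]; };   // reduced stand-in for the real struct

// Match each expected (projected model) point to its nearest detection.
static PointOrder order_by_nearest(const std::vector<cv::Vec2f>& detected,
                                   const cv::Vec2f expected[3])
{
    PointOrder p;
    bool taken[3] = { false, false, false };
    for (int i = 0; i < 3; ++i)
        p.points[i] = cv::Vec2f(0, 0);
    for (int i = 0; i < 3; ++i)
    {
        float min_sdist = 1e4f;
        int min_idx = 0;
        for (int j = 0; j < 3; ++j)
        {
            const cv::Vec2f d = expected[i] - detected[j];
            const float sdist = d.dot(d);
            if (sdist < min_sdist) { min_sdist = sdist; min_idx = j; }
        }
        if (taken[min_idx])   // one detection matched twice: bail out early,
            return p;         // mirroring the early return in the diff
        taken[min_idx] = true;
        p.points[i] = detected[min_idx];
    }
    return p;
}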
diff --git a/ftnoir_tracker_pt/point_tracker.h b/ftnoir_tracker_pt/point_tracker.h
index a1f6f041..0339f392 100644
--- a/ftnoir_tracker_pt/point_tracker.h
+++ b/ftnoir_tracker_pt/point_tracker.h
@@ -14,7 +14,7 @@
#else
# include <memory>
#endif
-#include <list>
+#include <vector>
// ----------------------------------------------------------------------------
// Affine frame trafo
@@ -60,9 +60,10 @@ public:
static constexpr int N_POINTS = 3;
PointModel(cv::Vec3f M01, cv::Vec3f M02);
+ PointModel();
- const cv::Vec3f& get_M01() const { return M01; }
- const cv::Vec3f& get_M02() const { return M02; }
+ inline const cv::Vec3f& get_M01() const { return M01; }
+ inline const cv::Vec3f& get_M02() const { return M02; }
private:
cv::Vec3f M01; // M01 in model frame
@@ -86,27 +87,27 @@ class PointTracker
{
public:
PointTracker();
-
// track the pose using the set of normalized point coordinates (x pos in range -0.5:0.5)
// f : (focal length)/(sensor width)
// dt : time since last call
- void track(const std::vector<cv::Vec2f>& points, float f);
- std::shared_ptr<PointModel> point_model;
-
+ void track(const std::vector<cv::Vec2f>& projected_points, const PointModel& model);
FrameTrafo get_pose() const { return X_CM; }
void reset();
private:
- inline cv::Vec2f project(const cv::Vec3f& v_M, float f)
+ // the points in model order
+ typedef struct { cv::Vec2f points[PointModel::N_POINTS]; } PointOrder;
+ static constexpr float focal_length = 1.0f;
+
+ inline cv::Vec2f project(const cv::Vec3f& v_M)
{
cv::Vec3f v_C = X_CM * v_M;
- return cv::Vec2f(f*v_C[0]/v_C[2], f*v_C[1]/v_C[2]);
+ return cv::Vec2f(focal_length*v_C[0]/v_C[2], focal_length*v_C[1]/v_C[2]);
}
- void find_correspondences(const std::vector<cv::Vec2f>& points, float f);
- int POSIT(float f); // The POSIT algorithm, returns the number of iterations
+ PointOrder find_correspondences(const std::vector<cv::Vec2f>& projected_points, const PointModel &model);
+ int POSIT(const PointModel& point_model, const PointOrder& order); // The POSIT algorithm, returns the number of iterations
- cv::Vec2f p[PointModel::N_POINTS]; // the points in model order
FrameTrafo X_CM; // trafo from model to camera
};
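point_tracker.h also drops the caller-supplied focal length in favour of a fixed focal_length of 1, matching the removal of CamInfo::f in camera.h; the POSIT depth and translation are therefore expressed relative to that unit focal length. A sketch of the normalized pinhole projection behind project(), assuming image coordinates already scaled to roughly -0.5..0.5 of the frame width (as the drawing code in ftnoir_tracker_pt.cpp suggests):

#include <opencv2/core/core.hpp>

// f = (focal length) / (sensor width); with f fixed at 1 this reduces to X/Z, Y/Z.
static cv::Vec2f project_normalized(const cv::Vec3f& v_camera, float f = 1.0f)
{
    return cv::Vec2f(f * v_camera[0] / v_camera[2],
                     f * v_camera[1] / v_camera[2]);
}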
diff --git a/ftnoir_tracker_pt/pt_video_widget.cpp b/ftnoir_tracker_pt/pt_video_widget.cpp
index cb3dc48e..aefb8199 100644
--- a/ftnoir_tracker_pt/pt_video_widget.cpp
+++ b/ftnoir_tracker_pt/pt_video_widget.cpp
@@ -22,24 +22,6 @@ void PTVideoWidget::update_image(const cv::Mat& frame)
freshp = true;
}
-// ----------------------------------------------------------------------------
-VideoWidgetDialog::VideoWidgetDialog(QWidget *parent, FrameProvider* provider)
- : QDialog(parent),
- video_widget(NULL)
-{
- const int VIDEO_FRAME_WIDTH = 640;
- const int VIDEO_FRAME_HEIGHT = 480;
-
- video_widget = new PTVideoWidget(this, provider);
-
- QHBoxLayout* layout = new QHBoxLayout();
- layout->setContentsMargins(0, 0, 0, 0);
- layout->addWidget(video_widget);
- if (this->layout()) delete this->layout();
- setLayout(layout);
- resize(VIDEO_FRAME_WIDTH, VIDEO_FRAME_HEIGHT);
-}
-
void PTVideoWidget::update_and_repaint()
{
QMutexLocker foo(&mtx);
diff --git a/ftnoir_tracker_pt/pt_video_widget.h b/ftnoir_tracker_pt/pt_video_widget.h
index 1be5f5f2..de2c7efb 100644
--- a/ftnoir_tracker_pt/pt_video_widget.h
+++ b/ftnoir_tracker_pt/pt_video_widget.h
@@ -7,7 +7,6 @@
#pragma once
-#include "frame_observer.h"
#include <QObject>
#include <QTime>
#include <QDialog>
@@ -24,15 +23,16 @@
#include <QPainter>
#include <QPaintEvent>
#include <QTimer>
+#include <QMutex>
+#include <QMutexLocker>
-class PTVideoWidget : public QWidget, public FrameObserver
+class PTVideoWidget : public QWidget
{
Q_OBJECT
public:
- PTVideoWidget(QWidget *parent, FrameProvider* provider) :
+ PTVideoWidget(QWidget *parent) :
QWidget(parent),
- /* to avoid linker errors */ FrameObserver(provider),
freshp(false)
{
connect(&timer, SIGNAL(timeout()), this, SLOT(update_and_repaint()));
@@ -52,20 +52,5 @@ private:
QImage texture;
QTimer timer;
cv::Mat _frame;
- bool freshp;
-};
-
-// ----------------------------------------------------------------------------
-// A VideoWidget embedded in a dialog frame
-class VideoWidgetDialog : public QDialog
-{
- Q_OBJECT
-public:
- VideoWidgetDialog(QWidget *parent, FrameProvider* provider);
- virtual ~VideoWidgetDialog() {}
-
- PTVideoWidget* get_video_widget() { return video_widget; }
-
-private:
- PTVideoWidget* video_widget;
+ volatile bool freshp;
};