Diffstat (limited to 'tracker-pt')
-rw-r--r--  tracker-pt/CMakeLists.txt               |    5
-rw-r--r--  tracker-pt/Resources/Logo_IR.png        |  bin 0 -> 10386 bytes
-rw-r--r--  tracker-pt/Resources/cap_front.png      |  bin 0 -> 1164 bytes
-rw-r--r--  tracker-pt/Resources/cap_side.png       |  bin 0 -> 1733 bytes
-rw-r--r--  tracker-pt/Resources/clip_front.png     |  bin 0 -> 571 bytes
-rw-r--r--  tracker-pt/Resources/clip_side.png      |  bin 0 -> 2677 bytes
-rw-r--r--  tracker-pt/camera.cpp                   |  145
-rw-r--r--  tracker-pt/camera.h                     |   95
-rw-r--r--  tracker-pt/doc/index.htm                |  262
-rw-r--r--  tracker-pt/doc/logo.png                 |  bin 0 -> 10386 bytes
-rw-r--r--  tracker-pt/doc/ptrack.ico               |  bin 0 -> 4286 bytes
-rw-r--r--  tracker-pt/doc/settings1.png            |  bin 0 -> 25013 bytes
-rw-r--r--  tracker-pt/doc/settings2.png            |  bin 0 -> 26841 bytes
-rw-r--r--  tracker-pt/doc/settings3.png            |  bin 0 -> 29547 bytes
-rw-r--r--  tracker-pt/doc/style.css                |  131
-rw-r--r--  tracker-pt/ftnoir_tracker_pt.cpp        |  267
-rw-r--r--  tracker-pt/ftnoir_tracker_pt.h          |   86
-rw-r--r--  tracker-pt/ftnoir_tracker_pt_settings.h |   45
-rw-r--r--  tracker-pt/point_extractor.cpp          |  180
-rw-r--r--  tracker-pt/point_extractor.h            |   33
-rw-r--r--  tracker-pt/point_tracker.cpp            |  267
-rw-r--r--  tracker-pt/point_tracker.h              |  151
-rw-r--r--  tracker-pt/pt_video_widget.cpp          |   55
-rw-r--r--  tracker-pt/pt_video_widget.h            |   47
24 files changed, 1769 insertions(+), 0 deletions(-)
diff --git a/tracker-pt/CMakeLists.txt b/tracker-pt/CMakeLists.txt
new file mode 100644
index 00000000..49943917
--- /dev/null
+++ b/tracker-pt/CMakeLists.txt
@@ -0,0 +1,5 @@
+find_package(OpenCV 3.0)
+opentrack_boilerplate(opentrack-tracker-pt STATIC)
+target_link_libraries(opentrack-tracker-pt ${OpenCV_LIBS})
+target_include_directories(opentrack-tracker-pt SYSTEM PUBLIC ${OpenCV_INCLUDE_DIRS})
+link_with_dinput8(opentrack-tracker-pt)
diff --git a/tracker-pt/Resources/Logo_IR.png b/tracker-pt/Resources/Logo_IR.png
new file mode 100644
index 00000000..95032a25
--- /dev/null
+++ b/tracker-pt/Resources/Logo_IR.png
Binary files differ
diff --git a/tracker-pt/Resources/cap_front.png b/tracker-pt/Resources/cap_front.png
new file mode 100644
index 00000000..14207a67
--- /dev/null
+++ b/tracker-pt/Resources/cap_front.png
Binary files differ
diff --git a/tracker-pt/Resources/cap_side.png b/tracker-pt/Resources/cap_side.png
new file mode 100644
index 00000000..5ad4ee65
--- /dev/null
+++ b/tracker-pt/Resources/cap_side.png
Binary files differ
diff --git a/tracker-pt/Resources/clip_front.png b/tracker-pt/Resources/clip_front.png
new file mode 100644
index 00000000..04880138
--- /dev/null
+++ b/tracker-pt/Resources/clip_front.png
Binary files differ
diff --git a/tracker-pt/Resources/clip_side.png b/tracker-pt/Resources/clip_side.png
new file mode 100644
index 00000000..72667ac7
--- /dev/null
+++ b/tracker-pt/Resources/clip_side.png
Binary files differ
diff --git a/tracker-pt/camera.cpp b/tracker-pt/camera.cpp
new file mode 100644
index 00000000..63b401a8
--- /dev/null
+++ b/tracker-pt/camera.cpp
@@ -0,0 +1,145 @@
+/* Copyright (c) 2012 Patrick Ruoff
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ */
+
+#include "camera.h"
+#include <string>
+#include <QDebug>
+#include "opentrack-compat/sleep.hpp"
+
+void Camera::set_device_index(int index)
+{
+ if (desired_index != index)
+ {
+ desired_index = index;
+ _set_device_index();
+
+ // reset fps
+ dt_valid = 0;
+ dt_mean = 0;
+ active_index = index;
+ }
+}
+
+void Camera::set_fps(int fps)
+{
+ if (cam_desired.fps != fps)
+ {
+ cam_desired.fps = fps;
+ _set_fps();
+ }
+}
+
+void Camera::set_res(int x_res, int y_res)
+{
+ if (cam_desired.res_x != x_res || cam_desired.res_y != y_res)
+ {
+ cam_desired.res_x = x_res;
+ cam_desired.res_y = y_res;
+ _set_res();
+ }
+}
+
+bool Camera::get_info(CamInfo& ret)
+{
+ if (cam_info.res_x == 0 || cam_info.res_y == 0)
+ {
+ return false;
+ }
+ ret = cam_info;
+ return true;
+}
+
+bool Camera::get_frame(float dt, cv::Mat* frame)
+{
+ bool new_frame = _get_frame(frame);
+ // measure fps of valid frames
+ const float dt_smoothing_const = 0.95;
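+ // exponential moving average of the time between valid frames;
+ // dt_valid accumulates across failed reads so dropped frames don't bias the mean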
+ dt_valid += dt;
+ if (new_frame)
+ {
+ dt_mean = dt_smoothing_const * dt_mean + (1.0 - dt_smoothing_const) * dt_valid;
+ cam_info.fps = dt_mean > 1e-3 ? 1.0 / dt_mean : 0;
+ dt_valid = 0;
+ }
+ else
+ qDebug() << "pt camera: can't get frame";
+ return new_frame;
+}
+
+void CVCamera::start()
+{
+ stop();
+ cap = new cv::VideoCapture(desired_index);
+ _set_res();
+ _set_fps();
+ // extract camera info
+ if (cap->isOpened())
+ {
+ active_index = desired_index;
+ cam_info.res_x = 0;
+ cam_info.res_y = 0;
+ } else {
+ stop();
+ }
+}
+
+void CVCamera::stop()
+{
+ if (cap)
+ {
+ const bool opened = cap->isOpened();
+ if (opened)
+ {
+ qDebug() << "pt: freeing camera";
+ cap->release();
+ }
+ delete cap;
+ cap = nullptr;
+ // give opencv time to exit camera threads, etc.
+ if (opened)
+ portable::sleep(500);
+ qDebug() << "pt camera: assuming stopped";
+ }
+}
+
+bool CVCamera::_get_frame(cv::Mat* frame)
+{
+ if (cap && cap->isOpened())
+ {
+ cv::Mat img;
+ for (int i = 0; i < 100 && !cap->read(img); i++)
+ ;
+
+ if (img.empty())
+ return false;
+
+ *frame = img;
+ cam_info.res_x = img.cols;
+ cam_info.res_y = img.rows;
+ return true;
+ }
+ return false;
+}
+
+void CVCamera::_set_fps()
+{
+ if (cap) cap->set(CV_CAP_PROP_FPS, cam_desired.fps);
+}
+
+void CVCamera::_set_res()
+{
+ if (cap)
+ {
+ cap->set(CV_CAP_PROP_FRAME_WIDTH, cam_desired.res_x);
+ cap->set(CV_CAP_PROP_FRAME_HEIGHT, cam_desired.res_y);
+ }
+}
+void CVCamera::_set_device_index()
+{
+ if (desired_index != active_index)
+ stop();
+}
diff --git a/tracker-pt/camera.h b/tracker-pt/camera.h
new file mode 100644
index 00000000..e73d9dff
--- /dev/null
+++ b/tracker-pt/camera.h
@@ -0,0 +1,95 @@
+/* Copyright (c) 2012 Patrick Ruoff
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ */
+
+#pragma once
+
+#include <opencv2/core/core.hpp>
+#include <memory>
+#include <opencv2/highgui.hpp>
+#include <string>
+
+struct CamInfo
+{
+ CamInfo() : res_x(0), res_y(0), fps(0) {}
+
+ int res_x;
+ int res_y;
+ int fps;
+};
+
+// ----------------------------------------------------------------------------
+// Base class for cameras, calculates the frame rate
+class Camera
+{
+public:
+ Camera() : dt_valid(0), dt_mean(0), desired_index(0), active_index(-1) {}
+ virtual ~Camera() = 0;
+
+ // start/stop capturing
+ virtual void start() = 0;
+ virtual void stop() = 0;
+ void restart() { stop(); start(); }
+
+ // calls corresponding template methods and reinitializes frame rate calculation
+ void set_device_index(int index);
+ void set_fps(int fps);
+ void set_res(int x_res, int y_res);
+
+ // gets a frame from the camera, dt: time since last call in seconds
+ bool get_frame(float dt, cv::Mat* frame);
+
+ // WARNING: returned references are valid only as long as this object is alive
+ bool get_info(CamInfo &ret);
+ CamInfo get_desired() const { return cam_desired; }
+
+protected:
+ // get a frame from the camera
+ virtual bool _get_frame(cv::Mat* frame) = 0;
+
+ // update the camera using cam_desired, write res and f to cam_info if successful
+ virtual void _set_device_index() = 0;
+ virtual void _set_fps() = 0;
+ virtual void _set_res() = 0;
+private:
+ float dt_valid;
+ float dt_mean;
+protected:
+ int desired_index;
+ int active_index;
+ CamInfo cam_info;
+ CamInfo cam_desired;
+};
+inline Camera::~Camera() {}
+
+// ----------------------------------------------------------------------------
+// camera based on OpenCV's VideoCapture
+class CVCamera : public Camera
+{
+public:
+ CVCamera() : cap(NULL) {}
+ ~CVCamera() { stop(); }
+
+ void start() override;
+ void stop() override;
+
+ operator cv::VideoCapture*() { return cap; }
+
+protected:
+ bool _get_frame(cv::Mat* frame) override;
+ void _set_fps() override;
+ void _set_res() override;
+ void _set_device_index() override;
+private:
+ cv::VideoCapture* cap;
+};
+
+enum RotationType
+{
+ CLOCKWISE = 0,
+ ZERO = 1,
+ COUNTER_CLOCKWISE = 2
+};
diff --git a/tracker-pt/doc/index.htm b/tracker-pt/doc/index.htm
new file mode 100644
index 00000000..87b7356f
--- /dev/null
+++ b/tracker-pt/doc/index.htm
@@ -0,0 +1,262 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
+
+<head>
+ <title>FTNoIR PointTracker Help</title>
+
+ <meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1" />
+
+ <meta name="author"
+ content="Patrick Ruoff (C14)"/>
+ <meta name="keywords"
+ content="facetracknoir infrared point model tracker plugin"/>
+ <meta name="description"
+ content="Pointtracker plugin for FaceTrackNoIR"/>
+
+ <link rel="shortcut icon" href="ptrack.ico" type="image/vnd.microsoft.icon" />
+ <link rel="stylesheet" type="text/css" href="style.css" />
+</head>
+
+<body>
+<div id="navbar">
+<ul class="navbar">
+<li class="navbar"><a class="navbar" href="#about">About</a></li>
+<li class="navbar"><a class="navbar" href="#settings">Settings</a></li>
+<li class="navbar"><a class="navbar" href="#setup">Filter Setup</a></li>
+<li class="navbar"><a class="navbar" href="#support">Support</a></li>
+<li class="navbar"><a class="navbar" href="#changelog">ChangeLog</a></li>
+<li class="navbar"><a class="navbar" href="#build_instructions">Build Instructions</a></li>
+</ul>
+</div>
+
+<div id="content">
+<div style="text-align:center"><h1>FaceTrackNoIR PointTracker Plugin</h1><img src="logo.png" alt="PointTracker Plugin Logo" /></div>
+
+<a class="nav" id="about"></a>
+<h2>About</h2>
+<div class="indent">
+<p>
+PointTracker is a plugin for the free head tracking software <a href="http://facetracknoir.sourceforge.net">FaceTrackNoIR</a>
+which adds the capability to track a point model comprising 3 bright (typically IR) points,
+much like the popular free tracking software <a href="http://www.free-track.net/">Freetrack</a> does.<br/>
+It was created as a stable, modular alternative to Freetrack, which has some stability issues on newer systems and seems to be no longer actively developed.
+</p>
+</div>
+
+<a class="nav" id="settings"></a>
+<h2>Settings</h2>
+<div class="indent">
+<p>
+This section describes the various settings of the PointTracker plugin in detail.
+</p>
+
+<img src="settings1.png" alt="Settings Pane 1"/>
+<dl>
+<dt>Show VideoWidget</dt><dd>Whether the video widget is updated or not. It may save some performance to turn this off when not needed.</dd>
+<dt>Sleep time</dt><dd>Time the tracking thread sleeps after each processed image. Its inverse is an upper bound on the framerate you can achieve; for example, a sleep time of 10 ms caps the framerate near 100 FPS.
+(Check the framerate in the status region while the tracker is active; if the sleep time is too high, the framerate will decrease.)
+Low values result in more CPU load.</dd>
+<dt>Dynamic Pose Resolution</dt><dd>Whether the point correspondence and pose ambiguity are resolved using a more sophisticated dynamic algorithm (constant-velocity prediction) or a simple static resolution.
+Dynamic pose resolution can capture more extreme poses but may occasionally get stuck in a wrong pose estimate, so that a reset of the internal state becomes necessary.</dd>
+<dt>Auto-reset time</dt><dd>If no valid tracking result can be found when using dynamic pose resolution, the tracker will automatically reset its internal state (used for resolving the pose ambiguity and point correspondence)
+and return to a fail-safe initialization phase that assumes a neutral pose after this time.
+Decrease this time if you get stuck in a wrong pose too often.</dd>
+<dt>Reset</dt><dd>Manually resets the tracker's internal state used for dynamic pose resolution and returns to a fail-safe initialization phase that assumes a neutral pose.
+You may use this in case you get stuck in a wrong pose.</dd>
+<dt>Enable Axis ...</dt><dd>Which axis to use for FTNoIR.</dd>
+</dl>
+
+<img src="settings2.png" alt="Settings Pane 2"/>
+<dl>
+<dt>Device</dt><dd>The camera used for tracking.</dd>
+<dt>Resolution</dt><dd>The desired capture resolution. If your camera does not support the entered resolution, the true output resolution may be different or even invalid.
+You may check the true capture resolution in the status area while the tracker is running. A higher resolution results in more accurate point positions and will increase the
+stability of the tracking result, as long as the signal/noise ratio is sufficiently high.</dd>
+<dt>FPS</dt><dd>The desired capture framerate. Again, if your camera does not support the entered framerate, the true capture framerate may be different or invalid.
+You may check the true processing framerate in the status area while the tracker is running.</dd>
+<dt>F/W</dt><dd>The focal length of the camera divided by the sensor width (both in the same units).
+In case you don't have access to your camera's specifications, you can measure this yourself by placing a flat object of known width (for example a piece of cardboard) in front of the camera until it fills the whole image width.
+Then measure the distance between the object and the camera and divide by the object width (see the worked example below this list).</dd>
+<dt>VideoWidget</dt><dd>Shows a resizable stand-alone video widget with the same content as the integrated video widget in FTNoIR.
+The update rate is only 10 fps and may lag behind a bit. It is mainly useful during calibration of the point extraction. As with the integrated widget, this widget should only be shown when needed, to save resources.</dd>
+<dt>Roll Pitch Yaw...</dt><dd>The orientation of the camera relative to the reference frame.
+If these angles are not set up properly, the direction of translations may not be correct.
+Roll is treated in a special way since it is implemented as a frame rotation by +/- 90 deg that is transparent to the rest of the processing pipeline.
+</dd>
+<dt>Threshold</dt><dd>The threshold for point recognition. Areas above the threshold are shown in blue in the VideoWidget.
+Since point accuracy is best when the points are as big as possible in pixels, the threshold should be chosen as low as possible (stop before the contour of the points becomes "noisy").
+If small reflections are falsely classified as points, increasing the minimum point diameter (see below) may help.</dd>
+<dt>Min Diameter</dt><dd>Minimum diameter of blobs to be classified as a point-model point.</dd>
+<dt>Max Diameter</dt><dd>Maximum diameter of blobs to be classified as a point-model point.</dd>
+<dt>Status</dt><dd>The tracker's status is shown in this area while the tracker is running.
+The FPS shown here corresponds to the framerate of the whole tracker processing chain and may be lower than what your camera is able to provide when<br/>
+1. the processing does not get enough CPU time, or<br/>
+2. the sleep time of the tracking thread is set too high.<br/></dd>
+</dl>
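+<p>
+A worked example for the F/W measurement above (the numbers are illustrative only): if a piece of cardboard 200 mm wide exactly fills the image width at a distance of 270 mm from the camera, then F/W = 270 / 200 = 1.35.
+</p>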
+
+<img src="settings3.png" alt="Settings Pane 3"/>
+<dl>
+<dt>Model Selection and Dimensions ...</dt><dd>
+First select your model type (cap, clip, custom), then enter the dimensions of your model in millimeters here.<br/>
+For the custom setting, the coordinates of the two remaining model points have to be entered (reference point M0 is at (0,0,0)) in a pose where the model roughly faces the camera.
+For reference, the coordinates for the standard Freetrack clip are (0,40,-30), (0,-70,-80), and the ones for the cap are (40,-60,-100), (-40,-60,-100).<br/>
+When using a custom point-model configuration, the following restrictions should be observed:<br/>
+the plane in which the 3 points lie should never be parallel to the image plane, and M0-M1 and M0-M2 should be roughly perpendicular.</dd>
+
+<dt>Model Position</dt><dd>The vector from the model to the center of the head in the model frame. Can be calibrated automatically.</dd>
+<dt>Calibrate</dt><dd>In order to automatically calibrate the model-head offset, do the following:<br/>Press the Calibrate button, then look around without moving your shoulders (i.e. only rotation, no translation).
+Do not stay in one pose for too long. The current translation estimate is updated in real time. As soon as the values have stabilized sufficiently, press the Calibrate button again to stop the calibration process.</dd>
+</dl>
+</div>
+
+<a class="nav" id="setup"></a>
+<h2>Filter Setup</h2>
+<div class="indent">
+<p>
+This section describes how the FTNoIR filters work and what the recommended settings for PointTracker are.
+</p>
+<p>
+Filtering is always a tradeoff between stability, accuracy and responsiveness.
+</p>
+<p>
+The <q>Smoothing</q> filter in FTNoIR is just a simple average over the last n samples.
+Since this filter produces input lag no matter how fast the head movements are, it is recommended to turn it off by setting samples to 1.
+</p>
+<p>
+In the filter tab, it is recommended to select <q>Accela Filter Mk2</q>.
+Accela is a non-linear filter that works as follows:<br/>
+It looks at the difference between the new raw value <i>new_val</i> from the tracker and the last filtered value <i>old_val</i>
+and maps this difference through the customizable response function <i>f</i>:<br/>
+</p>
+<p style="text-align: center">
+<i>new_val = old_val + f(new_val - old_val) / reduction_factor</i>
+</p>
+<p>
+So by setting <i>f(x) = reduction_factor * x</i>, one will get no filtering at all.<br/>
+If you set lower values for small x, small deviations (usually noise) will get dampened.
+This results in a dynamic dead-zone around the current position.
+</p>
+<p>
+The last two points are used by accela to extrapolate for large deviations.
+So in order to get a fast unfiltered response for large deviations, the line connecting the last two points should have a slope >= <i>reduction_factor</i>.
+</p>
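+<p>
+To make the mapping concrete, here is a minimal C++ sketch of the accela-style update described above (an illustration, not the actual FTNoIR source); <i>response</i> stands for the user-defined curve <i>f</i>, and the sign handling assumes the curve is defined for non-negative input:
+</p>
+<pre class="indent"><code>
+#include &lt;cmath&gt;
+
+// sketch of: new_val = old_val + f(new_val - old_val) / reduction_factor
+double accela_step(double old_val, double raw_val, double reduction_factor,
+                   double (*response)(double))
+{
+    const double delta = raw_val - old_val;       // deviation from last output
+    const double sign  = delta &lt; 0 ? -1.0 : 1.0;  // mirror the curve for negative input
+    return old_val + sign * response(std::abs(delta)) / reduction_factor;
+}
+</code></pre>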
+<p>
+More aggressive accela settings than the FTNoIR defaults are recommended in order to decrease the filtering lag and fully use the potential of point tracking.<br/>
+My current settings are:
+</p>
+<pre class="indent"><code>
+[Accela]
+Reduction=20
+
+[Curves-Accela-Scaling-Rotation]
+point-count=4
+point-0-x=0.1
+point-0-y=0
+point-1-x=1.43
+point-1-y=2.45
+point-2-x=2.0
+point-2-y=5.44
+point-3-x=2.06
+point-3-y=6
+</code></pre>
+<p>
+The curve is not too different from the standard one (except that I like a small dynamic dead zone for steady aiming, which is why the curve has a slope of 0 at the beginning).<br/>
+However, the reduction factor is decreased to a value of 20 (compared to the standard value of 100). This implies that each value of the curve is effectively 5 times higher than in standard FTNoIR (see formula above), which means higher responsiveness but can also lead to jitter/shaking.<br/>
+Keep in mind that there are no <q>best filter settings</q>. Since filtering is always a compromise, it's a matter of personal taste, and
+playing around with the filter settings is highly recommended.
+</p>
+</div>
+
+<a class="nav" id="support"></a>
+<h2>Support</h2>
+<div class="indent">
+<p>
+For questions/feedback about the plugin, post to the <a href="https://sourceforge.net/projects/facetracknoir/forums">FTNoIR-Forum</a>.<br/>
+In case you like this plugin and would like to support the author, you may consider making a donation.
+</p>
+<div style="text-align:center">
+<form action="https://www.paypal.com/cgi-bin/webscr" method="post">
+<fieldset class="blind">
+<input type="hidden" name="cmd" value="_s-xclick"/>
+<input type="hidden" name="encrypted" value="-----BEGIN PKCS7-----MIIHJwYJKoZIhvcNAQcEoIIHGDCCBxQCAQExggEwMIIBLAIBADCBlDCBjjELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1Nb3VudGFpbiBWaWV3MRQwEgYDVQQKEwtQYXlQYWwgSW5jLjETMBEGA1UECxQKbGl2ZV9jZXJ0czERMA8GA1UEAxQIbGl2ZV9hcGkxHDAaBgkqhkiG9w0BCQEWDXJlQHBheXBhbC5jb20CAQAwDQYJKoZIhvcNAQEBBQAEgYCa+2zPZ+6vFPqveJsBIjFLpy54m7tl0AdojRr/K5qa3QJDyRBhGwGAP2jRihkmZFE2oKlfLpkz7nrwOQY/wFEPkggO+cABxUfjcQVpIupHEtwdV0hMklLs0RmACJy802yfi1yTiCpJ4hvWN+VfUI3gOiZ9uRZ3L4iGXES7xtqJbDELMAkGBSsOAwIaBQAwgaQGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQIeopHzcJ8XBOAgYCYJFyTejSplEOwF21aQ01qQOads9Z+RUVI+hlvM/pHTjimaZPKSis3poAeqv6wKn40DpLNxDnmcT+Y9KXhrV+Gy4GZCPaeNzq2vquQ2ZVN0fTr84QVmKqPkjMBGmJAHSLCcZswUddemJgoD1uyvS0kNbchvxw7gDXJnJeBRNyXXKCCA4cwggODMIIC7KADAgECAgEAMA0GCSqGSIb3DQEBBQUAMIGOMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDU1vdW50YWluIFZpZXcxFDASBgNVBAoTC1BheVBhbCBJbmMuMRMwEQYDVQQLFApsaXZlX2NlcnRzMREwDwYDVQQDFAhsaXZlX2FwaTEcMBoGCSqGSIb3DQEJARYNcmVAcGF5cGFsLmNvbTAeFw0wNDAyMTMxMDEzMTVaFw0zNTAyMTMxMDEzMTVaMIGOMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDU1vdW50YWluIFZpZXcxFDASBgNVBAoTC1BheVBhbCBJbmMuMRMwEQYDVQQLFApsaXZlX2NlcnRzMREwDwYDVQQDFAhsaXZlX2FwaTEcMBoGCSqGSIb3DQEJARYNcmVAcGF5cGFsLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwUdO3fxEzEtcnI7ZKZL412XvZPugoni7i7D7prCe0AtaHTc97CYgm7NsAtJyxNLixmhLV8pyIEaiHXWAh8fPKW+R017+EmXrr9EaquPmsVvTywAAE1PMNOKqo2kl4Gxiz9zZqIajOm1fZGWcGS0f5JQ2kBqNbvbg2/Za+GJ/qwUCAwEAAaOB7jCB6zAdBgNVHQ4EFgQUlp98u8ZvF71ZP1LXChvsENZklGswgbsGA1UdIwSBszCBsIAUlp98u8ZvF71ZP1LXChvsENZklGuhgZSkgZEwgY4xCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNTW91bnRhaW4gVmlldzEUMBIGA1UEChMLUGF5UGFsIEluYy4xEzARBgNVBAsUCmxpdmVfY2VydHMxETAPBgNVBAMUCGxpdmVfYXBpMRwwGgYJKoZIhvcNAQkBFg1yZUBwYXlwYWwuY29tggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEAgV86VpqAWuXvX6Oro4qJ1tYVIT5DgWpE692Ag422H7yRIr/9j/iKG4Thia/Oflx4TdL+IFJBAyPK9v6zZNZtBgPBynXb048hsP16l2vi0k5Q2JKiPDsEfBhGI+HnxLXEaUWAcVfCsQFvd2A1sxRr67ip5y2wwBelUecP3AjJ+YcxggGaMIIBlgIBATCBlDCBjjELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1Nb3VudGFpbiBWaWV3MRQwEgYDVQQKEwtQYXlQYWwgSW5jLjETMBEGA1UECxQKbGl2ZV9jZXJ0czERMA8GA1UEAxQIbGl2ZV9hcGkxHDAaBgkqhkiG9w0BCQEWDXJlQHBheXBhbC5jb20CAQAwCQYFKw4DAhoFAKBdMBgGCSqGSIb3DQEJAzELBgkqhkiG9w0BBwEwHAYJKoZIhvcNAQkFMQ8XDTEyMDkyMzA5NTcwOFowIwYJKoZIhvcNAQkEMRYEFG/qW7uo4R4m5uFYegcZaZsTPAcUMA0GCSqGSIb3DQEBAQUABIGAGygLfrR6IQbG2xZY2OrwKkfmRwiwtnXpLBnSbnWb7XxUOMhvM6962RiKBQBGP0+XYw0S9yu8ZHx7tqz/3bcMfGjtz7PwixYx6Rm8Z29ja78aUy5FmU7fc9yAWFxLHptSliK1dJBPxdQa9J2YSDvPQPAj+AdB9sJvqJoMoxTFGM4=-----END PKCS7-----
+"/>
+<input type="image" src="https://www.paypalobjects.com/en_US/i/btn/btn_donateCC_LG.gif" name="submit" alt="PayPal - The safer, easier way to pay online!"/>
+<img alt="" src="https://www.paypalobjects.com/de_DE/i/scr/pixel.gif" width="1" height="1"/>
+</fieldset>
+</form>
+</div>
+</div>
+
+<a class="nav" id="changelog"></a>
+<h2>ChangeLog</h2>
+<div class="indent">
+<h3>1.1</h3>
+<ul>
+<li>Added camera yaw and roll correction (intended for vertically mounted cameras)</li>
+<li>Improved point extraction algorithm, thanks to Michael Welter</li>
+<li>UI improvements: Select camera by device name, different VideoWidget architecture</li>
+<li>Bugfixes: Removed 99 FPS limitation</li>
+</ul>
+
+<h3>1.0</h3>
+<ul>
+<li>Added camera pitch correction</li>
+<li>Better communication with FTNoIR: output axis configuration, status report</li>
+</ul>
+
+<h3>1.0 beta</h3>
+<ul>
+<li>Switched to the videoInput library for capture. Desired capture resolution and fps can now be customized</li>
+<li>Introduced dynamic point-correspondence and POSIT-ambiguity resolution, which allows for the reconstruction of more extreme poses</li>
+<li>More convenient freetrack-like model dimension GUI</li>
+<li>Bugfixes: VideoWidget skipping frames, Timer resolution too low for accurate FPS measurement</li>
+</ul>
+</div>
+
+<a class="nav" id="build_instructions"></a>
+<h2>Build Instructions</h2>
+<div class="indent">
+<p>
+This section describes what you need to do in order to build PointTracker yourself.<br/>
+You can find the sources at the <a href="https://sourceforge.net/projects/ftnoirpt/">project site</a>
+or as part of the <a href="https://sourceforge.net/projects/facetracknoir/">FTNoIR sources</a>.
+</p>
+<p> The project was created with Visual Studio. </p>
+
+<h3>Dependencies</h3>
+<ul>
+<li>Qt 4.8.2 library</li>
+<li>Qt plugin for Visual Studio</li>
+<li>OpenCV 2.4 prebuilt for Windows</li>
+<li>Boost 1.47</li>
+</ul>
+
+<h3>Details</h3>
+<div class="indent">
+<h4>Common</h4>
+<ul>
+<li>setup environment variable "QTDIR" (example value "D:\Devel\Libs\Qt\4.8.2")</li>
+<li>add "%QTDIR%\bin" to PATH</li>
+<li>setup environment variable "BOOST_DIR" (example value "D:\Devel\Libs\boost_1_47_0")</li>
+<li>setup environment variable "OPENCV_DIR" (example value "D:\Devel\Libs\opencv\build")</li>
+</ul>
+<h4>Debug</h4>
+<p>opencv linked dynamically:</p>
+<ul>
+<li>add "%OPENCV_DIR%\x86\vc9\bin" to PATH</li>
+</ul>
+<p>(for a different Visual Studio version, change PATH and linker dependencies accordingly)</p>
+<h4>Release</h4>
+<p>opencv linked statically:</p>
+<ul>
+<li>custom-build a statically linked version of opencv with the build option BUILD_WITH_STATIC_CRT set to OFF</li>
+<li>copy the resulting libraries to "%OPENCV_DIR%\x86\vc9\static_lib"</li>
+</ul>
+<p>(for a different Visual Studio version, change PATH and linker dependencies accordingly)</p>
+</div>
+</div>
+
+</div>
+
+</body>
+</html>
\ No newline at end of file
diff --git a/tracker-pt/doc/logo.png b/tracker-pt/doc/logo.png
new file mode 100644
index 00000000..95032a25
--- /dev/null
+++ b/tracker-pt/doc/logo.png
Binary files differ
diff --git a/tracker-pt/doc/ptrack.ico b/tracker-pt/doc/ptrack.ico
new file mode 100644
index 00000000..c4b2aedc
--- /dev/null
+++ b/tracker-pt/doc/ptrack.ico
Binary files differ
diff --git a/tracker-pt/doc/settings1.png b/tracker-pt/doc/settings1.png
new file mode 100644
index 00000000..35b84c5c
--- /dev/null
+++ b/tracker-pt/doc/settings1.png
Binary files differ
diff --git a/tracker-pt/doc/settings2.png b/tracker-pt/doc/settings2.png
new file mode 100644
index 00000000..c6cfd1f3
--- /dev/null
+++ b/tracker-pt/doc/settings2.png
Binary files differ
diff --git a/tracker-pt/doc/settings3.png b/tracker-pt/doc/settings3.png
new file mode 100644
index 00000000..5922403d
--- /dev/null
+++ b/tracker-pt/doc/settings3.png
Binary files differ
diff --git a/tracker-pt/doc/style.css b/tracker-pt/doc/style.css
new file mode 100644
index 00000000..0c3d29a6
--- /dev/null
+++ b/tracker-pt/doc/style.css
@@ -0,0 +1,131 @@
+body {
+ width: 1000px;
+ font-size: 13px;
+ color: #000000;
+ padding: 0;
+ margin: 0 auto;
+ background: #444444;
+ font-family: verdana,arial;
+}
+
+table {
+ border-width: 3px;
+ border-color: #0000FF;
+ border-style: ridge;
+ margin-top: 5px;
+ background-color: #E0E0FF;
+}
+
+table.blind {
+ border: none;
+ background-color: #E6E6E6;
+}
+
+fieldset.blind {
+ border: none;
+}
+
+h1 { font-size: 160%; }
+h2 { font-size: 140%; }
+h3 { font-size: 115%; }
+
+.indent {
+ margin-left: 25px;
+}
+
+p
+{
+ margin-left: 10px;
+}
+
+li
+{
+ margin: 10px;
+}
+
+
+dl
+{
+ /*width: 80%;*/
+ border-bottom: 1px solid #999;
+}
+
+dt
+{
+ padding-top: 5px;
+ font-weight: bold;
+ border-top: 1px solid #999;
+}
+
+dd
+{
+ padding: 5px;
+}
+
+
+hr {
+ color: #688938;
+}
+
+a:link, a:visited {
+ color: #0000BF;
+}
+a:hover {
+ color: #0000FF;
+}
+
+a.nav {
+ position: relative;
+ top: -30px;
+ display: block;
+ visibility: hidden;
+}
+
+#navbar {
+ width: 1000px;
+ height: 30px;
+ background-color:#1a1a1b;
+ position: fixed;
+ margin: 0 auto;
+ padding: 0;
+}
+
+#navbar ul
+{
+ list-style-type: none;
+ margin: 0 auto;
+ padding: 0;
+ overflow: hidden;
+}
+
+#navbar li
+{
+ margin: 0 auto;
+ padding: 5px;
+ float:left;
+}
+
+#navbar a:link,a:visited
+{
+ display:block;
+ width:150px;
+ font-weight:bold;
+ color:#e85d02;
+ text-align:center;
+ /*padding:4px;*/
+ text-decoration:none;
+ /*text-transform:uppercase;*/
+}
+
+#navbar a:hover,a:active
+{
+ color:#ffffff;
+}
+
+#content {
+ background-color:#ffffff;
+ padding: 15px;
+ padding-top: 40px;
+ padding-right: 40px;
+ margin: 0 auto;
+}
diff --git a/tracker-pt/ftnoir_tracker_pt.cpp b/tracker-pt/ftnoir_tracker_pt.cpp
new file mode 100644
index 00000000..956f639e
--- /dev/null
+++ b/tracker-pt/ftnoir_tracker_pt.cpp
@@ -0,0 +1,267 @@
+/* Copyright (c) 2012 Patrick Ruoff
+ * Copyright (c) 2014-2015 Stanislaw Halik <sthalik@misaki.pl>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ */
+
+#include "ftnoir_tracker_pt.h"
+#include <QHBoxLayout>
+#include <cmath>
+#include <QDebug>
+#include <QFile>
+#include <QCoreApplication>
+#include "opentrack/camera-names.hpp"
+
+//#define PT_PERF_LOG //log performance
+
+//-----------------------------------------------------------------------------
+Tracker_PT::Tracker_PT()
+ : mutex(QMutex::Recursive),
+ commands(0),
+ video_widget(NULL),
+ video_frame(NULL),
+ ever_success(false)
+{
+ connect(s.b.get(), SIGNAL(saving()), this, SLOT(apply_settings()));
+}
+
+Tracker_PT::~Tracker_PT()
+{
+ set_command(ABORT);
+ wait();
+ delete video_widget;
+ video_widget = NULL;
+ if (video_frame->layout()) delete video_frame->layout();
+ camera.stop();
+}
+
+void Tracker_PT::set_command(Command command)
+{
+ //QMutexLocker lock(&mutex);
+ commands |= command;
+}
+
+void Tracker_PT::reset_command(Command command)
+{
+ //QMutexLocker lock(&mutex);
+ commands &= ~command;
+}
+
+bool Tracker_PT::get_focal_length(float& ret)
+{
+ static constexpr float pi = 3.1415926;
+ float fov_;
+ switch (s.fov)
+ {
+ default:
+ case 0:
+ fov_ = 56;
+ break;
+ case 1:
+ fov_ = 75;
+ break;
+ }
+
+ const double diag_fov = static_cast<int>(fov_) * pi / 180.f;
+ QMutexLocker l(&camera_mtx);
+ CamInfo info;
+ const bool res = camera.get_info(info);
+ if (res)
+ {
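+ // express the focal length in units of the image width:
+ // diag = image diagonal in width units, sqrt(1 + (h/w)^2);
+ // recover the horizontal FOV from the diagonal FOV, then f/w = 1/(2*tan(fov/2))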
+ const int w = info.res_x, h = info.res_y;
+ const double diag = sqrt(1. + h/(double)w * h/(double)w);
+ const double fov = 2.*atan(tan(diag_fov/2.0)/diag);
+ ret = .5 / tan(.5 * fov);
+ return true;
+ }
+ return false;
+}
+
+void Tracker_PT::run()
+{
+#ifdef PT_PERF_LOG
+ QFile log_file(QCoreApplication::applicationDirPath() + "/PointTrackerPerformance.txt");
+ if (!log_file.open(QIODevice::WriteOnly | QIODevice::Text)) return;
+ QTextStream log_stream(&log_file);
+#endif
+
+ apply_settings();
+
+ while((commands & ABORT) == 0)
+ {
+ const double dt = time.elapsed() * 1e-9;
+ time.start();
+ cv::Mat frame;
+ bool new_frame;
+
+ {
+ QMutexLocker l(&camera_mtx);
+ new_frame = camera.get_frame(dt, &frame);
+ }
+
+ if (new_frame && !frame.empty())
+ {
+ QMutexLocker lock(&mutex);
+
+ std::vector<cv::Vec2f> points = point_extractor.extract_points(frame);
+
+ // blobs are sorted in order of circularity
+ if (points.size() > PointModel::N_POINTS)
+ points.resize(PointModel::N_POINTS);
+
+ bool success = points.size() == PointModel::N_POINTS;
+
+ float fx;
+ if (!get_focal_length(fx))
+ continue;
+
+ if (success)
+ {
+ point_tracker.track(points, PointModel(s), fx, s.dynamic_pose, s.init_phase_timeout);
+ }
+
+ Affine X_CM = pose();
+
+ ever_success |= success;
+
+ {
+ Affine X_MH(cv::Matx33f::eye(), cv::Vec3f(s.t_MH_x, s.t_MH_y, s.t_MH_z)); // copy-pasted from data() below
+ if (X_MH.t[0] == 0 && X_MH.t[1] == 0 && X_MH.t[2] == 0)
+ {
+ int m = s.model_used;
+ switch (m)
+ {
+ default:
+ // cap
+ case 0: X_MH.t[0] = 0; X_MH.t[1] = 0; X_MH.t[2] = 0; break;
+ // clip
+ case 1: X_MH.t[0] = 135; X_MH.t[1] = 0; X_MH.t[2] = 0; break;
+ // left clip
+ case 2: X_MH.t[0] = -135; X_MH.t[1] = 0; X_MH.t[2] = 0; break;
+ }
+ }
+ Affine X_GH = X_CM * X_MH;
+ cv::Vec3f p = X_GH.t; // head (center?) position in global space
+ cv::Vec2f p_(p[0] / p[2] * fx, p[1] / p[2] * fx); // projected to screen
+ points.push_back(p_);
+ }
+
+ for (unsigned i = 0; i < points.size(); i++)
+ {
+ auto& p = points[i];
+ auto p2 = cv::Point(p[0] * frame.cols + frame.cols/2, -p[1] * frame.cols + frame.rows/2);
+ cv::Scalar color(0, 255, 0);
+ if (i == points.size()-1)
+ color = cv::Scalar(0, 0, 255);
+ cv::line(frame,
+ cv::Point(p2.x - 20, p2.y),
+ cv::Point(p2.x + 20, p2.y),
+ color,
+ 4);
+ cv::line(frame,
+ cv::Point(p2.x, p2.y - 20),
+ cv::Point(p2.x, p2.y + 20),
+ color,
+ 4);
+ }
+
+ video_widget->update_image(frame);
+ }
+#ifdef PT_PERF_LOG
+ log_stream<<"dt: "<<dt;
+ if (!frame.empty()) log_stream<<" fps: "<<camera.get_info().fps;
+ log_stream<<"\n";
+#endif
+ }
+ qDebug()<<"Tracker:: Thread stopping";
+}
+
+void Tracker_PT::apply_settings()
+{
+ qDebug()<<"Tracker:: Applying settings";
+ QMutexLocker l(&camera_mtx);
+ camera.set_device_index(camera_name_to_index("PS3Eye Camera"));
+ int res_x, res_y, cam_fps;
+ switch (s.camera_mode)
+ {
+ default:
+ case 0:
+ res_x = 640;
+ res_y = 480;
+ cam_fps = 75;
+ break;
+ case 1:
+ res_x = 640;
+ res_y = 480;
+ cam_fps = 60;
+ break;
+ case 2:
+ res_x = 320;
+ res_y = 240;
+ cam_fps = 189;
+ break;
+ case 3:
+ res_x = 320;
+ res_y = 240;
+ cam_fps = 120;
+ break;
+ }
+
+ camera.set_res(res_x, res_y);
+ camera.set_fps(cam_fps);
+ qDebug() << "camera start";
+ camera.start();
+ qDebug()<<"Tracker::apply ends";
+}
+
+void Tracker_PT::start_tracker(QFrame *parent_window)
+{
+ this->video_frame = parent_window;
+ video_frame->setAttribute(Qt::WA_NativeWindow);
+ video_frame->show();
+ video_widget = new PTVideoWidget(video_frame);
+ QHBoxLayout* video_layout = new QHBoxLayout(parent_window);
+ video_layout->setContentsMargins(0, 0, 0, 0);
+ video_layout->addWidget(video_widget);
+ video_frame->setLayout(video_layout);
+ video_widget->resize(video_frame->width(), video_frame->height());
+ start();
+}
+
+void Tracker_PT::data(double *data)
+{
+ if (ever_success)
+ {
+ Affine X_CM = pose();
+
+ Affine X_MH(cv::Matx33f::eye(), cv::Vec3f(s.t_MH_x, s.t_MH_y, s.t_MH_z));
+ Affine X_GH = X_CM * X_MH;
+
+ cv::Matx33f R = X_GH.R;
+ cv::Vec3f t = X_GH.t;
+
+ // translate rotation matrix from opengl (G) to roll-pitch-yaw (E) frame
+ // -z -> x, y -> z, x -> -y
+ cv::Matx33f R_EG(0, 0,-1,
+ -1, 0, 0,
+ 0, 1, 0);
+ R = R_EG * R * R_EG.t();
+
+ // extract rotation angles
+ float alpha, beta, gamma;
+ beta = atan2( -R(2,0), sqrt(R(2,1)*R(2,1) + R(2,2)*R(2,2)) );
+ alpha = atan2( R(1,0), R(0,0));
+ gamma = atan2( R(2,1), R(2,2));
+
+ // convert to degrees and assign the output axes
+ data[Yaw] = rad2deg * alpha;
+ data[Pitch] = -rad2deg * beta;
+ data[Roll] = rad2deg * gamma;
+ // get translation(s)
+ data[TX] = t[0] / 10.0; // convert to cm
+ data[TY] = t[1] / 10.0;
+ data[TZ] = t[2] / 10.0;
+ }
+}
diff --git a/tracker-pt/ftnoir_tracker_pt.h b/tracker-pt/ftnoir_tracker_pt.h
new file mode 100644
index 00000000..f73d106b
--- /dev/null
+++ b/tracker-pt/ftnoir_tracker_pt.h
@@ -0,0 +1,86 @@
+/* Copyright (c) 2012 Patrick Ruoff
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ */
+
+#ifndef FTNOIR_TRACKER_PT_H
+#define FTNOIR_TRACKER_PT_H
+
+#include "opentrack/plugin-api.hpp"
+#include "ftnoir_tracker_pt_settings.h"
+#include "camera.h"
+#include "point_extractor.h"
+#include "point_tracker.h"
+#include "pt_video_widget.h"
+#include "opentrack-compat/timer.hpp"
+#include "opentrack/opencv-camera-dialog.hpp"
+
+#include <QThread>
+#include <QMutex>
+#include <QMutexLocker>
+#include <QTime>
+#include <atomic>
+#include <memory>
+#include <vector>
+
+class TrackerDialog_PT;
+
+//-----------------------------------------------------------------------------
+// Constantly processes the tracking chain in a separate thread
+class Tracker_PT : public QThread, public ITracker
+{
+ Q_OBJECT
+ friend class camera_dialog<Tracker_PT>;
+ friend class TrackerDialog_PT;
+public:
+ Tracker_PT();
+ ~Tracker_PT() override;
+ void start_tracker(QFrame* parent_window) override;
+ void data(double* data) override;
+
+ Affine pose() { QMutexLocker lock(&mutex); return point_tracker.pose(); }
+ int get_n_points() { QMutexLocker lock(&mutex); return point_extractor.get_points().size(); }
+ bool get_cam_info(CamInfo* info) { QMutexLocker lock(&camera_mtx); return camera.get_info(*info); }
+public slots:
+ void apply_settings();
+protected:
+ void run() override;
+private:
+ QMutex mutex;
+ // thread commands
+ enum Command {
+ ABORT = 1<<0
+ };
+ void set_command(Command command);
+ void reset_command(Command command);
+
+ bool get_focal_length(float &ret);
+
+ volatile int commands;
+
+ QMutex camera_mtx;
+ CVCamera camera;
+ PointExtractor point_extractor;
+ PointTracker point_tracker;
+
+ PTVideoWidget* video_widget;
+ QFrame* video_frame;
+
+ settings_pt s;
+ Timer time;
+
+ volatile bool ever_success;
+
+ static constexpr double rad2deg = 180.0/3.14159265;
+ static constexpr double deg2rad = 3.14159265/180.0;
+};
+
+class TrackerDll : public Metadata
+{
+ QString name() { return QString("PointTracker 1.1"); }
+ QIcon icon() { return QIcon(":/Resources/Logo_IR.png"); }
+};
+
+#endif // FTNOIR_TRACKER_PT_H
diff --git a/tracker-pt/ftnoir_tracker_pt_settings.h b/tracker-pt/ftnoir_tracker_pt_settings.h
new file mode 100644
index 00000000..78626468
--- /dev/null
+++ b/tracker-pt/ftnoir_tracker_pt_settings.h
@@ -0,0 +1,45 @@
+/* Copyright (c) 2012 Patrick Ruoff
+ * Copyright (c) 2014-2015 Stanislaw Halik <sthalik@misaki.pl>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ */
+
+#ifndef FTNOIR_TRACKER_PT_SETTINGS_H
+#define FTNOIR_TRACKER_PT_SETTINGS_H
+
+#include "opentrack/options.hpp"
+using namespace options;
+
+struct settings_pt : opts
+{
+ value<int> threshold;
+ value<double> min_point_size, max_point_size;
+
+ value<int> t_MH_x, t_MH_y, t_MH_z;
+ value<int> fov, camera_mode;
+ value<int> model_used;
+
+ value<bool> dynamic_pose;
+ value<int> init_phase_timeout;
+ value<bool> auto_threshold;
+
+ settings_pt() :
+ opts("tracker-pt"),
+ threshold(b, "threshold-primary", 128),
+ min_point_size(b, "min-point-size", 0),
+ max_point_size(b, "max-point-size", 50),
+ t_MH_x(b, "model-centroid-x", 0),
+ t_MH_y(b, "model-centroid-y", 0),
+ t_MH_z(b, "model-centroid-z", 0),
+ fov(b, "camera-fov", 0),
+ camera_mode(b, "camera-mode", 0),
+ model_used(b, "model-used", 0),
+ dynamic_pose(b, "dynamic-pose-resolution", true),
+ init_phase_timeout(b, "init-phase-timeout", 500),
+ auto_threshold(b, "automatic-threshold", false)
+ {}
+};
+
+#endif //FTNOIR_TRACKER_PT_SETTINGS_H
diff --git a/tracker-pt/point_extractor.cpp b/tracker-pt/point_extractor.cpp
new file mode 100644
index 00000000..ec37dd00
--- /dev/null
+++ b/tracker-pt/point_extractor.cpp
@@ -0,0 +1,180 @@
+/* Copyright (c) 2012 Patrick Ruoff
+ * Copyright (c) 2014-2015 Stanislaw Halik <sthalik@misaki.pl>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ */
+
+#include "point_extractor.h"
+#include <QDebug>
+
+#ifdef DEBUG_EXTRACTION
+# include "opentrack-compat/timer.hpp"
+#endif
+
+PointExtractor::PointExtractor(){
+ //if (!AllocConsole()){}
+ //else SetConsoleTitle("debug");
+ //freopen("CON", "w", stdout);
+ //freopen("CON", "w", stderr);
+}
+// ----------------------------------------------------------------------------
+std::vector<cv::Vec2f> PointExtractor::extract_points(cv::Mat& frame)
+{
+ const int W = frame.cols;
+ const int H = frame.rows;
+
+ // convert to grayscale
+ cv::Mat frame_gray;
+ cv::cvtColor(frame, frame_gray, cv::COLOR_RGB2GRAY);
+
+ const double region_size_min = s.min_point_size;
+ const double region_size_max = s.max_point_size;
+
+ struct blob
+ {
+ double radius;
+ cv::Vec2d pos;
+ double confid;
+ bool taken;
+ double area;
+ blob(double radius, const cv::Vec2d& pos, double confid, double area) : radius(radius), pos(pos), confid(confid), taken(false), area(area)
+ {
+ //qDebug() << "radius" << radius << "pos" << pos[0] << pos[1] << "confid" << confid;
+ }
+ bool inside(const blob& other)
+ {
+ cv::Vec2d tmp = pos - other.pos;
+ return sqrt(tmp.dot(tmp)) < radius;
+ }
+ };
+
+ // mask for everything that passes the threshold (or: the upper threshold of the hysteresis)
+ cv::Mat frame_bin = cv::Mat::zeros(H, W, CV_8U);
+
+ std::vector<blob> blobs;
+ std::vector<std::vector<cv::Point>> contours;
+
+ const int thres = s.threshold;
+ if (!s.auto_threshold)
+ {
+ cv::Mat frame_bin_;
+ cv::threshold(frame_gray, frame_bin_, thres, 255, cv::THRESH_BINARY);
+ frame_bin.setTo(170, frame_bin_);
+ cv::findContours(frame_bin_, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
+ }
+ else
+ {
+ cv::Mat hist;
+ cv::calcHist(std::vector<cv::Mat> { frame_gray },
+ std::vector<int> { 0 },
+ cv::Mat(),
+ hist,
+ std::vector<int> { 256 },
+ std::vector<float> { 0, 256 },
+ false);
+ const int sz = hist.rows*hist.cols;
+ int val = 0;
+ int cnt = 0;
+ constexpr int min_pixels = 250;
+ const auto pixels_to_include = std::max<int>(0, min_pixels * s.threshold/100.);
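+ // walk the histogram from the brightest bin downward until at least
+ // pixels_to_include pixels are covered; that bin becomes the threshold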
+ for (int i = sz-1; i >= 0; i--)
+ {
+ cnt += hist.at<float>(i);
+ if (cnt >= pixels_to_include)
+ {
+ val = i;
+ break;
+ }
+ }
+ val *= 240./256.;
+ //qDebug() << "val" << val;
+
+ cv::Mat frame_bin_;
+ cv::threshold(frame_gray, frame_bin_, val, 255, cv::THRESH_BINARY);
+ frame_bin.setTo(170, frame_bin_);
+ cv::findContours(frame_bin_, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
+ }
+
+ int cnt = 0;
+
+ for (auto& c : contours)
+ {
+ if (cnt++ > 30)
+ break;
+
+ const auto m = cv::moments(cv::Mat(c));
+ const double area = m.m00;
+ if (area == 0.)
+ continue;
+ const cv::Vec2d pos(m.m10 / m.m00, m.m01 / m.m00);
+
+ double radius;
+// following based on OpenCV SimpleBlobDetector
+ {
+ std::vector<double> dists;
+ for (auto& k : c)
+ {
+ dists.push_back(cv::norm(pos - cv::Vec2d(k.x, k.y)));
+ }
+ std::sort(dists.begin(), dists.end());
+ radius = (dists[(dists.size() - 1)/2] + dists[dists.size()/2])/2;
+ }
+
+ if (radius < region_size_min || radius > region_size_max)
+ continue;
+
+ double confid = 1;
+ {
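+ // confidence is the ratio of the blob's minimum to maximum second-order
+ // central moment along its principal axes; close to 1 for circular blobs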
+ double denominator = std::sqrt(std::pow(2 * m.mu11, 2) + std::pow(m.mu20 - m.mu02, 2));
+ const double eps = 1e-2;
+ if (denominator > eps)
+ {
+ double cosmin = (m.mu20 - m.mu02) / denominator;
+ double sinmin = 2 * m.mu11 / denominator;
+ double cosmax = -cosmin;
+ double sinmax = -sinmin;
+
+ double imin = 0.5 * (m.mu20 + m.mu02) - 0.5 * (m.mu20 - m.mu02) * cosmin - m.mu11 * sinmin;
+ double imax = 0.5 * (m.mu20 + m.mu02) - 0.5 * (m.mu20 - m.mu02) * cosmax - m.mu11 * sinmax;
+ confid = imin / imax;
+ }
+ }
+// end SimpleBlobDetector
+
+ {
+ char buf[64];
+ sprintf(buf, "%.2fpx %.2fc", radius, confid);
+ cv::putText(frame, buf, cv::Point(pos[0]+30, pos[1]+20), cv::FONT_HERSHEY_DUPLEX, 1, cv::Scalar(0, 0, 255), 1);
+ }
+
+ blobs.push_back(blob(radius, pos, confid, area));
+ }
+
+ // clear old points
+ points.clear();
+
+ using b = const blob;
+ std::sort(blobs.begin(), blobs.end(), [](b& b1, b& b2) {return b1.confid > b2.confid;});
+
+ for (auto& b : blobs)
+ {
+ cv::Vec2f p((b.pos[0] - W/2)/W, -(b.pos[1] - H/2)/W);
+ points.push_back(p);
+ }
+
+ // draw output image
+ std::vector<cv::Mat> channels_;
+ cv::split(frame, channels_);
+ std::vector<cv::Mat> channels;
+ {
+ cv::Mat frame_bin__ = frame_bin * .5;
+ channels.push_back(channels_[0] + frame_bin__);
+ channels.push_back(channels_[1] - frame_bin__);
+ channels.push_back(channels_[2] - frame_bin__);
+ cv::merge(channels, frame);
+ }
+
+ return points;
+}
diff --git a/tracker-pt/point_extractor.h b/tracker-pt/point_extractor.h
new file mode 100644
index 00000000..b9368ab6
--- /dev/null
+++ b/tracker-pt/point_extractor.h
@@ -0,0 +1,33 @@
+/* Copyright (c) 2012 Patrick Ruoff
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ */
+
+#ifndef POINTEXTRACTOR_H
+#define POINTEXTRACTOR_H
+
+#include <opencv2/core/core.hpp>
+#include <opencv2/imgproc/imgproc.hpp>
+
+#include "ftnoir_tracker_pt_settings.h"
+
+// ----------------------------------------------------------------------------
+// Extracts points from an opencv image
+class PointExtractor
+{
+public:
+ // extracts points from the frame and draws some processing info into it
+ // WARNING: the reference returned by get_points() is valid only as long as this object is alive
+ std::vector<cv::Vec2f> extract_points(cv::Mat &frame);
+ const std::vector<cv::Vec2f>& get_points() { return points; }
+ PointExtractor();
+
+ settings_pt s;
+private:
+ std::vector<cv::Vec2f> points;
+};
+
+#endif //POINTEXTRACTOR_H
diff --git a/tracker-pt/point_tracker.cpp b/tracker-pt/point_tracker.cpp
new file mode 100644
index 00000000..924b75de
--- /dev/null
+++ b/tracker-pt/point_tracker.cpp
@@ -0,0 +1,267 @@
+/* Copyright (c) 2012 Patrick Ruoff
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ */
+
+#include "point_tracker.h"
+
+#include <vector>
+#include <algorithm>
+#include <cmath>
+
+#include <QDebug>
+
+const float PI = 3.14159265358979323846f;
+
+static void get_row(const cv::Matx33f& m, int i, cv::Vec3f& v)
+{
+ v[0] = m(i,0);
+ v[1] = m(i,1);
+ v[2] = m(i,2);
+}
+
+static void set_row(cv::Matx33f& m, int i, const cv::Vec3f& v)
+{
+ m(i,0) = v[0];
+ m(i,1) = v[1];
+ m(i,2) = v[2];
+}
+
+static bool d_vals_sort(const std::pair<float,int> a, const std::pair<float,int> b)
+{
+ return a.first < b.first;
+}
+
+void PointModel::get_d_order(const std::vector<cv::Vec2f>& points, int d_order[], cv::Vec2f d) const
+{
+ // fit line to orthographically projected points
+ std::vector<std::pair<float,int>> d_vals;
+ // get sort indices with respect to d scalar product
+ for (unsigned i = 0; i<points.size(); ++i)
+ d_vals.push_back(std::pair<float, int>(d.dot(points[i]), i));
+
+ std::sort(d_vals.begin(),
+ d_vals.end(),
+ d_vals_sort
+ );
+
+ for (unsigned i = 0; i<points.size(); ++i)
+ d_order[i] = d_vals[i].second;
+}
+
+
+PointTracker::PointTracker() : init_phase(true)
+{
+}
+
+PointTracker::PointOrder PointTracker::find_correspondences_previous(const std::vector<cv::Vec2f>& points, const PointModel& model, float f)
+{
+ PointTracker::PointOrder p;
+ p.points[0] = project(cv::Vec3f(0,0,0), f);
+ p.points[1] = project(model.M01, f);
+ p.points[2] = project(model.M02, f);
+
+ // set correspondences by minimum distance to projected model point
+ bool point_taken[PointModel::N_POINTS];
+ for (int i=0; i<PointModel::N_POINTS; ++i)
+ point_taken[i] = false;
+
+ for (int i=0; i<PointModel::N_POINTS; ++i)
+ {
+ float min_sdist = 0;
+ int min_idx = 0;
+ // find closest point to projected model point i
+ for (int j=0; j<PointModel::N_POINTS; ++j)
+ {
+ cv::Vec2f d = p.points[i]-points[j];
+ float sdist = d.dot(d);
+ if (sdist < min_sdist || j==0)
+ {
+ min_idx = j;
+ min_sdist = sdist;
+ }
+ }
+ // if one point is closest to more than one model point, fallback
+ if (point_taken[min_idx])
+ {
+ init_phase = true;
+ return find_correspondences(points, model);
+ }
+ point_taken[min_idx] = true;
+ p.points[i] = points[min_idx];
+ }
+ return p;
+}
+
+void PointTracker::track(const std::vector<cv::Vec2f>& points, const PointModel& model, float f, bool dynamic_pose, int init_phase_timeout)
+{
+ PointOrder order;
+
+ if (t.elapsed_ms() > init_phase_timeout)
+ {
+ t.start();
+ init_phase = true;
+ }
+
+ if (!dynamic_pose || init_phase)
+ order = find_correspondences(points, model);
+ else
+ order = find_correspondences_previous(points, model, f);
+
+ POSIT(model, order, f);
+ init_phase = false;
+ t.start();
+}
+
+PointTracker::PointOrder PointTracker::find_correspondences(const std::vector<cv::Vec2f>& points, const PointModel& model)
+{
+ // We do a simple freetrack-like sorting in the init phase...
+ // sort points
+ int point_d_order[PointModel::N_POINTS];
+ int model_d_order[PointModel::N_POINTS];
+ cv::Vec2f d(model.M01[0]-model.M02[0], model.M01[1]-model.M02[1]);
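+ // order both the observed image points and the model points by their
+ // scalar product with d; matching by rank gives the correspondence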
+ model.get_d_order(points, point_d_order, d);
+ // calculate d and d_order for simple freetrack-like point correspondence
+ model.get_d_order(std::vector<cv::Vec2f> {
+ cv::Vec2f{0,0},
+ cv::Vec2f(model.M01[0], model.M01[1]),
+ cv::Vec2f(model.M02[0], model.M02[1])
+ },
+ model_d_order,
+ d);
+ // set correspondences
+ PointOrder p;
+ for (int i=0; i<PointModel::N_POINTS; ++i)
+ p.points[model_d_order[i]] = points[point_d_order[i]];
+
+ return p;
+}
+
+int PointTracker::POSIT(const PointModel& model, const PointOrder& order_, float focal_length)
+{
+ // POSIT algorithm for coplanar points as presented in
+ // [Denis Oberkampf, Daniel F. DeMenthon, Larry S. Davis: "Iterative Pose Estimation Using Coplanar Feature Points"]
+ // we use the same notation as in the paper here
+
+ // The expected rotation used for resolving the ambiguity in POSIT:
+ // In every iteration step the rotation closer to R_expected is taken
+ cv::Matx33f R_expected = cv::Matx33f::eye();
+
+ // initial pose = last (predicted) pose
+ cv::Vec3f k;
+ get_row(R_expected, 2, k);
+ float Z0 = 1000.f;
+
+ float old_epsilon_1 = 0;
+ float old_epsilon_2 = 0;
+ float epsilon_1 = 1;
+ float epsilon_2 = 1;
+
+ cv::Vec3f I0, J0;
+ cv::Vec2f I0_coeff, J0_coeff;
+
+ cv::Vec3f I_1, J_1, I_2, J_2;
+ cv::Matx33f R_1, R_2;
+ cv::Matx33f* R_current;
+
+ const int MAX_ITER = 100;
+ const float EPS_THRESHOLD = 1e-4;
+
+ const cv::Vec2f* order = order_.points;
+
+ int i=1;
+ for (; i<MAX_ITER; ++i)
+ {
+ epsilon_1 = k.dot(model.M01)/Z0;
+ epsilon_2 = k.dot(model.M02)/Z0;
+
+ // vector of scalar products <I0, M0i> and <J0, M0i>
+ cv::Vec2f I0_M0i(order[1][0]*(1.0 + epsilon_1) - order[0][0],
+ order[2][0]*(1.0 + epsilon_2) - order[0][0]);
+ cv::Vec2f J0_M0i(order[1][1]*(1.0 + epsilon_1) - order[0][1],
+ order[2][1]*(1.0 + epsilon_2) - order[0][1]);
+
+ // construct projection of I, J onto M0i plane: I0 and J0
+ I0_coeff = model.P * I0_M0i;
+ J0_coeff = model.P * J0_M0i;
+ I0 = I0_coeff[0]*model.M01 + I0_coeff[1]*model.M02;
+ J0 = J0_coeff[0]*model.M01 + J0_coeff[1]*model.M02;
+
+ // calculate u component of I, J
+ float II0 = I0.dot(I0);
+ float IJ0 = I0.dot(J0);
+ float JJ0 = J0.dot(J0);
+ float rho, theta;
+ if (JJ0 == II0) {
+ rho = std::sqrt(std::abs(2*IJ0));
+ theta = -PI/4;
+ if (IJ0<0) theta *= -1;
+ }
+ else {
+ rho = sqrt(sqrt( (JJ0-II0)*(JJ0-II0) + 4*IJ0*IJ0 ));
+ theta = atan( -2*IJ0 / (JJ0-II0) );
+ if (JJ0 - II0 < 0) theta += PI;
+ theta /= 2;
+ }
+
+ // construct the two solutions
+ I_1 = I0 + rho*cos(theta)*model.u;
+ I_2 = I0 - rho*cos(theta)*model.u;
+
+ J_1 = J0 + rho*sin(theta)*model.u;
+ J_2 = J0 - rho*sin(theta)*model.u;
+
+ float norm_const = 1.0/cv::norm(I_1); // all have the same norm
+
+ // create rotation matrices
+ I_1 *= norm_const; J_1 *= norm_const;
+ I_2 *= norm_const; J_2 *= norm_const;
+
+ set_row(R_1, 0, I_1);
+ set_row(R_1, 1, J_1);
+ set_row(R_1, 2, I_1.cross(J_1));
+
+ set_row(R_2, 0, I_2);
+ set_row(R_2, 1, J_2);
+ set_row(R_2, 2, I_2.cross(J_2));
+
+ // the single translation solution
+ Z0 = norm_const * focal_length;
+
+ // pick the rotation solution closer to the expected one
+ // in simple metric d(A,B) = || I - A * B^T ||
+ float R_1_deviation = cv::norm(cv::Matx33f::eye() - R_expected * R_1.t());
+ float R_2_deviation = cv::norm(cv::Matx33f::eye() - R_expected * R_2.t());
+
+ if (R_1_deviation < R_2_deviation)
+ R_current = &R_1;
+ else
+ R_current = &R_2;
+
+ get_row(*R_current, 2, k);
+
+ // check for convergence condition
+ if (std::abs(epsilon_1 - old_epsilon_1) + std::abs(epsilon_2 - old_epsilon_2) < EPS_THRESHOLD)
+ break;
+ old_epsilon_1 = epsilon_1;
+ old_epsilon_2 = epsilon_2;
+ }
+
+ // apply results
+ X_CM.R = *R_current;
+ X_CM.t[0] = order[0][0] * Z0/focal_length;
+ X_CM.t[1] = order[0][1] * Z0/focal_length;
+ X_CM.t[2] = Z0;
+
+ //qDebug() << "iter:" << i;
+
+ return i;
+}
+
+cv::Vec2f PointTracker::project(const cv::Vec3f& v_M, float f)
+{
+ cv::Vec3f v_C = X_CM * v_M;
+ return cv::Vec2f(f*v_C[0]/v_C[2], f*v_C[1]/v_C[2]);
+}
diff --git a/tracker-pt/point_tracker.h b/tracker-pt/point_tracker.h
new file mode 100644
index 00000000..f4268486
--- /dev/null
+++ b/tracker-pt/point_tracker.h
@@ -0,0 +1,151 @@
+/* Copyright (c) 2012 Patrick Ruoff
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ */
+
+#ifndef POINTTRACKER_H
+#define POINTTRACKER_H
+
+#include <opencv2/core/core.hpp>
+#include <memory>
+#include <vector>
+#include "opentrack-compat/timer.hpp"
+#include "ftnoir_tracker_pt_settings.h"
+
+#include <QObject>
+
+// ----------------------------------------------------------------------------
+// Affine frame transform
+class Affine
+{
+public:
+ Affine() : R(cv::Matx33f::eye()), t(0,0,0) {}
+ Affine(const cv::Matx33f& R, const cv::Vec3f& t) : R(R),t(t) {}
+
+ cv::Matx33f R;
+ cv::Vec3f t;
+};
+
+inline Affine operator*(const Affine& X, const Affine& Y)
+{
+ return Affine(X.R*Y.R, X.R*Y.t + X.t);
+}
+
+inline Affine operator*(const cv::Matx33f& X, const Affine& Y)
+{
+ return Affine(X*Y.R, X*Y.t);
+}
+
+inline Affine operator*(const Affine& X, const cv::Matx33f& Y)
+{
+ return Affine(X.R*Y, X.t);
+}
+
+inline cv::Vec3f operator*(const Affine& X, const cv::Vec3f& v)
+{
+ return X.R*v + X.t;
+}
+
+
+// ----------------------------------------------------------------------------
+// Describes a 3-point model
+// nomenclature as in
+// [Denis Oberkampf, Daniel F. DeMenthon, Larry S. Davis: "Iterative Pose Estimation Using Coplanar Feature Points"]
+class PointModel
+{
+ friend class PointTracker;
+public:
+ static constexpr int N_POINTS = 3;
+
+ cv::Vec3f M01; // M01 in model frame
+ cv::Vec3f M02; // M02 in model frame
+
+ cv::Vec3f u; // unit vector perpendicular to M01,M02-plane
+
+ cv::Matx22f P;
+
+ PointModel(settings_pt& s)
+ {
+ set_model(s);
+ // calculate u
+ u = M01.cross(M02);
+ u /= norm(u);
+
+ // calculate projection matrix on M01,M02 plane
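+ // (P is the inverse of the Gram matrix of M01 and M02; it maps the scalar
+ // products <v,M01>, <v,M02> of a vector v to its coordinates in that basis)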
+ float s11 = M01.dot(M01);
+ float s12 = M01.dot(M02);
+ float s22 = M02.dot(M02);
+ P = 1.0/(s11*s22-s12*s12) * cv::Matx22f(s22, -s12, -s12, s11);
+ }
+
+ void set_model(settings_pt& s)
+ {
+ enum { Cap = 0, ClipRight = 1, ClipLeft = 2 };
+
+ switch (s.model_used)
+ {
+ default:
+ case Cap:
+ {
+ const double x = 60, y = 100, z = 120;
+ M01 = cv::Vec3f(-x, -y, -z);
+ M02 = cv::Vec3f(x, -y, -z);
+ break;
+ }
+ case ClipLeft:
+ case ClipRight:
+ {
+ const double a = 27, b = 43, c = 62, d = 74;
+ M01 = cv::Vec3f(0, b, -a);
+ M02 = cv::Vec3f(0, -c, -d);
+ break;
+ }
+ }
+ }
+
+ void get_d_order(const std::vector<cv::Vec2f>& points, int* d_order, cv::Vec2f d) const;
+};
+
+// ----------------------------------------------------------------------------
+// Tracks a 3-point model
+// implementing the POSIT algorithm for coplanar points as presented in
+// [Denis Oberkampf, Daniel F. DeMenthon, Larry S. Davis: "Iterative Pose Estimation Using Coplanar Feature Points"]
+class PointTracker
+{
+public:
+ PointTracker();
+ // track the pose using the set of normalized point coordinates (x pos in range -0.5:0.5)
+ // f : (focal length)/(sensor width)
+ // dt : time since last call
+ void track(const std::vector<cv::Vec2f>& projected_points, const PointModel& model, float f, bool dynamic_pose, int init_phase_timeout);
+ Affine pose() const { return X_CM; }
+ cv::Vec2f project(const cv::Vec3f& v_M, float f);
+ void reset(const Affine& pose)
+ {
+ X_CM = pose;
+ }
+private:
+ // the points in model order
+ struct PointOrder
+ {
+ cv::Vec2f points[PointModel::N_POINTS];
+ PointOrder()
+ {
+ for (int i = 0; i < PointModel::N_POINTS; i++)
+ points[i] = cv::Vec2f(0, 0);
+ }
+ };
+
+ PointOrder find_correspondences(const std::vector<cv::Vec2f>& projected_points, const PointModel &model);
+ PointOrder find_correspondences_previous(const std::vector<cv::Vec2f>& points, const PointModel &model, float f);
+ int POSIT(const PointModel& point_model, const PointOrder& order, float focal_length); // The POSIT algorithm, returns the number of iterations
+
+ Affine X_CM; // transform from model to camera
+
+ Timer t;
+ bool init_phase;
+};
+
+#endif //POINTTRACKER_H
diff --git a/tracker-pt/pt_video_widget.cpp b/tracker-pt/pt_video_widget.cpp
new file mode 100644
index 00000000..cbb7c268
--- /dev/null
+++ b/tracker-pt/pt_video_widget.cpp
@@ -0,0 +1,55 @@
+/* Copyright (c) 2012 Patrick Ruoff
+ * Copyright (c) 2015 Stanislaw Halik <sthalik@misaki.pl>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * 20130312, WVR: Add 7 lines to resizeGL after resize_frame. This should lower CPU-load.
+ */
+
+#include "pt_video_widget.h"
+
+void PTVideoWidget::update_image(const cv::Mat& frame)
+{
+ QMutexLocker foo(&mtx);
+
+ if (!freshp)
+ {
+ _frame = frame.clone();
+ freshp = true;
+ }
+}
+
+void PTVideoWidget::update_and_repaint()
+{
+ QImage qframe;
+ {
+ QMutexLocker foo(&mtx);
+ if (_frame.empty() || !freshp)
+ return;
+ qframe = QImage(_frame.cols, _frame.rows, QImage::Format_RGB888);
+ freshp = false;
+ uchar* data = qframe.bits();
+ const int pitch = qframe.bytesPerLine();
+ unsigned char *input = (unsigned char*) _frame.data;
+ const int chans = _frame.channels();
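+ // copy the OpenCV buffer into the QImage, swapping OpenCV's BGR
+ // channel order to the RGB order QImage::Format_RGB888 expects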
+ for (int y = 0; y < _frame.rows; y++)
+ {
+ const int step = y * _frame.step;
+ const int pitch_ = y * pitch;
+ for (int x = 0; x < _frame.cols; x++)
+ {
+ data[pitch_ + x * 3 + 0] = input[step + x * chans + 2];
+ data[pitch_ + x * 3 + 1] = input[step + x * chans + 1];
+ data[pitch_ + x * 3 + 2] = input[step + x * chans + 0];
+ }
+ }
+ }
+ qframe = qframe.scaled(size(), Qt::IgnoreAspectRatio, Qt::FastTransformation);
+ {
+ QMutexLocker foo(&mtx);
+ texture = qframe;
+ }
+ update();
+}
diff --git a/tracker-pt/pt_video_widget.h b/tracker-pt/pt_video_widget.h
new file mode 100644
index 00000000..af1d60fd
--- /dev/null
+++ b/tracker-pt/pt_video_widget.h
@@ -0,0 +1,47 @@
+/* Copyright (c) 2012 Patrick Ruoff
+ * Copyright (c) 2014 Stanislaw Halik <sthalik@misaki.pl>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ */
+
+#pragma once
+
+#include <QObject>
+#include <QWidget>
+#include <opencv2/core/core.hpp>
+#include <memory>
+#include <QPainter>
+#include <QPaintEvent>
+#include <QTimer>
+#include <QMutex>
+#include <QMutexLocker>
+
+class PTVideoWidget : public QWidget
+{
+ Q_OBJECT
+
+public:
+ PTVideoWidget(QWidget *parent) :
+ QWidget(parent),
+ freshp(false)
+ {
+ connect(&timer, SIGNAL(timeout()), this, SLOT(update_and_repaint()));
+ timer.start(40);
+ }
+ void update_image(const cv::Mat &frame);
+protected slots:
+ void paintEvent( QPaintEvent* e ) {
+ QMutexLocker foo(&mtx);
+ QPainter painter(this);
+ painter.drawImage(e->rect(), texture);
+ }
+ void update_and_repaint();
+private:
+ QMutex mtx;
+ QImage texture;
+ QTimer timer;
+ cv::Mat _frame;
+ bool freshp;
+};