path: root/tracker-pt
author    Stanislaw Halik <sthalik@misaki.pl>    2016-01-06 20:07:13 +0100
committer Stanislaw Halik <sthalik@misaki.pl>    2016-01-06 20:16:18 +0100
commit    82f3d7373234cc0db79a22d476cb54b5eda7a0ea (patch)
tree      65ee0194ad064cc470f95f7ca8efd533b089ca96 /tracker-pt
parent    7e3807d048c5e0a8e0aa64fb49807bf5dfd11fc1 (diff)
parent    f02baa0868f219076a641634625f7c032d3a9eef (diff)
Merge branch 'unstable' into trackhat
* unstable: (140 commits)
  tracker/pt: increase auto threshold bucket size again
  tracker/pt: limit max amount of extracted blobs
  gui: don't update main window if we're minimized
  tracker/pt: only show widget if the frame is visible
  tracker/pt: don't resize twice in widget
  freetrack/games: regen
  contrib/make-csv: perl sort isn't stable, don't ignore case
  tracker/pt: avoid widget temp QImage allocation
  spline-widget: oops, pass by reference
  tracker/pt: don't allocate temporary dynamic size arrays
  tracker/pt: don't copy points array needlessly
  tracker/pt: don't allocate temporary frame
  tracker/pt: cv::Mat::at<T> is slow, use cv::Mat::ptr
  tracker/pt: avoid widget malloc when able
  tracker/pt: optimize widget
  tracker/pt: update video widget at 40 -> 50 ms
  cmake/mingw-w64: update
  tracker/pt: reduce mutex contention
  gui: fix left margin
  tracker/pt: remove krap
  tracker/pt: move ctor out of the loop
  tracker/pt: nix unused
  tracker/pt: don't fill mask on frame
  pose-widget: also bilinear interpolation of alpha value
  ui: adjust margin
  ui: make more compact
  glwidget: use transparent octopus background
  api/mat: fix typos/breakage
  api/joy: refresh only manually on certain events
  pt: histogram more granular 6 -> 8
  cmake/api: link with strmiids.lib on win32
  tracker/pt: reduce auto thresholding histogram bucket size
  api/keys: prevent idempotent keys
  api/joy: move from header
  api/joy: prevent idempotent keypressed passed to receiver
  compat/options: get rid of std::string usage
  compat/options: move from header
  gui/settings: set parent, otherwise not modal
  gui/settings: don't forget to show a modal dialog before executing
  gui/main: don't raise a new window, it's enough to set visible
  api/joy: speed up poll_axis path
  api/joy: nix static, now that we're not a singleton
  tracker/joy: adapt to non-singleton joy worker
  joystick: no longer singleton, use fake window handle
  api/keys: use a fake window for DirectInput handle
  gui/keys: allow for pausing global keystrokes for options dialog
  api/keys: nix tautological #ifdef
  contrib/aruco: oops, right extension
  contrib/aruco: use @frost555's marker image
  api/camera-names: move to compat/
  ...
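
One recurring theme in the list above is replacing cv::Mat::at<T>() with raw pointer access ("cv::Mat::at<T> is slow, use cv::Mat::ptr"). A minimal editorial sketch of that pattern, not taken from the commit itself (function names are illustrative):

    #include <opencv2/core.hpp>

    // Slow: at<>() recomputes the element address (and bounds-checks in debug
    // builds) on every single access.
    int sum_at(const cv::Mat& gray)   // gray: CV_8UC1
    {
        int acc = 0;
        for (int y = 0; y < gray.rows; y++)
            for (int x = 0; x < gray.cols; x++)
                acc += gray.at<unsigned char>(y, x);
        return acc;
    }

    // Faster: fetch the row pointer once per row, then index directly.
    int sum_ptr(const cv::Mat& gray)
    {
        int acc = 0;
        for (int y = 0; y < gray.rows; y++)
        {
            const unsigned char* row = gray.ptr<unsigned char>(y);
            for (int x = 0; x < gray.cols; x++)
                acc += row[x];
        }
        return acc;
    }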
Diffstat (limited to 'tracker-pt')
-rw-r--r--  tracker-pt/camera.cpp                   10
-rw-r--r--  tracker-pt/ftnoir_tracker_pt.cpp        71
-rw-r--r--  tracker-pt/ftnoir_tracker_pt.h           6
-rw-r--r--  tracker-pt/ftnoir_tracker_pt_settings.h  2
-rw-r--r--  tracker-pt/point_extractor.cpp          68
-rw-r--r--  tracker-pt/point_extractor.h            13
-rw-r--r--  tracker-pt/point_tracker.cpp             1
-rw-r--r--  tracker-pt/point_tracker.h              10
-rw-r--r--  tracker-pt/pt_video_widget.cpp          41
-rw-r--r--  tracker-pt/pt_video_widget.h             5
10 files changed, 108 insertions, 119 deletions
diff --git a/tracker-pt/camera.cpp b/tracker-pt/camera.cpp
index 63b401a8..600ab26a 100644
--- a/tracker-pt/camera.cpp
+++ b/tracker-pt/camera.cpp
@@ -110,16 +110,14 @@ bool CVCamera::_get_frame(cv::Mat* frame)
{
if (cap && cap->isOpened())
{
- cv::Mat img;
- for (int i = 0; i < 100 && !cap->read(img); i++)
+ for (int i = 0; i < 100 && !cap->read(*frame); i++)
;;
- if (img.empty())
+ if (frame->empty())
return false;
- *frame = img;
- cam_info.res_x = img.cols;
- cam_info.res_y = img.rows;
+ cam_info.res_x = frame->cols;
+ cam_info.res_y = frame->rows;
return true;
}
return false;
diff --git a/tracker-pt/ftnoir_tracker_pt.cpp b/tracker-pt/ftnoir_tracker_pt.cpp
index 956f639e..a85e3bc0 100644
--- a/tracker-pt/ftnoir_tracker_pt.cpp
+++ b/tracker-pt/ftnoir_tracker_pt.cpp
@@ -12,14 +12,15 @@
#include <QDebug>
#include <QFile>
#include <QCoreApplication>
-#include "opentrack/camera-names.hpp"
+#include "opentrack-compat/camera-names.hpp"
+#include "opentrack-compat/sleep.hpp"
+#include <functional>
//#define PT_PERF_LOG //log performance
//-----------------------------------------------------------------------------
Tracker_PT::Tracker_PT()
- : mutex(QMutex::Recursive),
- commands(0),
+ : commands(0),
video_widget(NULL),
video_frame(NULL),
ever_success(false)
@@ -34,6 +35,8 @@ Tracker_PT::~Tracker_PT()
delete video_widget;
video_widget = NULL;
if (video_frame->layout()) delete video_frame->layout();
+ // fast start/stop causes breakage
+ portable::sleep(1000);
camera.stop();
}
@@ -88,44 +91,40 @@ void Tracker_PT::run()
#endif
apply_settings();
+ cv::Mat frame_;
while((commands & ABORT) == 0)
{
const double dt = time.elapsed() * 1e-9;
time.start();
- cv::Mat frame;
bool new_frame;
{
QMutexLocker l(&camera_mtx);
new_frame = camera.get_frame(dt, &frame);
+ if (frame.rows != frame_.rows || frame.cols != frame_.cols)
+ frame_ = cv::Mat(frame.rows, frame.cols, CV_8UC3);
+ frame.copyTo(frame_);
}
- if (new_frame && !frame.empty())
+ if (new_frame && !frame_.empty())
{
- QMutexLocker lock(&mutex);
-
- std::vector<cv::Vec2f> points = point_extractor.extract_points(frame);
-
- // blobs are sorted in order of circularity
- if (points.size() > PointModel::N_POINTS)
- points.resize(PointModel::N_POINTS);
-
- bool success = points.size() == PointModel::N_POINTS;
+ const auto& points = point_extractor.extract_points(frame_);
float fx;
if (!get_focal_length(fx))
continue;
+
+ const bool success = points.size() >= PointModel::N_POINTS;
if (success)
{
point_tracker.track(points, PointModel(s), fx, s.dynamic_pose, s.init_phase_timeout);
+ ever_success = true;
}
Affine X_CM = pose();
- ever_success |= success;
-
{
Affine X_MH(cv::Matx33f::eye(), cv::Vec3f(s.t_MH_x, s.t_MH_y, s.t_MH_z)); // just copy pasted these lines from below
if (X_MH.t[0] == 0 && X_MH.t[1] == 0 && X_MH.t[2] == 0)
@@ -142,38 +141,39 @@ void Tracker_PT::run()
case 2: X_MH.t[0] = -135; X_MH.t[1] = 0; X_MH.t[2] = 0; break;
}
}
- Affine X_GH = X_CM * X_MH;
- cv::Vec3f p = X_GH.t; // head (center?) position in global space
- cv::Vec2f p_(p[0] / p[2] * fx, p[1] / p[2] * fx); // projected to screen
- points.push_back(p_);
}
- for (unsigned i = 0; i < points.size(); i++)
+
+ std::function<void(const cv::Vec2f&, const cv::Scalar)> fun = [&](const cv::Vec2f& p, const cv::Scalar color)
{
- auto& p = points[i];
- auto p2 = cv::Point(p[0] * frame.cols + frame.cols/2, -p[1] * frame.cols + frame.rows/2);
- cv::Scalar color(0, 255, 0);
- if (i == points.size()-1)
- color = cv::Scalar(0, 0, 255);
- cv::line(frame,
+ auto p2 = cv::Point(p[0] * frame_.cols + frame_.cols/2, -p[1] * frame_.cols + frame_.rows/2);
+ cv::line(frame_,
cv::Point(p2.x - 20, p2.y),
cv::Point(p2.x + 20, p2.y),
color,
4);
- cv::line(frame,
+ cv::line(frame_,
cv::Point(p2.x, p2.y - 20),
cv::Point(p2.x, p2.y + 20),
color,
- 4);
+ 4);
+ };
+
+ for (unsigned i = 0; i < points.size(); i++)
+ {
+ fun(points[i], cv::Scalar(0, 255, 0));
+ }
+
+ {
+ Affine X_MH(cv::Matx33f::eye(), cv::Vec3f(s.t_MH_x, s.t_MH_y, s.t_MH_z)); // just copy pasted these lines from below
+ Affine X_GH = X_CM * X_MH;
+ cv::Vec3f p = X_GH.t; // head (center?) position in global space
+ cv::Vec2f p_(p[0] / p[2] * fx, p[1] / p[2] * fx); // projected to screen
+ fun(p_, cv::Scalar(0, 0, 255));
}
- video_widget->update_image(frame);
+ video_widget->update_image(frame_);
}
-#ifdef PT_PERF_LOG
- log_stream<<"dt: "<<dt;
- if (!frame.empty()) log_stream<<" fps: "<<camera.get_info().fps;
- log_stream<<"\n";
-#endif
}
qDebug()<<"Tracker:: Thread stopping";
}
@@ -213,6 +213,7 @@ void Tracker_PT::apply_settings()
camera.set_fps(cam_fps);
qDebug() << "camera start";
camera.start();
+ frame = cv::Mat();
qDebug()<<"Tracker::apply ends";
}
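
The run() hunk above keeps one long-lived frame_ buffer and reallocates it only when the incoming frame changes size. The same pattern in isolation (an editorial sketch, not code from the commit):

    #include <opencv2/core.hpp>

    // Copy src into a reused destination buffer. cv::Mat::copyTo() calls create()
    // on the destination, which is a no-op when size and type already match, so
    // the steady state performs no allocation.
    void copy_into_reused(const cv::Mat& src, cv::Mat& dst)
    {
        if (dst.rows != src.rows || dst.cols != src.cols || dst.type() != src.type())
            dst = cv::Mat(src.rows, src.cols, src.type());  // reallocate only on size change
        src.copyTo(dst);
    }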
diff --git a/tracker-pt/ftnoir_tracker_pt.h b/tracker-pt/ftnoir_tracker_pt.h
index f73d106b..dff0c30a 100644
--- a/tracker-pt/ftnoir_tracker_pt.h
+++ b/tracker-pt/ftnoir_tracker_pt.h
@@ -40,15 +40,14 @@ public:
void start_tracker(QFrame* parent_window) override;
void data(double* data) override;
- Affine pose() { QMutexLocker lock(&mutex); return point_tracker.pose(); }
- int get_n_points() { QMutexLocker lock(&mutex); return point_extractor.get_points().size(); }
+ Affine pose() { return point_tracker.pose(); }
+ int get_n_points() { return point_extractor.get_points().size(); }
bool get_cam_info(CamInfo* info) { QMutexLocker lock(&camera_mtx); return camera.get_info(*info); }
public slots:
void apply_settings();
protected:
void run() override;
private:
- QMutex mutex;
// thread commands
enum Command {
ABORT = 1<<0
@@ -70,6 +69,7 @@ private:
settings_pt s;
Timer time;
+ cv::Mat frame;
volatile bool ever_success;
diff --git a/tracker-pt/ftnoir_tracker_pt_settings.h b/tracker-pt/ftnoir_tracker_pt_settings.h
index 78626468..85f068fe 100644
--- a/tracker-pt/ftnoir_tracker_pt_settings.h
+++ b/tracker-pt/ftnoir_tracker_pt_settings.h
@@ -9,7 +9,7 @@
#ifndef FTNOIR_TRACKER_PT_SETTINGS_H
#define FTNOIR_TRACKER_PT_SETTINGS_H
-#include "opentrack/options.hpp"
+#include "opentrack-compat/options.hpp"
using namespace options;
struct settings_pt : opts
diff --git a/tracker-pt/point_extractor.cpp b/tracker-pt/point_extractor.cpp
index ec37dd00..0208b11d 100644
--- a/tracker-pt/point_extractor.cpp
+++ b/tracker-pt/point_extractor.cpp
@@ -13,20 +13,22 @@
# include "opentrack-compat/timer.hpp"
#endif
-PointExtractor::PointExtractor(){
- //if (!AllocConsole()){}
- //else SetConsoleTitle("debug");
- //freopen("CON", "w", stdout);
- //freopen("CON", "w", stderr);
+PointExtractor::PointExtractor()
+{
}
-// ----------------------------------------------------------------------------
-std::vector<cv::Vec2f> PointExtractor::extract_points(cv::Mat& frame)
+
+const std::vector<cv::Vec2f>& PointExtractor::extract_points(cv::Mat& frame)
{
const int W = frame.cols;
const int H = frame.rows;
+
+ if (frame_gray.rows != frame.rows || frame_gray.cols != frame.cols)
+ {
+ frame_gray = cv::Mat(frame.rows, frame.cols, CV_8U);
+ frame_bin = cv::Mat(frame.rows, frame.cols, CV_8U);;
+ }
// convert to grayscale
- cv::Mat frame_gray;
cv::cvtColor(frame, frame_gray, cv::COLOR_RGB2GRAY);
const double region_size_min = s.min_point_size;
@@ -51,7 +53,6 @@ std::vector<cv::Vec2f> PointExtractor::extract_points(cv::Mat& frame)
};
// mask for everything that passes the threshold (or: the upper threshold of the hysteresis)
- cv::Mat frame_bin = cv::Mat::zeros(H, W, CV_8U);
std::vector<blob> blobs;
std::vector<std::vector<cv::Point>> contours;
@@ -59,42 +60,39 @@ std::vector<cv::Vec2f> PointExtractor::extract_points(cv::Mat& frame)
const int thres = s.threshold;
if (!s.auto_threshold)
{
- cv::Mat frame_bin_;
- cv::threshold(frame_gray, frame_bin_, thres, 255, cv::THRESH_BINARY);
- frame_bin.setTo(170, frame_bin_);
- cv::findContours(frame_bin_, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
+ cv::threshold(frame_gray, frame_bin, thres, 255, cv::THRESH_BINARY);
+ cv::findContours(frame_bin, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
}
else
{
- cv::Mat hist;
cv::calcHist(std::vector<cv::Mat> { frame_gray },
std::vector<int> { 0 },
cv::Mat(),
hist,
- std::vector<int> { 256 },
- std::vector<float> { 0, 256 },
+ std::vector<int> { 256/hist_c },
+ std::vector<float> { 0, 256/hist_c },
false);
- const int sz = hist.rows*hist.cols;
+ const int sz = hist.cols * hist.rows;
int val = 0;
int cnt = 0;
constexpr int min_pixels = 250;
const auto pixels_to_include = std::max<int>(0, min_pixels * s.threshold/100.);
+ auto ptr = reinterpret_cast<const float*>(hist.ptr(0));
for (int i = sz-1; i >= 0; i--)
{
- cnt += hist.at<float>(i);
+ cnt += ptr[i];
if (cnt >= pixels_to_include)
{
val = i;
break;
}
}
+ val *= hist_c;
val *= 240./256.;
//qDebug() << "val" << val;
- cv::Mat frame_bin_;
- cv::threshold(frame_gray, frame_bin_, val, 255, CV_THRESH_BINARY);
- frame_bin.setTo(170, frame_bin_);
- cv::findContours(frame_bin_, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
+ cv::threshold(frame_gray, frame_bin, val, 255, CV_THRESH_BINARY);
+ cv::findContours(frame_bin, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
}
int cnt = 0;
@@ -150,31 +148,27 @@ std::vector<cv::Vec2f> PointExtractor::extract_points(cv::Mat& frame)
}
blobs.push_back(blob(radius, pos, confid, area));
+
+ enum { max_blobs = 16 };
+
+ if (blobs.size() == max_blobs)
+ break;
}
- // clear old points
- points.clear();
-
using b = const blob;
std::sort(blobs.begin(), blobs.end(), [](b& b1, b& b2) {return b1.confid > b2.confid;});
+ points.reserve(blobs.size());
+
+ QMutexLocker l(&mtx);
+
+ points.clear();
+
for (auto& b : blobs)
{
cv::Vec2f p((b.pos[0] - W/2)/W, -(b.pos[1] - H/2)/W);
points.push_back(p);
}
- // draw output image
- std::vector<cv::Mat> channels_;
- cv::split(frame, channels_);
- std::vector<cv::Mat> channels;
- {
- cv::Mat frame_bin__ = frame_bin * .5;
- channels.push_back(channels_[0] + frame_bin__);
- channels.push_back(channels_[1] - frame_bin__);
- channels.push_back(channels_[2] - frame_bin__);
- cv::merge(channels, frame);
- }
-
return points;
}
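
The auto-threshold loop above picks the brightest histogram bin such that at least pixels_to_include pixels lie at or above it (the bucket-size and 240/256 scaling details aside). The same idea detached from OpenCV, as an editorial sketch with hypothetical names:

    #include <array>

    // hist[i] = number of pixels with brightness i. Walk down from the brightest
    // bin, accumulating counts, and return the first level that covers enough pixels.
    int pick_threshold(const std::array<int, 256>& hist, int pixels_to_include)
    {
        int cnt = 0;
        for (int i = 255; i >= 0; i--)
        {
            cnt += hist[i];
            if (cnt >= pixels_to_include)
                return i;
        }
        return 0;
    }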
diff --git a/tracker-pt/point_extractor.h b/tracker-pt/point_extractor.h
index b9368ab6..030251ff 100644
--- a/tracker-pt/point_extractor.h
+++ b/tracker-pt/point_extractor.h
@@ -13,21 +13,26 @@
#include "ftnoir_tracker_pt_settings.h"
-// ----------------------------------------------------------------------------
-// Extracts points from an opencv image
+#include <QMutex>
+
class PointExtractor
{
public:
// extracts points from frame and draws some processing info into frame, if draw_output is set
// dt: time since last call in seconds
// WARNING: returned reference is valid as long as object
- std::vector<cv::Vec2f> extract_points(cv::Mat &frame);
- const std::vector<cv::Vec2f>& get_points() { return points; }
+ const std::vector<cv::Vec2f> &extract_points(cv::Mat &frame);
+ const std::vector<cv::Vec2f>& get_points() { QMutexLocker l(&mtx); return points; }
PointExtractor();
settings_pt s;
private:
+ enum { hist_c = 2 };
std::vector<cv::Vec2f> points;
+ QMutex mtx;
+ cv::Mat frame_gray;
+ cv::Mat frame_bin;
+ cv::Mat hist;
};
#endif //POINTEXTRACTOR_H
diff --git a/tracker-pt/point_tracker.cpp b/tracker-pt/point_tracker.cpp
index 924b75de..aa6feb5b 100644
--- a/tracker-pt/point_tracker.cpp
+++ b/tracker-pt/point_tracker.cpp
@@ -249,6 +249,7 @@ int PointTracker::POSIT(const PointModel& model, const PointOrder& order_, float
old_epsilon_2 = epsilon_2;
}
+ QMutexLocker l(&mtx);
// apply results
X_CM.R = *R_current;
X_CM.t[0] = order[0][0] * Z0/focal_length;
diff --git a/tracker-pt/point_tracker.h b/tracker-pt/point_tracker.h
index 8c754718..48c7617e 100644
--- a/tracker-pt/point_tracker.h
+++ b/tracker-pt/point_tracker.h
@@ -15,9 +15,8 @@
#include "ftnoir_tracker_pt_settings.h"
#include <QObject>
+#include <QMutex>
-// ----------------------------------------------------------------------------
-// Affine frame trafo
class Affine
{
public:
@@ -120,12 +119,8 @@ public:
// f : (focal length)/(sensor width)
// dt : time since last call
void track(const std::vector<cv::Vec2f>& projected_points, const PointModel& model, float f, bool dynamic_pose, int init_phase_timeout);
- Affine pose() const { return X_CM; }
+ Affine pose() { QMutexLocker l(&mtx); return X_CM; }
cv::Vec2f project(const cv::Vec3f& v_M, float f);
- void reset(const Affine& pose)
- {
- X_CM = pose;
- }
private:
// the points in model order
struct PointOrder
@@ -146,6 +141,7 @@ private:
Timer t;
bool init_phase;
+ QMutex mtx;
};
#endif //POINTTRACKER_H
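
pose() above now copies X_CM while holding a QMutexLocker instead of returning the member unguarded. A minimal sketch of that getter pattern with placeholder types (not the project's own classes):

    #include <QMutex>
    #include <QMutexLocker>

    struct Pose { float yaw = 0, pitch = 0, roll = 0; };

    class PoseHolder
    {
    public:
        // Returning by value means the copy is made while the lock is held;
        // the caller then works on its own snapshot.
        Pose pose() { QMutexLocker l(&mtx); return current; }
        void set_pose(const Pose& p) { QMutexLocker l(&mtx); current = p; }
    private:
        Pose current;
        QMutex mtx;
    };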
diff --git a/tracker-pt/pt_video_widget.cpp b/tracker-pt/pt_video_widget.cpp
index cbb7c268..99f86eb2 100644
--- a/tracker-pt/pt_video_widget.cpp
+++ b/tracker-pt/pt_video_widget.cpp
@@ -9,6 +9,7 @@
*/
#include "pt_video_widget.h"
+#include <opencv2/imgproc.hpp>
void PTVideoWidget::update_image(const cv::Mat& frame)
{
@@ -16,40 +17,32 @@ void PTVideoWidget::update_image(const cv::Mat& frame)
if (!freshp)
{
- _frame = frame.clone();
+ if (_frame.cols != frame.cols || _frame.rows != frame.rows)
+ {
+ _frame = cv::Mat(frame.rows, frame.cols, CV_8U);
+ _frame2 = cv::Mat(frame.rows, frame.cols, CV_8U);
+ }
+ frame.copyTo(_frame);
freshp = true;
}
}
void PTVideoWidget::update_and_repaint()
{
- QImage qframe;
+ if (static_cast<QWidget*>(parent())->isEnabled())
{
QMutexLocker foo(&mtx);
if (_frame.empty() || !freshp)
return;
- qframe = QImage(_frame.cols, _frame.rows, QImage::Format_RGB888);
+ cv::cvtColor(_frame, _frame2, cv::COLOR_RGB2BGR);
+
+ if (_frame3.cols != width() || _frame3.rows != height())
+ _frame3 = cv::Mat(height(), width(), CV_8U);
+
+ cv::resize(_frame2, _frame3, cv::Size(width(), height()), 0, 0, cv::INTER_NEAREST);
+
+ texture = QImage((const unsigned char*) _frame3.data, _frame3.cols, _frame3.rows, QImage::Format_RGB888);
freshp = false;
- uchar* data = qframe.bits();
- const int pitch = qframe.bytesPerLine();
- unsigned char *input = (unsigned char*) _frame.data;
- const int chans = _frame.channels();
- for (int y = 0; y < _frame.rows; y++)
- {
- const int step = y * _frame.step;
- const int pitch_ = y * pitch;
- for (int x = 0; x < _frame.cols; x++)
- {
- data[pitch_ + x * 3 + 0] = input[step + x * chans + 2];
- data[pitch_ + x * 3 + 1] = input[step + x * chans + 1];
- data[pitch_ + x * 3 + 2] = input[step + x * chans + 0];
- }
- }
- }
- qframe = qframe.scaled(size(), Qt::IgnoreAspectRatio, Qt::FastTransformation);
- {
- QMutexLocker foo(&mtx);
- texture = qframe;
+ update();
}
- update();
}
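
The rewritten update_and_repaint() drops the hand-rolled per-pixel copy loop: it converts and resizes with OpenCV, then wraps the resulting buffer in a QImage. A stripped-down sketch of that wrapping step (editorial, with hypothetical names; the QImage only references the Mat's data, so the Mat must outlive it, as the widget's member buffers do above):

    #include <opencv2/imgproc.hpp>
    #include <QImage>

    // bgr: a CV_8UC3 frame in OpenCV's usual BGR order.
    // rgb_scratch: caller-owned buffer that must stay alive while the QImage is used.
    QImage wrap_frame(const cv::Mat& bgr, cv::Mat& rgb_scratch)
    {
        cv::cvtColor(bgr, rgb_scratch, cv::COLOR_BGR2RGB);  // Format_RGB888 expects RGB bytes
        return QImage(rgb_scratch.data, rgb_scratch.cols, rgb_scratch.rows,
                      int(rgb_scratch.step), QImage::Format_RGB888);
    }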
diff --git a/tracker-pt/pt_video_widget.h b/tracker-pt/pt_video_widget.h
index af1d60fd..d9144ac0 100644
--- a/tracker-pt/pt_video_widget.h
+++ b/tracker-pt/pt_video_widget.h
@@ -17,6 +17,7 @@
#include <QTimer>
#include <QMutex>
#include <QMutexLocker>
+#include <QDebug>
class PTVideoWidget : public QWidget
{
@@ -28,7 +29,7 @@ public:
freshp(false)
{
connect(&timer, SIGNAL(timeout()), this, SLOT(update_and_repaint()));
- timer.start(40);
+ timer.start(50);
}
void update_image(const cv::Mat &frame);
protected slots:
@@ -42,6 +43,6 @@ private:
QMutex mtx;
QImage texture;
QTimer timer;
- cv::Mat _frame;
+ cv::Mat _frame, _frame2, _frame3;
bool freshp;
};