Diffstat (limited to 'tracker-pt/module')
-rw-r--r--  tracker-pt/module/CMakeLists.txt        |  10
-rw-r--r--  tracker-pt/module/camera.cpp            | 149
-rw-r--r--  tracker-pt/module/camera.h              |  30
-rw-r--r--  tracker-pt/module/export.hpp            |  11
-rw-r--r--  tracker-pt/module/frame.cpp             |  66
-rw-r--r--  tracker-pt/module/frame.hpp             |  20
-rw-r--r--  tracker-pt/module/lang/de_DE.ts         |  11
-rw-r--r--  tracker-pt/module/lang/nl_NL.ts         |   7
-rw-r--r--  tracker-pt/module/lang/ru_RU.ts         |   7
-rw-r--r--  tracker-pt/module/lang/stub.ts          |   7
-rw-r--r--  tracker-pt/module/lang/zh_CN.ts         |   9
-rw-r--r--  tracker-pt/module/module.cpp            |  21
-rw-r--r--  tracker-pt/module/module.hpp            |  20
-rw-r--r--  tracker-pt/module/point_extractor.cpp   | 307
-rw-r--r--  tracker-pt/module/point_extractor.h     |  26
15 files changed, 421 insertions, 280 deletions
diff --git a/tracker-pt/module/CMakeLists.txt b/tracker-pt/module/CMakeLists.txt
index 1d1b4458..b7fc974f 100644
--- a/tracker-pt/module/CMakeLists.txt
+++ b/tracker-pt/module/CMakeLists.txt
@@ -1,6 +1,10 @@
-find_package(OpenCV 3.0 QUIET)
+include(opentrack-opencv)
+find_package(OpenCV QUIET)
if(OpenCV_FOUND)
+ foreach(k core imgproc)
+ otr_install_lib("opencv_${k}" "${opentrack-libexec}")
+ endforeach()
otr_module(tracker-pt)
- target_link_libraries(opentrack-tracker-pt opentrack-tracker-pt-base)
- target_include_directories(opentrack-tracker-pt PRIVATE "${CMAKE_SOURCE_DIR}/tracker-pt")
+ target_link_libraries(${self} opentrack-video opencv_imgproc opentrack-tracker-pt-base)
+ target_include_directories(${self} PUBLIC "${CMAKE_SOURCE_DIR}/tracker-pt")
endif()
diff --git a/tracker-pt/module/camera.cpp b/tracker-pt/module/camera.cpp
index 9c62e8a3..1beba474 100644
--- a/tracker-pt/module/camera.cpp
+++ b/tracker-pt/module/camera.cpp
@@ -7,16 +7,9 @@
#include "camera.h"
#include "frame.hpp"
+#include <opencv2/core/mat.hpp>
-#include "compat/sleep.hpp"
-#include "compat/camera-names.hpp"
-#include "compat/math-imports.hpp"
-
-#include <opencv2/imgproc.hpp>
-
-#include "cv/video-property-page.hpp"
-
-using namespace pt_module;
+namespace pt_module {
Camera::Camera(const QString& module_name) : s { module_name }
{
@@ -24,22 +17,18 @@ Camera::Camera(const QString& module_name) : s { module_name }
QString Camera::get_desired_name() const
{
- return desired_name;
+ return cam_desired.name;
}
QString Camera::get_active_name() const
{
- return active_name;
+ return cam_info.name;
}
void Camera::show_camera_settings()
{
- const int idx = camera_name_to_index(s.camera_name);
-
- if (cap && cap->isOpened())
- video_property_page::show_from_capture(*cap, idx);
- else
- video_property_page::show(idx);
+ if (cap)
+ (void)cap->show_dialog();
}
Camera::result Camera::get_info() const
@@ -54,16 +43,16 @@ Camera::result Camera::get_frame(pt_frame& frame_)
{
cv::Mat& frame = frame_.as<Frame>()->mat;
- const bool new_frame = _get_frame(frame);
+ const bool new_frame = get_frame_(frame);
if (new_frame)
{
- const double dt = t.elapsed_seconds();
+ const f dt = (f)t.elapsed_seconds();
t.start();
// measure fps of valid frames
- constexpr double RC = .1; // seconds
- const double alpha = dt/(dt + RC);
+ constexpr f RC = f{1}/10; // seconds
+ const f alpha = dt/(dt + RC);
if (dt_mean < dt_eps)
dt_mean = dt;
@@ -75,64 +64,69 @@ Camera::result Camera::get_frame(pt_frame& frame_)
cam_info.res_y = frame.rows;
cam_info.fov = fov;
- return result(true, cam_info);
+ return { true, cam_info };
}
else
- return result(false, pt_camera_info());
+ return { false, {} };
}
-bool Camera::start(int idx, int fps, int res_x, int res_y)
+bool Camera::start(const pt_settings& s)
{
- if (idx >= 0 && fps >= 0 && res_x >= 0 && res_y >= 0)
+ int fps = s.cam_fps, res_x = s.cam_res_x, res_y = s.cam_res_y;
+ QString name = s.camera_name;
+ bool use_mjpeg = s.use_mjpeg;
+
+ if (fps >= 0 && res_x >= 0 && res_y >= 0)
{
- if (cam_desired.idx != idx ||
- cam_desired.fps != fps ||
+ if (cam_desired.name != name ||
+ (int)cam_desired.fps != fps ||
cam_desired.res_x != res_x ||
cam_desired.res_y != res_y ||
- !cap || !cap->isOpened() || !cap->grab())
+ cam_desired.use_mjpeg != use_mjpeg ||
+ !cap || !cap->is_open())
{
stop();
- desired_name = get_camera_names().value(idx);
- cam_desired.idx = idx;
- cam_desired.fps = fps;
+ cam_desired.name = name;
+ cam_desired.fps = (f)fps;
cam_desired.res_x = res_x;
cam_desired.res_y = res_y;
cam_desired.fov = fov;
+ cam_desired.use_mjpeg = use_mjpeg;
- cap = camera_ptr(new cv::VideoCapture(cam_desired.idx));
-
- if (cam_desired.res_x)
- cap->set(cv::CAP_PROP_FRAME_WIDTH, cam_desired.res_x);
- if (cam_desired.res_y)
- cap->set(cv::CAP_PROP_FRAME_HEIGHT, cam_desired.res_y);
- if (cam_desired.fps)
- cap->set(cv::CAP_PROP_FPS, cam_desired.fps);
-
- if (cap->isOpened())
- {
- cam_info = pt_camera_info();
- active_name = QString();
- cam_info.idx = idx;
- dt_mean = 0;
- active_name = desired_name;
-
- cv::Mat tmp;
-
- if (_get_frame(tmp))
- {
- t.start();
- return true;
- }
- }
-
- cap = nullptr;
- return false;
- }
+ cap = video::make_camera(name);
+
+ if (!cap)
+ goto fail;
+
+ camera::info info {};
+ info.fps = fps;
+ info.width = res_x;
+ info.height = res_y;
+ info.use_mjpeg = use_mjpeg;
+ info.num_channels = s.blob_color == pt_color_hardware ? 1 : 3;
+
+ if (!cap->start(info))
+ goto fail;
- return true;
+ cam_info = pt_camera_info();
+ cam_info.name = name;
+ cam_info.use_mjpeg = use_mjpeg;
+ cam_info.fov = (f)s.fov;
+ dt_mean = 0;
+
+ cv::Mat tmp;
+
+ if (!get_frame_(tmp))
+ goto fail;
+
+ t.start();
+ }
}
+ return true;
+
+fail:
stop();
return false;
}
@@ -140,33 +134,26 @@ bool Camera::start(int idx, int fps, int res_x, int res_y)
void Camera::stop()
{
cap = nullptr;
- desired_name = QString();
- active_name = QString();
- cam_info = pt_camera_info();
- cam_desired = pt_camera_info();
+ cam_info = {};
+ cam_desired = {};
}
-bool Camera::_get_frame(cv::Mat& frame)
+bool Camera::get_frame_(cv::Mat& img)
{
- if (cap && cap->isOpened())
+ if (cap && cap->is_open())
{
- for (int i = 0; i < 5; i++)
+ auto [ frame, ret ] = cap->get_frame();
+ if (ret)
{
- if (cap->read(frame))
- return true;
- portable::sleep(1);
+ int stride = frame.stride;
+ if (stride == 0)
+ stride = cv::Mat::AUTO_STEP;
+ img = cv::Mat(frame.height, frame.width, CV_8UC(frame.channels), (void*)frame.data, (size_t)stride);
+ return true;
}
}
- return false;
-}
-void Camera::camera_deleter::operator()(cv::VideoCapture* cap)
-{
- if (cap)
- {
- if (cap->isOpened())
- cap->release();
- delete cap;
- }
+ return false;
}
+} // ns pt_module
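
The frame timing in Camera::get_frame above is smoothed with an RC low-pass over the inter-frame interval (RC = 0.1 s, alpha = dt/(dt + RC)). Only the first-frame branch appears in the hunk, so the steady-state update in this minimal standalone sketch is an assumption based on the usual exponential blend:

#include <cstdio>

// RC low-pass over frame intervals, mirroring the smoothing in Camera::get_frame.
// The else branch is assumed; the diff only shows the first-frame seeding.
struct fps_filter
{
    static constexpr double RC = 0.1;        // seconds, same constant as above
    static constexpr double dt_eps = 1./256; // same epsilon as camera.h above
    double dt_mean = 0;

    void push(double dt)
    {
        const double alpha = dt / (dt + RC);
        if (dt_mean < dt_eps)
            dt_mean = dt;                    // first valid frame: seed the mean
        else
            dt_mean = (1 - alpha) * dt_mean + alpha * dt;
    }

    double fps() const { return dt_mean > dt_eps ? 1 / dt_mean : 0; }
};

int main()
{
    fps_filter filt;
    for (int i = 0; i < 5; i++)
        filt.push(1. / 60);                  // five frames at ~60 Hz
    std::printf("estimated fps: %.1f\n", filt.fps());
}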
diff --git a/tracker-pt/module/camera.h b/tracker-pt/module/camera.h
index 79e3dca0..e4772178 100644
--- a/tracker-pt/module/camera.h
+++ b/tracker-pt/module/camera.h
@@ -8,15 +8,10 @@
#pragma once
#include "pt-api.hpp"
-
#include "compat/timer.hpp"
+#include "video/camera.hpp"
-#include <functional>
#include <memory>
-#include <tuple>
-
-#include <opencv2/core.hpp>
-#include <opencv2/videoio.hpp>
#include <QString>
@@ -26,7 +21,7 @@ struct Camera final : pt_camera
{
Camera(const QString& module_name);
- bool start(int idx, int fps, int res_x, int res_y) override;
+ bool start(const pt_settings& s) override;
void stop() override;
result get_frame(pt_frame& Frame) override;
@@ -36,30 +31,23 @@ struct Camera final : pt_camera
QString get_desired_name() const override;
QString get_active_name() const override;
- void set_fov(double value) override { fov = value; }
+ void set_fov(f value) override { fov = value; }
void show_camera_settings() override;
private:
- warn_result_unused bool _get_frame(cv::Mat& Frame);
+ using camera = video::impl::camera;
+
+ [[nodiscard]] bool get_frame_(cv::Mat& frame);
- double dt_mean = 0, fov = 30;
+ f dt_mean = 0, fov = 30;
Timer t;
pt_camera_info cam_info;
pt_camera_info cam_desired;
- QString desired_name, active_name;
-
- struct camera_deleter final
- {
- void operator()(cv::VideoCapture* cap);
- };
-
- using camera_ptr = std::unique_ptr<cv::VideoCapture, camera_deleter>;
-
- camera_ptr cap;
+ std::unique_ptr<camera> cap;
pt_settings s;
- static constexpr inline double dt_eps = 1./384;
+ static constexpr f dt_eps = f{1}/256;
};
} // ns pt_module
diff --git a/tracker-pt/module/export.hpp b/tracker-pt/module/export.hpp
new file mode 100644
index 00000000..a733c9fe
--- /dev/null
+++ b/tracker-pt/module/export.hpp
@@ -0,0 +1,11 @@
+// generates export.hpp for each module from compat/linkage.hpp
+
+#pragma once
+
+#include "compat/linkage-macros.hpp"
+
+#ifdef BUILD_TRACKER_PT
+# define OTR_PT_EXPORT OTR_GENERIC_EXPORT
+#else
+# define OTR_PT_EXPORT OTR_GENERIC_IMPORT
+#endif
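
The new export.hpp selects between export and import linkage depending on whether the module itself is being built. A minimal sketch of how such a macro is typically applied; the class name is hypothetical and not part of this commit, and the attribute expansion mentioned in the comment is only the usual pattern behind OTR_GENERIC_EXPORT, not a quote of compat/linkage-macros.hpp:

// Hypothetical consumer of export.hpp (not from this commit).
// OTR_GENERIC_EXPORT usually expands to __declspec(dllexport) on MSVC or
// __attribute__((visibility("default"))) on gcc/clang, with the import
// variant used by code that merely links against the module.
#include "export.hpp"

class OTR_PT_EXPORT point_filter_example   // exported only when BUILD_TRACKER_PT is defined
{
public:
    void run();
};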
diff --git a/tracker-pt/module/frame.cpp b/tracker-pt/module/frame.cpp
index e403af07..1a276f16 100644
--- a/tracker-pt/module/frame.cpp
+++ b/tracker-pt/module/frame.cpp
@@ -1,48 +1,54 @@
#include "frame.hpp"
-
#include "compat/math.hpp"
-
-#include <cstring>
-#include <tuple>
-
#include <opencv2/imgproc.hpp>
-using namespace pt_module;
+namespace pt_module {
-Preview& Preview::operator=(const pt_frame& frame_)
+void Preview::set_last_frame(const pt_frame& frame_)
{
const cv::Mat& frame = frame_.as_const<const Frame>()->mat;
- ensure_size(frame_copy, frame_out.cols, frame_out.rows, CV_8UC3);
+ const bool need_resize = frame.size != frame_copy.size;
- if (frame.channels() != 3)
+ if (frame.channels() == 1)
{
- once_only(qDebug() << "tracker/pt: camera frame depth: 3 !=" << frame.channels());
- return *this;
+ if (need_resize)
+ {
+ frame_tmp.create(frame.size(), CV_8UC3);
+ cv::cvtColor(frame, frame_tmp, cv::COLOR_GRAY2BGR);
+ cv::resize(frame_tmp, frame_copy, frame_copy.size(), 0, 0, cv::INTER_NEAREST);
+ }
+ else
+ cv::cvtColor(frame, frame_copy, cv::COLOR_GRAY2BGR);
+ }
+ else if (frame.channels() == 3)
+ {
+ if (need_resize)
+ cv::resize(frame, frame_copy, frame_copy.size(), 0, 0, cv::INTER_NEAREST);
+ else
+ frame.copyTo(frame_copy);
}
-
- const bool need_resize = frame.cols != frame_out.cols || frame.rows != frame_out.rows;
- if (need_resize)
- cv::resize(frame, frame_copy, cv::Size(frame_out.cols, frame_out.rows), 0, 0, cv::INTER_NEAREST);
else
- frame.copyTo(frame_copy);
-
- return *this;
+ {
+ eval_once(qDebug() << "tracker/pt: camera frame depth" << frame.channels() << "!= 3");
+ frame_copy.create(frame_copy.size(), CV_8UC3);
+ frame_copy.setTo({0});
+ }
}
Preview::Preview(int w, int h)
{
- ensure_size(frame_out, w, h, CV_8UC4);
-
- frame_out.setTo(cv::Scalar(0, 0, 0, 0));
+ frame_out.create(h, w, CV_8UC4);
+ frame_copy.create(h, w, CV_8UC3);
+ frame_copy.setTo({0});
}
QImage Preview::get_bitmap()
{
- int stride = frame_out.step.p[0];
+ int stride = (int)frame_out.step.p[0];
- if (stride < 64 || stride < frame_out.cols * 4)
+ if (stride < frame_out.cols * 4)
{
- once_only(qDebug() << "bad stride" << stride
+ eval_once(qDebug() << "bad stride" << stride
<< "for bitmap size" << frame_copy.cols << frame_copy.rows);
return QImage();
}
@@ -55,11 +61,9 @@ QImage Preview::get_bitmap()
QImage::Format_ARGB32);
}
-void Preview::draw_head_center(double x, double y)
+void Preview::draw_head_center(f x, f y)
{
- double px_, py_;
-
- std::tie(px_, py_) = to_pixel_pos(x, y, frame_copy.cols, frame_copy.rows);
+ auto [px_, py_] = to_pixel_pos(x, y, frame_copy.cols, frame_copy.rows);
int px = iround(px_), py = iround(py_);
@@ -76,8 +80,4 @@ void Preview::draw_head_center(double x, double y)
color, 1);
}
-void Preview::ensure_size(cv::Mat& frame, int w, int h, int type)
-{
- if (frame.cols != w || frame.rows != h)
- frame = cv::Mat(h, w, type);
-}
+} // ns pt_module
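
Preview::get_bitmap above hands the CV_8UC4 buffer to Qt without copying, after the stride sanity check. A minimal sketch of that kind of zero-copy wrap, assuming a BGRA cv::Mat that outlives the returned QImage:

#include <opencv2/core/mat.hpp>
#include <QImage>

// Wrap an 8-bit, 4-channel cv::Mat as a QImage view. No pixel data is copied,
// so the Mat must stay alive for as long as the QImage is in use.
static QImage wrap_bgra(cv::Mat& m)
{
    const int stride = (int)m.step.p[0];
    if (m.type() != CV_8UC4 || stride < m.cols * 4)
        return QImage();                   // same bad-stride bail-out as get_bitmap
    return QImage(m.data, m.cols, m.rows, stride, QImage::Format_ARGB32);
}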
diff --git a/tracker-pt/module/frame.hpp b/tracker-pt/module/frame.hpp
index 9e4f809a..0569a323 100644
--- a/tracker-pt/module/frame.hpp
+++ b/tracker-pt/module/frame.hpp
@@ -2,9 +2,14 @@
#include "pt-api.hpp"
-#include <opencv2/core.hpp>
+#include <opencv2/core/mat.hpp>
#include <QImage>
+#ifdef __clang__
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wweak-vtables"
+#endif
+
namespace pt_module {
struct Frame final : pt_frame
@@ -19,18 +24,19 @@ struct Preview final : pt_preview
{
Preview(int w, int h);
- Preview& operator=(const pt_frame& frame) override;
+ void set_last_frame(const pt_frame& frame) override;
QImage get_bitmap() override;
- void draw_head_center(double x, double y) override;
+ void draw_head_center(f x, f y) override;
operator cv::Mat&() { return frame_copy; }
operator cv::Mat const&() const { return frame_copy; }
private:
- static void ensure_size(cv::Mat& frame, int w, int h, int type);
-
- bool fresh = true;
- cv::Mat frame_copy, frame_color, frame_out;
+ cv::Mat frame_copy, frame_out, frame_tmp;
};
} // ns pt_module
+
+#ifdef __clang__
+# pragma clang diagnostic pop
+#endif
diff --git a/tracker-pt/module/lang/de_DE.ts b/tracker-pt/module/lang/de_DE.ts
new file mode 100644
index 00000000..6c548aba
--- /dev/null
+++ b/tracker-pt/module/lang/de_DE.ts
@@ -0,0 +1,11 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!DOCTYPE TS>
+<TS version="2.1" language="de_DE">
+<context>
+ <name>pt_module::metadata_pt</name>
+ <message>
+ <source>PointTracker 1.1</source>
+ <translation>PointTracker 1.1</translation>
+ </message>
+</context>
+</TS>
diff --git a/tracker-pt/module/lang/nl_NL.ts b/tracker-pt/module/lang/nl_NL.ts
index 9e739505..4679971e 100644
--- a/tracker-pt/module/lang/nl_NL.ts
+++ b/tracker-pt/module/lang/nl_NL.ts
@@ -1,4 +1,11 @@
<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE TS>
<TS version="2.1" language="nl_NL">
+<context>
+ <name>pt_module::metadata_pt</name>
+ <message>
+ <source>PointTracker 1.1</source>
+ <translation type="unfinished"></translation>
+ </message>
+</context>
</TS>
diff --git a/tracker-pt/module/lang/ru_RU.ts b/tracker-pt/module/lang/ru_RU.ts
index f62cf2e1..c3611ef0 100644
--- a/tracker-pt/module/lang/ru_RU.ts
+++ b/tracker-pt/module/lang/ru_RU.ts
@@ -1,4 +1,11 @@
<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE TS>
<TS version="2.1" language="ru_RU">
+<context>
+ <name>pt_module::metadata_pt</name>
+ <message>
+ <source>PointTracker 1.1</source>
+ <translation type="unfinished"></translation>
+ </message>
+</context>
</TS>
diff --git a/tracker-pt/module/lang/stub.ts b/tracker-pt/module/lang/stub.ts
index 6401616d..03d19f4e 100644
--- a/tracker-pt/module/lang/stub.ts
+++ b/tracker-pt/module/lang/stub.ts
@@ -1,4 +1,11 @@
<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE TS>
<TS version="2.1">
+<context>
+ <name>pt_module::metadata_pt</name>
+ <message>
+ <source>PointTracker 1.1</source>
+ <translation type="unfinished"></translation>
+ </message>
+</context>
</TS>
diff --git a/tracker-pt/module/lang/zh_CN.ts b/tracker-pt/module/lang/zh_CN.ts
index 6401616d..c39728a1 100644
--- a/tracker-pt/module/lang/zh_CN.ts
+++ b/tracker-pt/module/lang/zh_CN.ts
@@ -1,4 +1,11 @@
<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE TS>
-<TS version="2.1">
+<TS version="2.1" language="zh_CN">
+<context>
+ <name>pt_module::metadata_pt</name>
+ <message>
+ <source>PointTracker 1.1</source>
+ <translation type="unfinished"></translation>
+ </message>
+</context>
</TS>
diff --git a/tracker-pt/module/module.cpp b/tracker-pt/module/module.cpp
index 5c298ca5..f665face 100644
--- a/tracker-pt/module/module.cpp
+++ b/tracker-pt/module/module.cpp
@@ -1,6 +1,6 @@
#include "ftnoir_tracker_pt.h"
-#include "api/plugin-api.hpp"
+#include "module.hpp"
#include "camera.h"
#include "frame.hpp"
#include "point_extractor.h"
@@ -12,7 +12,11 @@
static const QString module_name = "tracker-pt";
-using namespace pt_module;
+#ifdef __clang__
+# pragma clang diagnostic ignored "-Wweak-vtables"
+#endif
+
+namespace pt_module {
struct pt_module_traits final : pt_runtime_traits
{
@@ -54,16 +58,15 @@ struct dialog_pt : TrackerDialog_PT
dialog_pt();
};
-class metadata_pt : public Metadata
-{
- QString name() { return _("PointTracker 1.1"); }
- QIcon icon() { return QIcon(":/Resources/Logo_IR.png"); }
-};
+dialog_pt::dialog_pt() : TrackerDialog_PT(module_name) {}
+
+QString metadata_pt::name() { return tr("PointTracker 1.1"); }
+QIcon metadata_pt::icon() { return QIcon(":/Resources/Logo_IR.png"); }
+
+}
// ns pt_module
using namespace pt_module;
-dialog_pt::dialog_pt() : TrackerDialog_PT(module_name) {}
-
OPENTRACK_DECLARE_TRACKER(tracker_pt, dialog_pt, metadata_pt)
diff --git a/tracker-pt/module/module.hpp b/tracker-pt/module/module.hpp
new file mode 100644
index 00000000..0b3f12cf
--- /dev/null
+++ b/tracker-pt/module/module.hpp
@@ -0,0 +1,20 @@
+#pragma once
+
+#include "api/plugin-api.hpp"
+#include <QIcon>
+#include <QString>
+
+#include "compat/linkage-macros.hpp"
+
+namespace pt_module
+{
+
+class OTR_GENERIC_EXPORT metadata_pt : public Metadata
+{
+ Q_OBJECT
+
+ QString name() override;
+ QIcon icon() override;
+};
+
+} // ns pt_module
diff --git a/tracker-pt/module/point_extractor.cpp b/tracker-pt/module/point_extractor.cpp
index b67c4036..3329fafc 100644
--- a/tracker-pt/module/point_extractor.cpp
+++ b/tracker-pt/module/point_extractor.cpp
@@ -9,11 +9,11 @@
#include "point_extractor.h"
#include "point_tracker.h"
#include "frame.hpp"
-
#include "cv/numeric.hpp"
#include "compat/math.hpp"
+#include "compat/math-imports.hpp"
-#include <opencv2/videoio.hpp>
+#include <opencv2/imgproc.hpp>
#undef PREVIEW
//#define PREVIEW
@@ -29,12 +29,13 @@
#include <QDebug>
-using namespace types;
-using namespace pt_module;
+using namespace numeric_types;
+
+// meanshift code written by Michael Welter
/*
http://en.wikipedia.org/wiki/Mean-shift
-In this application the idea, is to eliminate any bias of the point estimate
+In this application the idea, is to eliminate any bias of the point estimate
which is introduced by the rather arbitrary thresholded area. One must recognize
that the thresholded area can only move in one pixel increments since it is
binary. Thus, its center of mass might make "jumps" as pixels are added/removed
@@ -43,31 +44,28 @@ With mean-shift, a moving "window" or kernel is multiplied with the gray-scale
image, and the COM is calculated of the result. This is iterated where the
kernel center is set the previously computed COM. Thus, peaks in the image intensity
distribution "pull" the kernel towards themselves. Eventually it stops moving, i.e.
-then the computed COM coincides with the kernel center. We hope that the
+then the computed COM coincides with the kernel center. We hope that the
corresponding location is a good candidate for the extracted point.
-The idea similar to the window scaling suggested in Berglund et al. "Fast, bias-free
+The idea similar to the window scaling suggested in Berglund et al. "Fast, bias-free
algorithm for tracking single particles with variable size and shape." (2008).
*/
-static cv::Vec2d MeanShiftIteration(const cv::Mat &frame_gray, const vec2 &current_center, f filter_width)
+static vec2 MeanShiftIteration(const cv::Mat1b &frame_gray, const vec2 &current_center, f filter_width)
{
- // Most amazingly this function runs faster with doubles than with floats.
- const f s = 1.0 / filter_width;
+ const f s = 1 / filter_width;
f m = 0;
- vec2 com { 0, 0 };
+ vec2 com { 0, 0 };
for (int i = 0; i < frame_gray.rows; i++)
{
- auto frame_ptr = (uint8_t const* restrict_ptr)frame_gray.ptr(i);
+ uint8_t const* const __restrict frame_ptr = frame_gray.ptr(i);
for (int j = 0; j < frame_gray.cols; j++)
{
f val = frame_ptr[j];
- val = val * val; // taking the square wights brighter parts of the image stronger.
- {
- f dx = (j - current_center[0])*s;
- f dy = (i - current_center[1])*s;
- f f = std::fmax(0, 1 - dx*dx - dy*dy);
- val *= f;
- }
+ val = val * val; // taking the square weighs brighter parts of the image stronger.
+ f dx = (j - current_center[0])*s;
+ f dy = (i - current_center[1])*s;
+ f max = std::fmax(f(0), 1 - dx*dx - dy*dy);
+ val *= max;
m += val;
com[0] += j * val;
com[1] += i * val;
@@ -75,13 +73,15 @@ static cv::Vec2d MeanShiftIteration(const cv::Mat &frame_gray, const vec2 &curre
}
if (m > f(.1))
{
- com *= f(1) / m;
+ com *= 1 / m;
return com;
}
else
return current_center;
}
+namespace pt_module {
+
PointExtractor::PointExtractor(const QString& module_name) : s(module_name)
{
blobs.reserve(max_blobs);
@@ -89,24 +89,19 @@ PointExtractor::PointExtractor(const QString& module_name) : s(module_name)
void PointExtractor::ensure_channel_buffers(const cv::Mat& orig_frame)
{
- if (ch[0].rows != orig_frame.rows || ch[0].cols != orig_frame.cols)
- for (unsigned k = 0; k < 3; k++)
- ch[k] = cv::Mat1b(orig_frame.rows, orig_frame.cols);
+ for (cv::Mat1b& x : ch)
+ x.create(orig_frame.rows, orig_frame.cols);
}
void PointExtractor::ensure_buffers(const cv::Mat& frame)
{
const int W = frame.cols, H = frame.rows;
- if (frame_gray.rows != W || frame_gray.cols != H)
- {
- frame_gray = cv::Mat1b(H, W);
- frame_bin = cv::Mat1b(H, W);
- frame_blobs = cv::Mat1b(H, W);
- }
+ frame_gray.create(H, W);
+ frame_bin.create(H, W);
}
-void PointExtractor::extract_single_channel(const cv::Mat& orig_frame, int idx, cv::Mat& dest)
+void PointExtractor::extract_single_channel(const cv::Mat& orig_frame, int idx, cv::Mat1b& dest)
{
ensure_channel_buffers(orig_frame);
@@ -117,17 +112,50 @@ void PointExtractor::extract_single_channel(const cv::Mat& orig_frame, int idx,
cv::mixChannels(&orig_frame, 1, &dest, 1, from_to, 1);
}
-void PointExtractor::extract_channels(const cv::Mat& orig_frame, const int* order, int order_npairs)
+void PointExtractor::filter_single_channel(const cv::Mat& orig_frame, float r, float g, float b, bool overexp, cv::Mat1b& dest)
{
ensure_channel_buffers(orig_frame);
- cv::mixChannels(&orig_frame, 1, (cv::Mat*) ch, order_npairs, order, order_npairs);
+ // just filter for colour or also include overexposed regions?
+ if (!overexp)
+ cv::transform(orig_frame, dest, cv::Mat(cv::Matx13f(b, g, r)));
+ else
+ {
+ for (int i = 0; i < orig_frame.rows; i++)
+ {
+ cv::Vec3b const* const __restrict orig_ptr = orig_frame.ptr<cv::Vec3b>(i);
+ uint8_t* const __restrict dest_ptr = dest.ptr(i);
+ for (int j = 0; j < orig_frame.cols; j++)
+ {
+ // get the intensity of the key color (i.e. +ve coefficients)
+ uchar blue = orig_ptr[j][0], green = orig_ptr[j][1], red = orig_ptr[j][2];
+ float key = std::max(b, 0.0f) * blue + std::max(g, 0.0f) * green + std::max(r, 0.0f) * red;
+ // get the intensity of the non-key color (i.e. -ve coefficients)
+ float nonkey = std::max(-b, 0.0f) * blue + std::max(-g, 0.0f) * green + std::max(-r, 0.0f) * red;
+ // the result is key color minus non-key color inversely weighted by key colour intensity
+ dest_ptr[j] = std::max(0.0f, std::min(255.0f, key - (255.0f - key) / 255.0f * nonkey));
+ }
+ }
+ }
}
void PointExtractor::color_to_grayscale(const cv::Mat& frame, cv::Mat1b& output)
{
+ if (frame.channels() == 1)
+ {
+ output.create(frame.rows, frame.cols);
+ frame.copyTo(output);
+ return;
+ }
+
+ const float half_chr_key_str = *s.chroma_key_strength * 0.5;
switch (s.blob_color)
{
+ case pt_color_green_only:
+ {
+ extract_single_channel(frame, 1, output);
+ break;
+ }
case pt_color_blue_only:
{
extract_single_channel(frame, 0, output);
@@ -138,18 +166,44 @@ void PointExtractor::color_to_grayscale(const cv::Mat& frame, cv::Mat1b& output)
extract_single_channel(frame, 2, output);
break;
}
- case pt_color_average:
+ case pt_color_red_chromakey:
+ {
+ filter_single_channel(frame, 1, -half_chr_key_str, -half_chr_key_str, s.chroma_key_overexposed, output);
+ break;
+ }
+ case pt_color_green_chromakey:
{
- const int W = frame.cols, H = frame.rows;
- const cv::Mat tmp = frame.reshape(1, W * H);
- cv::Mat output_ = output.reshape(1, W * H);
- cv::reduce(tmp, output_, 1, cv::REDUCE_AVG);
+ filter_single_channel(frame, -half_chr_key_str, 1, -half_chr_key_str, s.chroma_key_overexposed, output);
break;
}
+ case pt_color_blue_chromakey:
+ {
+ filter_single_channel(frame, -half_chr_key_str, -half_chr_key_str, 1, s.chroma_key_overexposed, output);
+ break;
+ }
+ case pt_color_cyan_chromakey:
+ {
+ filter_single_channel(frame, -*s.chroma_key_strength, 0.5, 0.5, s.chroma_key_overexposed, output);
+ break;
+ }
+ case pt_color_yellow_chromakey:
+ {
+ filter_single_channel(frame, 0.5, 0.5, -*s.chroma_key_strength, s.chroma_key_overexposed, output);
+ break;
+ }
+ case pt_color_magenta_chromakey:
+ {
+ filter_single_channel(frame, 0.5, -*s.chroma_key_strength, 0.5, s.chroma_key_overexposed, output);
+ break;
+ }
+ case pt_color_hardware:
+ eval_once(qDebug() << "camera driver doesn't support grayscale");
+ goto do_grayscale;
default:
- once_only(qDebug() << "wrong pt_color_type enum value" << int(s.blob_color));
- /*FALLTHROUGH*/
- case pt_color_natural:
+ eval_once(qDebug() << "wrong pt_color_type enum value" << int(s.blob_color));
+ [[fallthrough]];
+ case pt_color_bt709:
+do_grayscale:
cv::cvtColor(frame, output, cv::COLOR_BGR2GRAY);
break;
}
@@ -175,31 +229,97 @@ void PointExtractor::threshold_image(const cv::Mat& frame_gray, cv::Mat1b& outpu
cv::noArray(),
hist,
1,
- (int const*) &hist_size,
+ &hist_size,
&ranges);
- const f radius = (f) threshold_radius_value(frame_gray.cols, frame_gray.rows, threshold_slider_value);
+ const f radius = threshold_radius_value(frame_gray.cols, frame_gray.rows, threshold_slider_value);
- auto ptr = (float const* const restrict_ptr) hist.ptr(0);
- const unsigned area = uround(3 * M_PI * radius*radius);
+ float const* const __restrict ptr = hist.ptr<float>(0);
+ const unsigned area = unsigned(iround(3 * pi * radius*radius));
const unsigned sz = unsigned(hist.cols * hist.rows);
- unsigned thres = 32;
- for (unsigned i = sz-1, cnt = 0; i > 32; i--)
+ unsigned thres = 1;
+ for (unsigned i = sz-1, cnt = 0; i > 1; i--)
{
- cnt += ptr[i];
+ cnt += (unsigned)ptr[i];
if (cnt >= area)
break;
thres = i;
}
- cv::threshold(frame_gray, output, thres, 255, CV_THRESH_BINARY);
+ cv::threshold(frame_gray, output, thres, 255, cv::THRESH_BINARY);
}
}
-void PointExtractor::extract_points(const pt_frame& frame_, pt_preview& preview_frame_, std::vector<vec2>& points)
+static void draw_blobs(cv::Mat& preview_frame, const blob* blobs, unsigned nblobs, const cv::Size& size)
+{
+ for (unsigned k = 0; k < nblobs; k++)
+ {
+ const blob& b = blobs[k];
+
+ if (b.radius < 0)
+ continue;
+
+ const f dpi = preview_frame.cols / f(320);
+ const f offx = 10 * dpi, offy = f(7.5) * dpi;
+
+ const f cx = preview_frame.cols / f(size.width),
+ cy = preview_frame.rows / f(size.height),
+ c = std::fmax(f(1), cx+cy)/2;
+
+ cv::Point p(iround(b.pos[0] * cx), iround(b.pos[1] * cy));
+
+ auto outline_color = k >= PointModel::N_POINTS
+ ? cv::Scalar(192, 192, 192)
+ : cv::Scalar(255, 255, 0);
+
+ cv::ellipse(preview_frame, p,
+ {iround(b.rect.width/(f)2+2*c), iround(b.rect.height/(f)2+2*c)},
+ 0, 0, 360, outline_color, iround(dpi), cv::LINE_AA);
+
+ char buf[16];
+ std::snprintf(buf, sizeof(buf), "%.2fpx", (double)b.radius);
+
+ auto text_color = k >= PointModel::N_POINTS
+ ? cv::Scalar(160, 160, 160)
+ : cv::Scalar(0, 0, 255);
+
+ cv::Point pos(iround(b.pos[0]*cx+offx), iround(b.pos[1]*cy+offy));
+ cv::putText(preview_frame, buf, pos,
+ cv::FONT_HERSHEY_PLAIN, iround(dpi), text_color,
+ 1);
+ }
+}
+
+static vec2 meanshift_initial_guess(const cv::Rect rect, cv::Mat& frame_roi)
+{
+ vec2 ret = {rect.width/(f)2, rect.height/(f)2};
+
+ // compute center initial guess
+ double ynorm = 0, xnorm = 0, y = 0, x = 0;
+ for (int j = 0; j < rect.height; j++)
+ {
+ const unsigned char* __restrict ptr = frame_roi.ptr<unsigned char>(j);
+ for (int i = 0; i < rect.width; i++)
+ {
+ double val = ptr[i] * 1./255;
+ x += i * val;
+ y += j * val;
+ xnorm += val;
+ ynorm += val;
+ }
+ }
+ constexpr double eps = 1e-4;
+ if (xnorm > eps && ynorm > eps)
+ ret = { (f)(x / xnorm), (f)(y / ynorm) };
+ return ret;
+}
+
+void PointExtractor::extract_points(const pt_frame& frame_,
+ pt_preview& preview_frame_,
+ bool preview_visible,
+ std::vector<vec2>& points)
{
const cv::Mat& frame = frame_.as_const<Frame>()->mat;
- cv::Mat& preview_frame = *preview_frame_.as<Preview>();
ensure_buffers(frame);
color_to_grayscale(frame, frame_gray);
@@ -211,24 +331,24 @@ void PointExtractor::extract_points(const pt_frame& frame_, pt_preview& preview_
threshold_image(frame_gray, frame_bin);
- blobs.clear();
- frame_bin.copyTo(frame_blobs);
-
- const f region_size_min = s.min_point_size;
- const f region_size_max = s.max_point_size;
+ const f region_size_min = (f)s.min_point_size;
+ const f region_size_max = (f)s.max_point_size;
unsigned idx = 0;
- for (int y=0; y < frame_blobs.rows; y++)
+
+ blobs.clear();
+
+ for (int y=0; y < frame_bin.rows; y++)
{
- const unsigned char* ptr_bin = frame_blobs.ptr(y);
- for (int x=0; x < frame_blobs.cols; x++)
+ const unsigned char* __restrict ptr_bin = frame_bin.ptr(y);
+ for (int x=0; x < frame_bin.cols; x++)
{
if (ptr_bin[x] != 255)
continue;
idx = blobs.size() + 1;
cv::Rect rect;
- cv::floodFill(frame_blobs,
+ cv::floodFill(frame_bin,
cv::Point(x,y),
cv::Scalar(idx),
&rect,
@@ -244,8 +364,8 @@ void PointExtractor::extract_points(const pt_frame& frame_, pt_preview& preview_
for (int i=rect.y; i < ymax; i++)
{
- unsigned char const* const restrict_ptr ptr_blobs = frame_blobs.ptr(i);
- unsigned char const* const restrict_ptr ptr_gray = frame_gray.ptr(i);
+ unsigned char const* const __restrict ptr_blobs = frame_bin.ptr(i);
+ unsigned char const* const __restrict ptr_gray = frame_gray.ptr(i);
for (int j=rect.x; j < xmax; j++)
{
if (ptr_blobs[j] != idx)
@@ -257,12 +377,12 @@ void PointExtractor::extract_points(const pt_frame& frame_, pt_preview& preview_
}
}
- const double radius = std::sqrt(cnt / M_PI);
+ const f radius = std::sqrt((f)cnt) / std::sqrt(pi);
if (radius > region_size_max || radius < region_size_min)
continue;
blobs.emplace_back(radius,
- vec2(rect.width/2., rect.height/2.),
+ vec2(rect.width/f(2), rect.height/f(2)),
std::pow(f(norm), f(1.1))/cnt,
rect);
@@ -272,9 +392,7 @@ void PointExtractor::extract_points(const pt_frame& frame_, pt_preview& preview_
// XXX we could go to the next scanline unless the points are really small.
// i'd expect each point being present on at least one unique scanline
// but it turns out some people are using 2px points -sh 20180110
-#if BROKEN && 0
- break;
-#endif
+ //break;
}
}
end:
@@ -288,29 +406,22 @@ end:
for (idx = 0; idx < sz; ++idx)
{
- blob &b = blobs[idx];
- cv::Rect rect = b.rect;
-
- rect.x -= rect.width / 2;
- rect.y -= rect.height / 2;
- rect.width *= 2;
- rect.height *= 2;
- rect &= cv::Rect(0, 0, W, H); // crop at frame boundaries
-
+ blob& b = blobs[idx];
+ cv::Rect rect = b.rect & cv::Rect(0, 0, W, H); // crop at frame boundaries
cv::Mat frame_roi = frame_gray(rect);
// smaller values mean more changes. 1 makes too many changes while 1.5 makes about .1
static constexpr f radius_c = f(1.75);
const f kernel_radius = b.radius * radius_c;
- vec2 pos(rect.width/2., rect.height/2.); // position relative to ROI.
+ vec2 pos = meanshift_initial_guess(rect, frame_roi); // position relative to ROI.
for (int iter = 0; iter < 10; ++iter)
{
vec2 com_new = MeanShiftIteration(frame_roi, pos, kernel_radius);
vec2 delta = com_new - pos;
pos = com_new;
- if (delta.dot(delta) < 1e-2)
+ if (delta.dot(delta) < f(1e-3))
break;
}
@@ -318,43 +429,11 @@ end:
b.pos[1] = pos[1] + rect.y;
}
- for (unsigned k = 0; k < blobs.size(); k++)
- {
- blob& b = blobs[k];
-
- const f dpi = preview_frame.cols / f(320);
- const f offx = 10 * dpi, offy = 7.5 * dpi;
-
- const f cx = preview_frame.cols / f(frame.cols),
- cy = preview_frame.rows / f(frame.rows),
- c_ = (cx+cy)/2;
-
- static constexpr unsigned fract_bits = 16;
- static constexpr double c_fract(1 << fract_bits);
-
- cv::Point p(iround(b.pos[0] * cx * c_fract), iround(b.pos[1] * cy * c_fract));
+ if (preview_visible)
+ draw_blobs(preview_frame_.as<Frame>()->mat,
+ blobs.data(), blobs.size(),
+ frame_gray.size());
- auto circle_color = k >= PointModel::N_POINTS
- ? cv::Scalar(192, 192, 192)
- : cv::Scalar(255, 255, 0);
-
- const f overlay_size = dpi > 1.5 ? 2 : 1;
-
- cv::circle(preview_frame, p, iround((b.radius + 3.3) * c_ * c_fract), circle_color, overlay_size, cv::LINE_AA, fract_bits);
-
- char buf[16];
- buf[sizeof(buf)-1] = '\0';
- std::snprintf(buf, sizeof(buf) - 1, "%.2fpx", b.radius);
-
- auto text_color = k >= PointModel::N_POINTS
- ? cv::Scalar(160, 160, 160)
- : cv::Scalar(0, 0, 255);
-
- cv::Point pos(iround(b.pos[0]*cx+offx), iround(b.pos[1]*cy+offy));
- cv::putText(preview_frame, buf, pos,
- cv::FONT_HERSHEY_PLAIN, overlay_size, text_color,
- 1);
- }
// End of mean shift code. At this point, blob positions are updated with hopefully less noisy less biased values.
points.reserve(max_blobs);
@@ -375,3 +454,5 @@ blob::blob(f radius, const vec2& pos, f brightness, const cv::Rect& rect) :
{
//qDebug() << "radius" << radius << "pos" << pos[0] << pos[1];
}
+
+} // ns pt_module
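
For reference, the update implemented by MeanShiftIteration above is a weighted center of mass under a truncated parabolic kernel of width w (the filter_width argument), with pixel intensities squared so that brighter pixels pull harder:

\[
c_{\mathrm{new}} = \frac{\sum_{i,j} I(i,j)^2 \, K_w\!\big((j,i)-c\big)\,(j,i)}{\sum_{i,j} I(i,j)^2 \, K_w\!\big((j,i)-c\big)},
\qquad
K_w(\Delta) = \max\!\left(0,\; 1-\frac{\lVert\Delta\rVert^2}{w^2}\right)
\]

extract_points above runs at most ten such iterations per blob, starting from the intensity-weighted initial guess, and stops once the squared step falls below 1e-3; when the total weight in the denominator drops below 0.1, the previous center is returned unchanged.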
diff --git a/tracker-pt/module/point_extractor.h b/tracker-pt/module/point_extractor.h
index eac2268c..fbfdbb0b 100644
--- a/tracker-pt/module/point_extractor.h
+++ b/tracker-pt/module/point_extractor.h
@@ -9,17 +9,15 @@
#pragma once
#include "pt-api.hpp"
-
+#include <opencv2/core/mat.hpp>
+#include <opencv2/core/types.hpp>
#include <vector>
-#include <opencv2/core.hpp>
-#include <opencv2/imgproc.hpp>
-
namespace pt_module {
-using namespace types;
+using namespace numeric_types;
-struct blob
+struct blob final
{
f radius, brightness;
vec2 pos;
@@ -33,14 +31,18 @@ class PointExtractor final : public pt_point_extractor
public:
// extracts points from frame and draws some processing info into frame, if draw_output is set
// dt: time since last call in seconds
- void extract_points(const pt_frame& frame, pt_preview& preview_frame, std::vector<vec2>& points) override;
- PointExtractor(const QString& module_name);
+ void extract_points(const pt_frame& frame,
+ pt_preview& preview_frame, bool preview_visible,
+ std::vector<vec2>& points) override;
+
+ explicit PointExtractor(const QString& module_name);
+
private:
- static constexpr inline int max_blobs = 16;
+ static constexpr int max_blobs = 16;
pt_settings s;
- cv::Mat1b frame_gray, frame_bin, frame_blobs;
+ cv::Mat1b frame_bin, frame_gray;
cv::Mat1f hist;
std::vector<blob> blobs;
cv::Mat1b ch[3];
@@ -48,8 +50,8 @@ private:
void ensure_channel_buffers(const cv::Mat& orig_frame);
void ensure_buffers(const cv::Mat& frame);
- void extract_single_channel(const cv::Mat& orig_frame, int idx, cv::Mat& dest);
- void extract_channels(const cv::Mat& orig_frame, const int* order, int order_npairs);
+ void extract_single_channel(const cv::Mat& orig_frame, int idx, cv::Mat1b& dest);
+ void filter_single_channel(const cv::Mat& orig_frame, float r, float g, float b, bool overexp, cv::Mat1b& dest);
void color_to_grayscale(const cv::Mat& frame, cv::Mat1b& output);
void threshold_image(const cv::Mat& frame_gray, cv::Mat1b& output);