-rw-r--r--   tracker-easy/cv-point-extractor.cpp        51
-rw-r--r--   tracker-easy/cv-point-extractor.h            8
-rw-r--r--   tracker-easy/preview.cpp                    25
-rw-r--r--   tracker-easy/preview.h                       1
-rw-r--r--   tracker-easy/tracker-easy.cpp                6
-rw-r--r--   tracker-kinect-face/camera_kinect_ir.cpp    20
-rw-r--r--   video/camera.hpp                             2
7 files changed, 84 insertions, 29 deletions
diff --git a/tracker-easy/cv-point-extractor.cpp b/tracker-easy/cv-point-extractor.cpp
index 368f5587..c7a3c958 100644
--- a/tracker-easy/cv-point-extractor.cpp
+++ b/tracker-easy/cv-point-extractor.cpp
@@ -32,19 +32,62 @@ namespace EasyTracker
}
- void CvPointExtractor::extract_points(const cv::Mat& frame, cv::Mat* aPreview, std::vector<vec2>& aPoints)
+ void CvPointExtractor::extract_points(const cv::Mat& aFrame, cv::Mat* aPreview, std::vector<vec2>& aPoints)
{
+ // TODO: Assert that channel size is either one or two
+ // Make sure our frame channel depth is 8 bits
+ size_t channelSize = aFrame.elemSize1();
+ if (channelSize == 2)
+ {
+ // We have a single 16-bit channel, typically coming from the Kinect V2 IR sensor
+ // Resample it to 8 bits
+ double min = std::numeric_limits<uint16_t>::min();
+ double max = std::numeric_limits<uint16_t>::max();
+ //cv::minMaxLoc(aFrame, &min, &max); // Should we use the frame's actual min and max instead of the 16-bit limits?
+ // For scaling with more precision in the range we are interested in
+ min = max - 255;
+ // See: https://stackoverflow.com/questions/14539498/change-type-of-mat-object-from-cv-32f-to-cv-8u/14539652
+ aFrame.convertTo(iFrameChannelSizeOne, CV_8U, 255.0 / (max - min), -255.0*min / (max - min));
+ }
+ else
+ {
+ iFrameChannelSizeOne = aFrame;
+ }
+
+
+ // Make sure our frame has a single channel
+ // Make an extra copy if needed
+ const int channelCount = iFrameChannelSizeOne.channels();
+ if (channelCount == 3)
+ {
+ // Convert to grayscale
+ // TODO: What's our input format, BGR or RGB?
+ // That won't make our point extraction work, but at least it won't crash
+ cv::cvtColor(iFrameChannelSizeOne, iFrameGray, cv::COLOR_BGR2GRAY);
+ // TODO: Instead convert to HSV and use a key color together with cv::inRange to spot the color we want.
+ // Key color should be defined in settings.
+ }
+ else if (channelCount == 1)
+ {
+ // No further conversion needed
+ iFrameGray = iFrameChannelSizeOne;
+ }
+ else
+ {
+ eval_once(qDebug() << "tracker/easy: camera frame channel count not supported" << aFrame.channels());
+ return;
+ }
// Contours detection
std::vector<std::vector<cv::Point> > contours;
- cv::findContours(frame, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
+ cv::findContours(iFrameGray, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
// Work out which contours are valid points
for (size_t i = 0; i < contours.size(); i++)
{
if (aPreview)
{
- cv::drawContours(*aPreview, contours, i, CV_RGB(255, 0, 0), 2);
+ cv::drawContours(*aPreview, contours, (int)i, CV_RGB(255, 0, 0), 2);
}
@@ -87,7 +130,7 @@ namespace EasyTracker
while (aPoints.size() > 3) // Until we have no more than three points
{
int maxY = 0;
- int index = -1;
+ size_t index = -1;
// Search for the point with highest Y coordinate
for (size_t i = 0; i < aPoints.size(); i++)
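
Note: the 16-bit handling added above maps only the top 256 intensity values of the sensor range onto [0, 255]. A minimal standalone sketch of that remapping follows; the function name and includes are illustrative and not part of the patch.

    #include <opencv2/core.hpp>
    #include <cstdint>
    #include <limits>

    // Remap a CV_16UC1 frame to CV_8U, keeping only the brightest 256 values.
    // convertTo computes saturate_cast<uchar>(alpha * in + beta), so anything
    // below `min` clamps to 0 and [min, max] maps linearly onto [0, 255].
    cv::Mat to8BitTopRange(const cv::Mat& raw16)
    {
        const double max = std::numeric_limits<uint16_t>::max(); // 65535
        const double min = max - 255;                            // 65280
        const double alpha = 255.0 / (max - min);                // 1.0
        const double beta  = -alpha * min;                       // -65280
        cv::Mat out;
        raw16.convertTo(out, CV_8U, alpha, beta);
        return out;
    }
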
diff --git a/tracker-easy/cv-point-extractor.h b/tracker-easy/cv-point-extractor.h
index 18229d4a..677be292 100644
--- a/tracker-easy/cv-point-extractor.h
+++ b/tracker-easy/cv-point-extractor.h
@@ -27,10 +27,14 @@ namespace EasyTracker
public:
// extracts points from frame and draws some processing info into frame, if draw_output is set
// dt: time since last call in seconds
- void extract_points(const cv::Mat& frame, cv::Mat* aPreview, std::vector<vec2>& aPoints) override;
+ void extract_points(const cv::Mat& aFrame, cv::Mat* aPreview, std::vector<vec2>& aPoints) override;
CvPointExtractor();
-
+ // Settings
pt_settings s;
+ // Our frame, converted if needed so that each channel is 8 bits
+ cv::Mat iFrameChannelSizeOne;
+ // Our frame with a single 8-bit channel
+ cv::Mat iFrameGray;
};
}
diff --git a/tracker-easy/preview.cpp b/tracker-easy/preview.cpp
index 404ad299..7f245ae4 100644
--- a/tracker-easy/preview.cpp
+++ b/tracker-easy/preview.cpp
@@ -19,18 +19,37 @@ namespace EasyTracker
Preview& Preview::operator=(const cv::Mat& aFrame)
{
+ // TODO: Assert that channel size is either one or two
+
+ // Make sure our frame channel depth is 8 bits
+ size_t channelSize = aFrame.elemSize1();
+ if (channelSize == 2)
+ {
+ // First resample to 8 bits
+ double min = std::numeric_limits<uint16_t>::min();
+ double max = std::numeric_limits<uint16_t>::max();
+ //cv::minMaxLoc(aFrame, &min, &max); // Should we use the frame's actual min and max instead of the 16-bit limits?
+ // For scaling with more precision in the range we are interested in
+ //min = max - 255;
+ // See: https://stackoverflow.com/questions/14539498/change-type-of-mat-object-from-cv-32f-to-cv-8u/14539652
+ aFrame.convertTo(iFrameChannelSizeOne, CV_8U, 255.0 / (max - min), -255.0*min / (max - min));
+ }
+ else
+ {
+ iFrameChannelSizeOne = aFrame;
+ }
// Make sure our frame is RGB
// Make an extra copy if needed
- int channelCount = aFrame.channels();
+ int channelCount = iFrameChannelSizeOne.channels();
if (channelCount == 1)
{
// Convert to RGB
- cv::cvtColor(aFrame, iFrameRgb, cv::COLOR_GRAY2BGR);
+ cv::cvtColor(iFrameChannelSizeOne, iFrameRgb, cv::COLOR_GRAY2BGR);
}
else if (channelCount == 3)
{
- iFrameRgb = aFrame;
+ iFrameRgb = iFrameChannelSizeOne;
}
else
{
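
Note: unlike the extractor, the preview path keeps `min = max - 255` commented out, so it performs a plain full-range 16-bit to 8-bit downscale before promoting single-channel frames to BGR. A sketch of those two steps combined; the helper name is assumed and not part of the patch.

    #include <opencv2/core.hpp>
    #include <opencv2/imgproc.hpp>
    #include <cstdint>
    #include <limits>

    // Full-range 16-bit -> 8-bit downscale, then replicate the gray plane into BGR.
    cv::Mat previewFromIr16(const cv::Mat& raw16)
    {
        const double max = std::numeric_limits<uint16_t>::max();
        cv::Mat gray8, bgr;
        raw16.convertTo(gray8, CV_8U, 255.0 / max);   // alpha = 255/65535, beta = 0
        cv::cvtColor(gray8, bgr, cv::COLOR_GRAY2BGR); // duplicate into three channels
        return bgr;
    }
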
diff --git a/tracker-easy/preview.h b/tracker-easy/preview.h
index c3ed9f6b..03de6684 100644
--- a/tracker-easy/preview.h
+++ b/tracker-easy/preview.h
@@ -34,6 +34,7 @@ namespace EasyTracker
public:
cv::Mat iFrameResized, frame_out;
cv::Mat iFrameRgb;
+ cv::Mat iFrameChannelSizeOne;
};
}
diff --git a/tracker-easy/tracker-easy.cpp b/tracker-easy/tracker-easy.cpp
index 42666677..083bd951 100644
--- a/tracker-easy/tracker-easy.cpp
+++ b/tracker-easy/tracker-easy.cpp
@@ -126,9 +126,9 @@ namespace EasyTracker
if (new_frame)
{
- //TODO: We should not assume channel size of 1 byte
- // Though in practice since cv::findContours needs CV_8U we would still need to convert our frame from 16 bits to 8 bits.
- iMatFrame = cv::Mat(iFrame.height, iFrame.width, CV_MAKETYPE(CV_8U, iFrame.channels), iFrame.data, iFrame.stride);
+ // Create OpenCV matrix from our frame
+ // TODO: Assert channel size is one or two
+ iMatFrame = cv::Mat(iFrame.height, iFrame.width, CV_MAKETYPE((iFrame.channelSize == 2 ? CV_16U : CV_8U), iFrame.channels), iFrame.data, iFrame.stride);
const bool preview_visible = check_is_visible();
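
Note: the matrix built above is a zero-copy view; the cv::Mat only references iFrame.data and selects its depth from the new channelSize field. A reduced sketch of that construction, assuming a frame-like struct `f` is in scope:

    // Non-owning view over the camera buffer; f.data must stay valid while the Mat is used.
    const int depth = (f.channelSize == 2) ? CV_16U : CV_8U;
    cv::Mat view(f.height, f.width, CV_MAKETYPE(depth, f.channels), f.data, f.stride);
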
diff --git a/tracker-kinect-face/camera_kinect_ir.cpp b/tracker-kinect-face/camera_kinect_ir.cpp
index b1856f47..77621076 100644
--- a/tracker-kinect-face/camera_kinect_ir.cpp
+++ b/tracker-kinect-face/camera_kinect_ir.cpp
@@ -100,6 +100,7 @@ std::tuple<const video::impl::frame&, bool> CameraKinectIr::get_frame()
iFrame.height = 424;
iFrame.stride = cv::Mat::AUTO_STEP;
iFrame.channels = iMatFrame.channels();
+ iFrame.channelSize = iMatFrame.elemSize1();
return { iFrame, new_frame };
}
@@ -205,7 +206,7 @@ void CameraKinectIr::stop()
iMatFrame = cv::Mat();
}
-bool CameraKinectIr::get_frame_(cv::Mat& frame)
+bool CameraKinectIr::get_frame_(cv::Mat& aFrame)
{
if (!iInfraredFrameReader)
@@ -266,22 +267,9 @@ bool CameraKinectIr::get_frame_(cv::Mat& frame)
if (SUCCEEDED(hr))
{
- //ProcessInfrared(nTime, pBuffer, nWidth, nHeight);
-
// Create an OpenCV matrix with our 16-bits IR buffer
- cv::Mat raw = cv::Mat(height, width, CV_16UC1, pBuffer, cv::Mat::AUTO_STEP);
-
- // Convert that OpenCV matrix to an RGB one as this is what is expected by our point extractor
- // TODO: Ideally we should implement a point extractors that works with our native buffer
- // First resample to 8-bits
- double min = std::numeric_limits<uint16_t>::min();
- double max = std::numeric_limits<uint16_t>::max();
- //cv::minMaxLoc(raw, &min, &max); // Should we use 16bit min and max instead?
- // For scalling to have more precission in the range we are interrested in
- min = max - 255;
- // See: https://stackoverflow.com/questions/14539498/change-type-of-mat-object-from-cv-32f-to-cv-8u/14539652
- raw.convertTo(frame, CV_8U, 255.0 / (max - min), -255.0*min / (max - min));
- //
+ aFrame = cv::Mat(height, width, CV_16UC1, pBuffer, cv::Mat::AUTO_STEP);
+ // Any further processing of the frame is left to the caller
success = true;
}
}
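
Note: with the conversion removed here, the camera hands out the sensor's native 16-bit buffer and leaves any scaling to the consumer. The commented-out cv::minMaxLoc upstream hints at a possible adaptive alternative; a sketch of what that could look like, assuming `raw16` is the CV_16UC1 frame, and not part of this patch:

    // Stretch the frame's actual intensity range to [0, 255] instead of a fixed window.
    double lo = 0.0, hi = 0.0;
    cv::minMaxLoc(raw16, &lo, &hi);
    cv::Mat out;
    if (hi > lo)
        raw16.convertTo(out, CV_8U, 255.0 / (hi - lo), -255.0 * lo / (hi - lo));
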
diff --git a/video/camera.hpp b/video/camera.hpp
index c8eff3b9..1cda8839 100644
--- a/video/camera.hpp
+++ b/video/camera.hpp
@@ -23,7 +23,7 @@ struct frame final
// the `stride' member can have a special value of zero,
// signifying equal to width * element size
int width = 0, height = 0, stride = 0, channels = 0;
- // TODO: Add channel size instead of assuming 1 byte
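+ // Size in bytes of one channel element: 1 for 8-bit frames, 2 for 16-bit frames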
+ int channelSize = 1;
};
} // ns video
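
Note: a short sketch of how consumer code might interpret the new field, assuming the frame struct above is in scope and OpenCV's depth constants from <opencv2/core.hpp>; channelSize fixes both the element depth and the row size when stride carries its special zero value.

    #include <opencv2/core.hpp>

    // Bytes per row, honouring the stride == 0 convention documented above.
    int row_bytes(const frame& f)
    {
        const int bytes_per_pixel = f.channels * f.channelSize;
        return f.stride != 0 ? f.stride : f.width * bytes_per_pixel;
    }

    // OpenCV element depth matching the channel size.
    int cv_depth(const frame& f)
    {
        return f.channelSize == 2 ? CV_16U : CV_8U;
    }
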