/* Copyright (c) 2012 Patrick Ruoff
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 */

#include "camera_kinect_ir.h"

#if __has_include(<Kinect.h>)

#include "frame.hpp"

#include "compat/sleep.hpp"
#include "compat/camera-names.hpp"
#include "compat/math-imports.hpp"

#include <opencv2/imgproc.hpp>

#include "cv/video-property-page.hpp"

#include <cstdlib>
#include <cstdint>
#include <limits>

namespace pt_module {

CameraKinectIr::CameraKinectIr(const QString& module_name) : s { module_name }
{
}

QString CameraKinectIr::get_desired_name() const
{
    return desired_name;
}

QString CameraKinectIr::get_active_name() const
{
    return active_name;
}

void CameraKinectIr::show_camera_settings()
{
    const int idx = camera_name_to_index(s.camera_name);

    /*
    if (cap && cap->isOpened())
        video_property_page::show_from_capture(*cap, idx);
    else
        video_property_page::show(idx);
    */
}

CameraKinectIr::result CameraKinectIr::get_info() const
{
    if (cam_info.res_x == 0 || cam_info.res_y == 0)
        return { false, pt_camera_info() };
    else
        return { true, cam_info };
}

CameraKinectIr::result CameraKinectIr::get_frame(pt_frame& frame_)
{
    cv::Mat& frame = frame_.as<Frame>()->mat;

    const bool new_frame = get_frame_(frame);

    if (new_frame)
    {
        const f dt = (f)t.elapsed_seconds();
        t.start();

        // measure fps of valid frames with a simple low-pass filter
        constexpr f RC = f{1}/10; // seconds
        const f alpha = dt/(dt + RC);

        if (dt_mean < dt_eps)
            dt_mean = dt;
        else
            dt_mean = (1-alpha) * dt_mean + alpha * dt;

        cam_info.fps = dt_mean > dt_eps ? 1 / dt_mean : 0;
        cam_info.res_x = frame.cols;
        cam_info.res_y = frame.rows;
        cam_info.fov = fov;

        return { true, cam_info };
    }
    else
        return { false, {} };
}

// Safe release for COM interfaces
template<class Interface>
inline void SafeRelease(Interface*& pInterfaceToRelease)
{
    if (pInterfaceToRelease != NULL)
    {
        pInterfaceToRelease->Release();
        pInterfaceToRelease = NULL;
    }
}

bool CameraKinectIr::start(int idx, int fps, int res_x, int res_y)
{
    if (idx >= 0 && fps >= 0 && res_x >= 0 && res_y >= 0)
    {
        if (cam_desired.idx != idx ||
            (int)cam_desired.fps != fps ||
            cam_desired.res_x != res_x ||
            cam_desired.res_y != res_y)
        {
            stop();

            desired_name = get_camera_names().value(idx);

            // Whether the desired camera is the Kinect IR sensor (currently unused)
            const bool kinectIRSensor = desired_name.compare(KKinectIRSensor) == 0;
            (void)kinectIRSensor;

            cam_desired.idx = idx;
            cam_desired.fps = fps;
            cam_desired.res_x = res_x;
            cam_desired.res_y = res_y;
            cam_desired.fov = fov;

            HRESULT hr;

            // Get and open the Kinect sensor
            hr = GetDefaultKinectSensor(&iKinectSensor);

            if (SUCCEEDED(hr))
            {
                hr = iKinectSensor->Open();
            }

            // Create the infrared frame reader
            if (SUCCEEDED(hr))
            {
                // The sensor is already open; get its infrared frame source and open a reader on it
                IInfraredFrameSource* pInfraredFrameSource = NULL;

                hr = iKinectSensor->get_InfraredFrameSource(&pInfraredFrameSource);

                if (SUCCEEDED(hr))
                {
                    hr = pInfraredFrameSource->OpenReader(&iInfraredFrameReader);
                }

                SafeRelease(pInfraredFrameSource);
            }

            //if (cap->isOpened())
            {
                cam_info = pt_camera_info();
                cam_info.idx = idx;
                dt_mean = 0;
                active_name = desired_name;

                cv::Mat tmp;

                if (get_frame_(tmp))
                {
                    t.start();
                    return true;
                }
            }

            return false;
        }

        return true;
    }

    stop();
    return false;
}

void CameraKinectIr::stop()
{
    // done with the infrared frame reader
    SafeRelease(iInfraredFrame);
    SafeRelease(iInfraredFrameReader);

    // close the Kinect sensor
    if (iKinectSensor)
    {
        iKinectSensor->Close();
    }

    SafeRelease(iKinectSensor);

    desired_name = QString{};
    active_name = QString{};
    cam_info = {};
    cam_desired = {};
}
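
// Note on the conversion done in get_frame_() below: with min = max - 255 the
// convertTo() call reduces to an offset-only mapping,
//
//     v8 = saturate_cast<uchar>((v16 - min) * 255 / (max - min))
//        = saturate_cast<uchar>(v16 - 65280)
//
// i.e. only the brightest 256 of the 65536 possible IR intensity levels are
// kept: strong IR returns (e.g. from reflective markers or IR LEDs) map
// towards white while the dimmer background clamps to black, which suits the
// point extractor that consumes these frames.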
bool CameraKinectIr::get_frame_(cv::Mat& frame)
{
    if (!iInfraredFrameReader)
    {
        return false;
    }

    bool success = false;

    // Release the previous frame, if any
    SafeRelease(iInfraredFrame);

    HRESULT hr = iInfraredFrameReader->AcquireLatestFrame(&iInfraredFrame);

    if (SUCCEEDED(hr))
    {
        INT64 nTime = 0;
        IFrameDescription* frameDescription = NULL;
        int nWidth = 0;
        int nHeight = 0;
        float diagonalFieldOfView = 0.0f;
        UINT nBufferSize = 0;
        UINT16* pBuffer = NULL;

        hr = iInfraredFrame->get_RelativeTime(&nTime);

        if (SUCCEEDED(hr))
        {
            hr = iInfraredFrame->get_FrameDescription(&frameDescription);
        }

        if (SUCCEEDED(hr))
        {
            hr = frameDescription->get_Width(&nWidth);
        }

        if (SUCCEEDED(hr))
        {
            hr = frameDescription->get_Height(&nHeight);
        }

        if (SUCCEEDED(hr))
        {
            hr = frameDescription->get_DiagonalFieldOfView(&diagonalFieldOfView);
        }

        if (SUCCEEDED(hr))
        {
            hr = iInfraredFrame->AccessUnderlyingBuffer(&nBufferSize, &pBuffer);
        }

        if (SUCCEEDED(hr))
        {
            //ProcessInfrared(nTime, pBuffer, nWidth, nHeight);

            // Wrap our 16-bit IR buffer in an OpenCV matrix (no copy)
            cv::Mat raw = cv::Mat(nHeight, nWidth, CV_16UC1, pBuffer); // .clone();

            // Convert that matrix to a BGR one, as this is what our point extractor expects.
            // TODO: Ideally we should implement a point extractor that works with our native buffer.

            // First, resample to 8 bits
            double min = std::numeric_limits<uint16_t>::min();
            double max = std::numeric_limits<uint16_t>::max();
            // For scaling, keep only the top of the range so we get more precision
            // in the range we are interested in
            min = max - 255;
            //cv::minMaxLoc(raw, &min, &max); // Should we use the frame's 16-bit min and max instead?
            cv::Mat raw8;
            raw.convertTo(raw8, CV_8U, 255.0 / (max - min), -255.0 * min / (max - min));

            // Second, convert to BGR
            cv::cvtColor(raw8, frame, cv::COLOR_GRAY2BGR);

            success = true;
        }

        SafeRelease(frameDescription);
    }

    return success;

    /*
    if (cap && cap->isOpened())
    {
        for (unsigned i = 0; i < 10; i++)
        {
            if (cap->read(frame))
                return true;

            portable::sleep(50);
        }
    }

    return false;
    */
}

void CameraKinectIr::camera_deleter::operator()(cv::VideoCapture* cap)
{
    if (cap)
    {
        if (cap->isOpened())
            cap->release();

        delete cap;
    }
}

} // ns pt_module

#endif