| field | value | date |
|---|---|---|
| author | Stanislaw Halik <sthalik@misaki.pl> | 2015-12-19 20:44:41 +0100 |
| committer | Stanislaw Halik <sthalik@misaki.pl> | 2015-12-19 20:46:28 +0100 |
| commit | fba00166c94f066bf0d8d2174b508d2f849abe53 | |
| tree | 71eac5737e99bcc435ce2658cd459193f6d1f5d7 | |
| parent | 90138a999b4c95afeb9a49d355b0234b6145e221 | |
tracker/pt: don't copy points array needlessly
| mode | file | changes |
|---|---|---|
| -rw-r--r-- | tracker-pt/ftnoir_tracker_pt.cpp | 47 |
| -rw-r--r-- | tracker-pt/point_extractor.cpp | 13 |
| -rw-r--r-- | tracker-pt/point_extractor.h | 2 |

3 files changed, 34 insertions, 28 deletions
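The gist of the change: `PointExtractor::extract_points()` used to return a fresh `std::vector<cv::Vec2f>` by value, so every frame copied the points array; it now returns a `const` reference to the extractor's own `points` member, which the tracker loop binds with `const auto&`. As the updated header comment warns, that reference is only valid as long as the extractor object itself. A minimal, self-contained sketch of the pattern, with illustrative `Extractor`/`Point` names rather than the real opentrack types:

```cpp
// Sketch of the return-by-const-reference pattern the commit adopts.
// `Extractor` and `Point` are illustrative names, not the real classes.
#include <vector>

struct Point { float x, y; };

class Extractor
{
    std::vector<Point> points; // reused across calls, no per-frame copy

public:
    // Before: std::vector<Point> extract();  -- returned a copy every call.
    // After: the caller sees the member buffer directly; the reference stays
    // valid only as long as the Extractor lives (and until the next call).
    const std::vector<Point>& extract()
    {
        points.clear();            // keeps capacity, drops old contents
        points.push_back({0.f, 0.f}); // ...detection work would fill this in
        return points;
    }
};

int main()
{
    Extractor ex;
    const auto& pts = ex.extract(); // binds to the member, no copy
    return pts.empty() ? 1 : 0;
}
```

Because the caller no longer owns a mutable copy, the old truncation to `PointModel::N_POINTS` is dropped (`success` now checks `>=` instead of `==`), and the red cross for the projected head position is drawn through a lambda instead of being appended to the vector.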
```diff
diff --git a/tracker-pt/ftnoir_tracker_pt.cpp b/tracker-pt/ftnoir_tracker_pt.cpp
index 9a4fa037..88356771 100644
--- a/tracker-pt/ftnoir_tracker_pt.cpp
+++ b/tracker-pt/ftnoir_tracker_pt.cpp
@@ -97,17 +97,13 @@ void Tracker_PT::run()
         if (new_frame && !frame_.empty())
         {
-            std::vector<cv::Vec2f> points = point_extractor.extract_points(frame);
-
-            // blobs are sorted in order of circularity
-            if (points.size() > PointModel::N_POINTS)
-                points.resize(PointModel::N_POINTS);
+            const auto& points = point_extractor.extract_points(frame_);
 
             float fx;
             if (!get_focal_length(fx))
                 continue;
 
-            const bool success = points.size() == PointModel::N_POINTS;
+            const bool success = points.size() >= PointModel::N_POINTS;
 
             if (success)
             {
@@ -116,35 +112,36 @@ void Tracker_PT::run()
             }
 
             Affine X_CM = pose();
-
-            {
-                Affine X_MH(cv::Matx33f::eye(), cv::Vec3f(s.t_MH_x, s.t_MH_y, s.t_MH_z)); // just copy pasted these lines from below
-                Affine X_GH = X_CM * X_MH;
-                cv::Vec3f p = X_GH.t; // head (center?) position in global space
-                cv::Vec2f p_(p[0] / p[2] * fx, p[1] / p[2] * fx);  // projected to screen
-                points.push_back(p_);
-            }
-
-            for (unsigned i = 0; i < points.size(); i++)
+
+            std::function<void(const cv::Vec2f&, const cv::Scalar)> fun = [&](const cv::Vec2f& p, const cv::Scalar color)
             {
-                auto& p = points[i];
-                auto p2 = cv::Point(p[0] * frame.cols + frame.cols/2, -p[1] * frame.cols + frame.rows/2);
-                cv::Scalar color(0, 255, 0);
-                if (i == points.size()-1)
-                    color = cv::Scalar(0, 0, 255);
-                cv::line(frame,
+                auto p2 = cv::Point(p[0] * frame_.cols + frame_.cols/2, -p[1] * frame_.cols + frame_.rows/2);
+                cv::line(frame_,
                          cv::Point(p2.x - 20, p2.y),
                          cv::Point(p2.x + 20, p2.y),
                          color,
                          4);
-                cv::line(frame,
+                cv::line(frame_,
                          cv::Point(p2.x, p2.y - 20),
                          cv::Point(p2.x, p2.y + 20),
                          color,
-                         4);
+                         4);
+            };
+
+            for (unsigned i = 0; i < points.size(); i++)
+            {
+                fun(points[i], cv::Scalar(0, 255, 0));
+            }
+
+            {
+                Affine X_MH(cv::Matx33f::eye(), cv::Vec3f(s.t_MH_x, s.t_MH_y, s.t_MH_z)); // just copy pasted these lines from below
+                Affine X_GH = X_CM * X_MH;
+                cv::Vec3f p = X_GH.t; // head (center?) position in global space
+                cv::Vec2f p_(p[0] / p[2] * fx, p[1] / p[2] * fx);  // projected to screen
+                fun(p_, cv::Scalar(0, 0, 255));
             }
 
-            video_widget->update_image(frame);
+            video_widget->update_image(frame_);
         }
     }
     qDebug()<<"Tracker:: Thread stopping";
diff --git a/tracker-pt/point_extractor.cpp b/tracker-pt/point_extractor.cpp
index 6118f3d2..3808c408 100644
--- a/tracker-pt/point_extractor.cpp
+++ b/tracker-pt/point_extractor.cpp
@@ -16,10 +16,17 @@
 PointExtractor::PointExtractor()
 {
 }
-std::vector<cv::Vec2f> PointExtractor::extract_points(cv::Mat& frame)
+
+const std::vector<cv::Vec2f>& PointExtractor::extract_points(cv::Mat& frame)
 {
     const int W = frame.cols;
     const int H = frame.rows;
+
+    if (frame_gray.rows != frame.rows || frame_gray.cols != frame.cols)
+    {
+        frame_gray = cv::Mat(frame.rows, frame.cols, CV_8U);
+        frame_bin = cv::Mat(frame.rows, frame.cols, CV_8U);;
+    }
 
     // convert to grayscale
     cv::Mat frame_gray;
@@ -151,9 +158,11 @@ std::vector<cv::Vec2f> PointExtractor::extract_points(cv::Mat& frame)
     using b = const blob;
     std::sort(blobs.begin(), blobs.end(), [](b& b1, b& b2) {return b1.confid > b2.confid;});
 
+    points.reserve(blobs.size());
+
     QMutexLocker l(&mtx);
 
-	points.clear();
+    points.clear();
 
     for (auto& b : blobs)
     {
diff --git a/tracker-pt/point_extractor.h b/tracker-pt/point_extractor.h
index 80c1897d..979cc8b6 100644
--- a/tracker-pt/point_extractor.h
+++ b/tracker-pt/point_extractor.h
@@ -21,7 +21,7 @@ public:
     // extracts points from frame and draws some processing info into frame, if draw_output is set
     // dt: time since last call in seconds
     // WARNING: returned reference is valid as long as object
-    std::vector<cv::Vec2f> extract_points(cv::Mat &frame);
+    const std::vector<cv::Vec2f> &extract_points(cv::Mat &frame);
     const std::vector<cv::Vec2f>& get_points() { QMutexLocker l(&mtx); return points; }
 
     PointExtractor();
```
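A second, smaller part of the commit is in `point_extractor.cpp`: the grayscale and binary scratch images become members that are reallocated only when the incoming frame changes size, instead of every frame. A rough sketch of that buffer-reuse pattern, assuming OpenCV 3+ headers and a hypothetical `GrayscaleCache` class rather than the real `PointExtractor`:

```cpp
// Sketch of "allocate scratch buffers only when the frame size changes".
// `GrayscaleCache` is a made-up name; requires OpenCV (core + imgproc).
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

class GrayscaleCache
{
    cv::Mat frame_gray, frame_bin; // persist across frames

public:
    const cv::Mat& binarize(const cv::Mat& frame, double thresh)
    {
        // Reallocate only when the incoming frame changed size; otherwise the
        // existing buffers are reused and no per-frame allocation happens.
        if (frame_gray.rows != frame.rows || frame_gray.cols != frame.cols)
        {
            frame_gray = cv::Mat(frame.rows, frame.cols, CV_8U);
            frame_bin  = cv::Mat(frame.rows, frame.cols, CV_8U);
        }

        cv::cvtColor(frame, frame_gray, cv::COLOR_BGR2GRAY);
        cv::threshold(frame_gray, frame_bin, thresh, 255, cv::THRESH_BINARY);
        return frame_bin;
    }
};

int main()
{
    GrayscaleCache cache;
    cv::Mat frame(480, 640, CV_8UC3, cv::Scalar(10, 20, 30)); // dummy BGR frame
    const cv::Mat& bin = cache.binarize(frame, 127);
    return bin.empty() ? 1 : 0;
}
```

The same idea shows up as `points.reserve(blobs.size())` before the per-blob loop: size the output once up front rather than growing it incrementally.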
