author    | Stanislaw Halik <sthalik@misaki.pl> | 2023-05-16 12:14:47 +0200
committer | Stanislaw Halik <sthalik@misaki.pl> | 2023-05-16 12:16:23 +0200
commit    | 5e73a2f8af689afbf9ca4da0c7239b6537e6a9ce (patch)
tree      | 42859c48d667106b57147c8d1175d39b355ad2dc
parent    | 2c831b784e42d96e62b46d647bcb5ba09ca4a05c (diff)
merge in trackhat ui
-rw-r--r-- | opentrack/main-window.cpp                      | 36
-rw-r--r-- | tracker-neuralnet/ftnoir_tracker_neuralnet.cpp | 69
-rw-r--r-- | tracker-neuralnet/lang/ru_RU.ts                 | 40
3 files changed, 105 insertions, 40 deletions
diff --git a/opentrack/main-window.cpp b/opentrack/main-window.cpp
index f449845b..3e60b7a3 100644
--- a/opentrack/main-window.cpp
+++ b/opentrack/main-window.cpp
@@ -18,6 +18,7 @@
 #include "compat/math.hpp"
 #include "compat/sysexits.hpp"
 #include "opentrack/defs.hpp"
+#include "software-update-dialog.hpp"
 
 #include <cstring>
 #include <utility>
@@ -33,6 +34,7 @@
 #include <QDateTime>
 
 extern "C" const char* const opentrack_version;
+extern "C" OTR_GENERIC_IMPORT bool opentrack_using_dark_theme;
 
 using namespace options::globals;
 using namespace options;
@@ -58,6 +60,8 @@ main_window::main_window() : State(OPENTRACK_BASE_PATH + OPENTRACK_LIBRARY_PATH)
     setVisible(!start_in_tray());
     ensure_tray();
 
+    ui.pose_display->set_grid_background(opentrack_using_dark_theme);
+
     connect(&pose_update_timer, &QTimer::timeout,
             this, &main_window::show_pose, Qt::DirectConnection);
     connect(&det_timer, &QTimer::timeout,
@@ -70,7 +74,13 @@ main_window::main_window() : State(OPENTRACK_BASE_PATH + OPENTRACK_LIBRARY_PATH)
 #ifdef UI_NO_VIDEO_FEED
     fake_video_frame.resize(640, 480);
     fake_video_frame_parent.setVisible(false);
+#elif defined UI_COMPACT_VIDEO_FEED
+    connect(ui.preview_checkbox, &QCheckBox::toggled, this, &main_window::toggle_video_preview);
 #endif
+
+    updater = std::make_unique<update_query>(this);
+    updater->maybe_show_dialog();
+
 }
 
 void main_window::init_shortcuts()
@@ -419,7 +429,12 @@ void main_window::update_button_state(bool running, bool inertialp)
     ui.iconcomboTrackerSource->setEnabled(not_running);
 #endif
     ui.profile_button->setEnabled(not_running);
-#ifndef UI_NO_VIDEO_FEED
+#ifdef UI_COMPACT_VIDEO_FEED
+    ui.preview_checkbox->setChecked(false);
+    ui.preview_checkbox->raise();
+    ui.preview_checkbox->setVisible(running && !inertialp);
+    toggle_video_preview(false);
+#elif !defined UI_NO_VIDEO_FEED
     ui.video_frame_label->setVisible(not_running || inertialp);
     if(not_running)
     {
@@ -431,6 +446,22 @@ void main_window::update_button_state(bool running, bool inertialp)
 #endif
 }
 
+#ifdef UI_COMPACT_VIDEO_FEED
+void main_window::toggle_video_preview(bool value)
+{
+    value &= ui.video_frame->layout() != nullptr;
+    ui.video_frame_parent->setVisible(value);
+    ui.video_frame_parent->raise();
+    ui.video_frame->raise();
+    ui.pose_display->setVisible(!value);
+    ui.preview_checkbox->raise();
+    if (value)
+        ui.preview_checkbox->setStyleSheet("QCheckBox { color: #32CD32 }");
+    else
+        ui.preview_checkbox->setStyleSheet("");
+}
+#endif
+
 void main_window::start_tracker_()
 {
     if (work)
@@ -471,7 +502,7 @@ void main_window::start_tracker_()
         options_widget->register_filter(&*work->libs.pFilter);
     }
 
-    pose_update_timer.start(1000/30);
+    pose_update_timer.start(15);
 
     // NB check valid since SelectedLibraries ctor called
     // trackers take care of layout state updates
@@ -683,6 +714,7 @@ static void show_module_settings(std::shared_ptr<Instance> instance,
                                  void(Dialog::*register_fun)(Instance*),
                                  void(options_dialog::*switch_tab_fun)())
 {
+    using BaseDialog = plugin_api::detail::BaseDialog;
     if (!lib || !lib->Dialog)
         return;
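A quick unit check on the pose-display timer change above: QTimer::start() takes its period in milliseconds, so the old 1000/30 truncates to a 33 ms period (about 30 updates per second), while the new start(15) refreshes every 15 ms, roughly 66 updates per second.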
diff --git a/tracker-neuralnet/ftnoir_tracker_neuralnet.cpp b/tracker-neuralnet/ftnoir_tracker_neuralnet.cpp
index 59e17063..797febd8 100644
--- a/tracker-neuralnet/ftnoir_tracker_neuralnet.cpp
+++ b/tracker-neuralnet/ftnoir_tracker_neuralnet.cpp
@@ -13,6 +13,7 @@
 #include "compat/math-imports.hpp"
 #include "compat/timer.hpp"
 #include "compat/check-visible.hpp"
+#include "compat/camera-names.hpp"
 #include "cv/init.hpp"
 
 #include <omp.h>
@@ -83,7 +84,8 @@ struct OnScopeExit
 CamIntrinsics make_intrinsics(const cv::Mat& img, const Settings& settings)
 {
     const int w = img.cols, h = img.rows;
-    const double diag_fov = settings.fov * M_PI / 180.;
+    //const double diag_fov = settings.fov * M_PI / 180.;
+    const double diag_fov = 60 * M_PI / 180.; (void)settings;
     const double fov_w = 2.*atan(tan(diag_fov/2.)/sqrt(1. + h/(double)w * h/(double)w));
     const double fov_h = 2.*atan(tan(diag_fov/2.)/sqrt(1. + w/(double)h * w/(double)h));
     const double focal_length_w = 1. / tan(.5 * fov_w);
@@ -351,7 +353,7 @@ bool NeuralNetTracker::detect()
         last_pose_affine_ = pose_affine;
     }
 
-    draw_gizmos(*face, last_pose_affine_);
+    draw_gizmos(*face, pose_affine);
 
     return true;
 }
@@ -501,17 +503,38 @@ bool NeuralNetTracker::load_and_initialize_model()
 
 bool NeuralNetTracker::open_camera()
 {
+#if 0
     int rint = std::clamp(*settings_.resolution, 0, (int)std::size(resolution_choices)-1);
     resolution_tuple res = resolution_choices[rint];
     int fps = enum_to_fps(settings_.force_fps);
+#endif
+
+    video::impl::camera::info args {};
+    args.width = 640;
+    args.height = 480;
+    args.fps = 60;
+    args.use_mjpeg = true;
 
     QMutexLocker l(&camera_mtx_);
 
-    camera_ = video::make_camera(settings_.camera_name);
+    camera_ = nullptr;
+    const QString name = settings_.camera_name;
+
+    if (name.isEmpty() || name == "TrackHat sensor")
+    {
+        camera_ = video::make_camera_("TrackHat sensor");
+        if (camera_ && camera_->start(args))
+            return true;
+        if (!name.isEmpty())
+            return false;
+    }
+
+    camera_ = video::make_camera(name);
 
     if (!camera_)
         return false;
 
+#if 0
     video::impl::camera::info args {};
 
     if (res.width)
@@ -523,6 +546,7 @@ bool NeuralNetTracker::open_camera()
         args.fps = fps;
 
     args.use_mjpeg = settings_.use_mjpeg;
+#endif
 
     if (!camera_->start(args))
     {
@@ -600,6 +624,8 @@ void NeuralNetTracker::run()
                 std::chrono::duration_cast<std::chrono::milliseconds>(
                     clk.now() - t).count()*1.e-3);
         }
+
+    camera_ = nullptr;
 }
 
 
@@ -644,19 +670,23 @@ void NeuralNetTracker::update_fps(double dt)
 
 void NeuralNetTracker::data(double *data)
 {
-    Affine tmp = [&]()
+    auto tmp2 = [&]()
     {
         QMutexLocker lck(&mtx_);
         return last_pose_affine_;
     }();
 
+    if (!tmp2)
+        return;
+    const auto& tmp = *tmp2;
+
     const auto& mx = tmp.R.col(0);
     const auto& my = tmp.R.col(1);
     const auto& mz = -tmp.R.col(2);
 
     const float yaw = std::atan2(mx(2), mx(0));
     const float pitch = -std::atan2(-mx(1), std::sqrt(mx(2)*mx(2)+mx(0)*mx(0)));
-    const float roll = std::atan2(-my(2), mz(2));
+    const float roll = -std::atan2(-my(2), mz(2));
     {
         constexpr double rad2deg = 180/M_PI;
         data[Yaw] = rad2deg * yaw;
@@ -674,7 +704,7 @@ void NeuralNetTracker::data(double *data)
 Affine NeuralNetTracker::pose()
 {
     QMutexLocker lck(&mtx_);
-    return last_pose_affine_;
+    return last_pose_affine_ ? *last_pose_affine_ : Affine{};
 }
 
 std::tuple<cv::Size,double, double> NeuralNetTracker::stats() const
tr("Default") : QString::number(hz); ui_.cameraFPS->addItem(name, k); } +#endif } void NeuralNetDialog::make_resolution_combobox() { +#if 0 int k=0; for (const auto [w, h] : resolution_choices) { @@ -703,6 +736,7 @@ void NeuralNetDialog::make_resolution_combobox() : QString::number(w) + " x " + QString::number(h); ui_.resolution->addItem(s, k++); } +#endif } @@ -714,14 +748,18 @@ NeuralNetDialog::NeuralNetDialog() : make_fps_combobox(); make_resolution_combobox(); + ui_.cameraName->addItem(QString{}); for (const auto& str : video::camera_names()) ui_.cameraName->addItem(str); tie_setting(settings_.camera_name, ui_.cameraName); +#if 0 tie_setting(settings_.fov, ui_.cameraFOV); +#endif tie_setting(settings_.offset_fwd, ui_.tx_spin); tie_setting(settings_.offset_up, ui_.ty_spin); tie_setting(settings_.offset_right, ui_.tz_spin); +#if 0 tie_setting(settings_.show_network_input, ui_.showNetworkInput); tie_setting(settings_.roi_filter_alpha, ui_.roiFilterAlpha); tie_setting(settings_.use_mjpeg, ui_.use_mjpeg); @@ -729,6 +767,23 @@ NeuralNetDialog::NeuralNetDialog() : tie_setting(settings_.num_threads, ui_.threadCount); tie_setting(settings_.resolution, ui_.resolution); tie_setting(settings_.force_fps, ui_.cameraFPS); +#endif + + { + const struct { + QString label; + exposure_preset preset; + } presets[] = { + { QStringLiteral("Near (1-4ft)"), exposure_preset::near }, + { QStringLiteral("Far (4-8ft)"), exposure_preset::far }, + { QStringLiteral("Custom"), exposure_preset::ignored }, + }; + + for (const auto& [label, preset] : presets) + ui_.exposure_preset->addItem(label, int(preset)); + + tie_setting(cs_.exposure, ui_.exposure_preset); + } connect(ui_.buttonBox, SIGNAL(accepted()), this, SLOT(doOK())); connect(ui_.buttonBox, SIGNAL(rejected()), this, SLOT(doCancel())); @@ -750,11 +805,13 @@ NeuralNetDialog::NeuralNetDialog() : void NeuralNetDialog::save() { settings_.b->save(); + cs_.b->save(); } void NeuralNetDialog::reload() { settings_.b->reload(); + cs_.b->reload(); } void NeuralNetDialog::doOK() diff --git a/tracker-neuralnet/lang/ru_RU.ts b/tracker-neuralnet/lang/ru_RU.ts index b191e769..cbfce1d5 100644 --- a/tracker-neuralnet/lang/ru_RU.ts +++ b/tracker-neuralnet/lang/ru_RU.ts @@ -8,22 +8,10 @@ <translation>Настройки трекера</translation> </message> <message> - <source>Diagonal FOV</source> - <translation>Угол обзора</translation> - </message> - <message> <source>Camera settings</source> <translation>Настройки камеры</translation> </message> <message> - <source>Frames per second</source> - <translation>Кадры в секунду</translation> - </message> - <message> - <source>Camera name</source> - <translation>Камера</translation> - </message> - <message> <source>Camera Configuration</source> <translation>Конфигурация камеры</translation> </message> @@ -62,10 +50,6 @@ Don't roll or change position.</source> <translation>Показать входные данные</translation> </message> <message> - <source>MJPEG</source> - <translation>Использовать MJPEG</translation> - </message> - <message> <source>Tuning / Debug</source> <translation>Тонкая настройка</translation> </message> @@ -82,22 +66,6 @@ Don't roll or change position.</source> <translation>Количество потоков</translation> </message> <message> - <source>Resolution</source> - <translation>Разрешение</translation> - </message> - <message> - <source>Field of view. Needed to transform the pose to world coordinates.</source> - <translation>Угол обзора камеры. 
diff --git a/tracker-neuralnet/lang/ru_RU.ts b/tracker-neuralnet/lang/ru_RU.ts
index b191e769..cbfce1d5 100644
--- a/tracker-neuralnet/lang/ru_RU.ts
+++ b/tracker-neuralnet/lang/ru_RU.ts
@@ -8,22 +8,10 @@
         <translation>Настройки трекера</translation>
     </message>
     <message>
-        <source>Diagonal FOV</source>
-        <translation>Угол обзора</translation>
-    </message>
-    <message>
         <source>Camera settings</source>
         <translation>Настройки камеры</translation>
     </message>
     <message>
-        <source>Frames per second</source>
-        <translation>Кадры в секунду</translation>
-    </message>
-    <message>
-        <source>Camera name</source>
-        <translation>Камера</translation>
-    </message>
-    <message>
         <source>Camera Configuration</source>
         <translation>Конфигурация камеры</translation>
     </message>
@@ -62,10 +50,6 @@ Don't roll or change position.</source>
         <translation>Показать входные данные</translation>
     </message>
     <message>
-        <source>MJPEG</source>
-        <translation>Использовать MJPEG</translation>
-    </message>
-    <message>
         <source>Tuning / Debug</source>
         <translation>Тонкая настройка</translation>
     </message>
@@ -82,22 +66,6 @@ Don't roll or change position.</source>
         <translation>Количество потоков</translation>
     </message>
     <message>
-        <source>Resolution</source>
-        <translation>Разрешение</translation>
-    </message>
-    <message>
-        <source>Field of view. Needed to transform the pose to world coordinates.</source>
-        <translation>Угол обзора камеры. Требуется для преобразования положения головы в глобальные координаты</translation>
-    </message>
-    <message>
-        <source>Requested video frame rate. Actual setting may not be supported by the camera.</source>
-        <translation>Частота кадров. Реальные значения могут не поддерживаться камерой.</translation>
-    </message>
-    <message>
-        <source>The requested resolution for cases where the camera delivers maximum frame rate only for a particular resolution. The image may still be downscaled to the internal resolution.</source>
-        <translation>Разрешение камеры, для тех случаев, когда быстродействие камеры максимально в определенном разрешении. Может быть масштабировано до внутреннего разрешения.</translation>
-    </message>
-    <message>
         <source>Number of threads. Can be used to balance the CPU load between the game and the tracker.</source>
         <translation>Количество потоков. Используется для балансировки нагрузки на процессор между игрой и трекером.</translation>
     </message>
@@ -113,6 +81,14 @@ Don't roll or change position.</source>
         <source>Zoom factor for the face region. Applied before the patch is fed into the pose estimation model. There is a sweet spot near 1.</source>
         <translation>Фактор масштабирования области лица. Применяется перед передачей кадра в модель определения позиции. Наилучшие результаты близки к 1</translation>
     </message>
+    <message>
+        <source>Camera override</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <source>Exposure preset</source>
+        <translation type="unfinished"></translation>
+    </message>
 </context>
 <context>
     <name>neuralnet_tracker_ns::NeuralNetDialog</name>