author     Zhao Zhixu <zzx.2013@qq.com>  2023-04-07 21:30:37 +0800
committer  GitHub <noreply@github.com>   2023-04-07 13:30:37 +0000
commit     fc135349a356ee50703361cacb83ea4a83c71936 (patch)
tree       8b01ecc0b6d6ed1803f8d79cb356c7627020e45a /tracker-neuralnet
parent     0883d8a05f8f7f16d94516a27bfea9f9913a90be (diff)
tracker/neuralnet: Add support for building on Linux. (#1638)
Diffstat (limited to 'tracker-neuralnet')
-rw-r--r--  tracker-neuralnet/CMakeLists.txt       6
-rw-r--r--  tracker-neuralnet/model_adapters.cpp  42
-rw-r--r--  tracker-neuralnet/model_adapters.h    13
3 files changed, 46 insertions, 15 deletions
diff --git a/tracker-neuralnet/CMakeLists.txt b/tracker-neuralnet/CMakeLists.txt
index 4689dba1..9298cd1f 100644
--- a/tracker-neuralnet/CMakeLists.txt
+++ b/tracker-neuralnet/CMakeLists.txt
@@ -16,15 +16,17 @@ if(OpenCV_FOUND AND ONNXRuntime_FOUND AND OpenMP_FOUND)
otr_module(tracker-neuralnet)
- target_link_libraries(${self}
+ target_link_libraries(${self}
opentrack-cv
onnxruntime::onnxruntime
opencv_calib3d
opencv_imgproc
opencv_imgcodecs
opencv_core
- OpenMP::OpenMP_C
+ OpenMP::OpenMP_CXX
)
+ # OpenMP::OpenMP_CXX doesn't set up the -fopenmp linking option, so set it up ourselves.
+ target_link_options(${self} PUBLIC ${OpenMP_CXX_FLAGS})
install(
FILES "models/head-localizer.onnx"
diff --git a/tracker-neuralnet/model_adapters.cpp b/tracker-neuralnet/model_adapters.cpp
index af599321..a8e55b2a 100644
--- a/tracker-neuralnet/model_adapters.cpp
+++ b/tracker-neuralnet/model_adapters.cpp
@@ -7,7 +7,7 @@
#include <opencv2/imgproc.hpp>
#include <QDebug>
-
+#include <algorithm>
namespace neuralnet_tracker_ns
{
@@ -165,6 +165,24 @@ double Localizer::last_inference_time_millis() const
}
+std::string PoseEstimator::get_network_input_name(size_t i) const
+{
+#if ORT_API_VERSION >= 12
+ return std::string(&*session_.GetInputNameAllocated(i, allocator_));
+#else
+ return std::string(session_.GetInputName(i, allocator_));
+#endif
+}
+
+std::string PoseEstimator::get_network_output_name(size_t i) const
+{
+#if ORT_API_VERSION >= 12
+ return std::string(&*session_.GetOutputNameAllocated(i, allocator_));
+#else
+ return std::string(session_.GetOutputName(i, allocator_));
+#endif
+}
+
PoseEstimator::PoseEstimator(Ort::MemoryInfo &allocator_info, Ort::Session &&session)
: model_version_{session.GetModelMetadata().GetVersion()}
, session_{std::move(session)}
@@ -215,14 +233,16 @@ PoseEstimator::PoseEstimator(Ort::MemoryInfo &allocator_info, Ort::Session &&ses
qDebug() << "Pose model inputs (" << session_.GetInputCount() << ")";
qDebug() << "Pose model outputs (" << session_.GetOutputCount() << "):";
+ output_names_.resize(session_.GetOutputCount());
+ output_c_names_.resize(session_.GetOutputCount());
for (size_t i=0; i<session_.GetOutputCount(); ++i)
{
- const char* name = session_.GetOutputName(i, allocator_);
+ std::string name = get_network_output_name(i);
const auto& output_info = session_.GetOutputTypeInfo(i);
const auto& onnx_tensor_spec = output_info.GetTensorTypeAndShapeInfo();
auto my_tensor_spec = understood_outputs.find(name);
- qDebug() << "\t" << name << " (" << onnx_tensor_spec.GetShape() << ") dtype: " << onnx_tensor_spec.GetElementType() << " " <<
+ qDebug() << "\t" << name.c_str() << " (" << onnx_tensor_spec.GetShape() << ") dtype: " << onnx_tensor_spec.GetElementType() << " " <<
(my_tensor_spec != understood_outputs.end() ? "ok" : "unknown");
if (my_tensor_spec != understood_outputs.end())
@@ -240,7 +260,8 @@ PoseEstimator::PoseEstimator(Ort::MemoryInfo &allocator_info, Ort::Session &&ses
// Create tensor regardless and ignore output
output_val_.push_back(create_tensor(output_info, allocator_));
}
- output_names_.push_back(name);
+ output_names_[i] = name;
+ output_c_names_[i] = output_names_[i].c_str();
}
has_uncertainty_ = understood_outputs.at("rotaxis_scales_tril").available ||
@@ -270,9 +291,12 @@ PoseEstimator::PoseEstimator(Ort::MemoryInfo &allocator_info, Ort::Session &&ses
// output_val_.push_back(create_tensor(output_info, allocator_));
// }
+ input_names_.resize(session_.GetInputCount());
+ input_c_names_.resize(session_.GetInputCount());
for (size_t i = 0; i < session_.GetInputCount(); ++i)
{
- input_names_.push_back(session_.GetInputName(i, allocator_));
+ input_names_[i] = get_network_input_name(i);
+ input_c_names_[i] = input_names_[i].c_str();
}
assert (input_names_.size() == input_val_.size());
@@ -312,11 +336,11 @@ std::optional<PoseEstimator::Face> PoseEstimator::run(
{
session_.Run(
Ort::RunOptions{ nullptr },
- input_names_.data(),
+ input_c_names_.data(),
input_val_.data(),
input_val_.size(),
- output_names_.data(),
- output_val_.data(),
+ output_c_names_.data(),
+ output_val_.data(),
output_val_.size());
}
catch (const Ort::Exception &e)
@@ -430,4 +454,4 @@ double PoseEstimator::last_inference_time_millis() const
-} // namespace neuralnet_tracker_ns
\ No newline at end of file
+} // namespace neuralnet_tracker_ns
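
The model_adapters.cpp changes accommodate newer ONNX Runtime releases, where the old Ort::Session::GetInputName()/GetOutputName() accessors are deprecated in favour of GetInputNameAllocated()/GetOutputNameAllocated() (available from ORT API version 12), which return owning smart pointers. The tracker therefore copies each name into a std::string and keeps a parallel vector of const char* for Ort::Session::Run(). The following self-contained sketch illustrates that pattern; the function and variable names here are illustrative, not taken from the project.

// Illustrative sketch, not the project's code: query the tensor names once,
// keep owning std::string copies, and hand stable const char* pointers to
// Ort::Session::Run(), which still expects C-style name arrays.
#include <onnxruntime_cxx_api.h>
#include <string>
#include <vector>

void run_once(Ort::Session& session,
              std::vector<Ort::Value>& input_val,
              std::vector<Ort::Value>& output_val)
{
    Ort::AllocatorWithDefaultOptions alloc;

    // Size the vectors up front so the strings are never moved afterwards and
    // the c_str() pointers stored next to them stay valid while Run() executes.
    std::vector<std::string> in_names(session.GetInputCount());
    std::vector<const char*> in_c_names(session.GetInputCount());
    for (size_t i = 0; i < in_names.size(); ++i)
    {
#if ORT_API_VERSION >= 12
        in_names[i] = session.GetInputNameAllocated(i, alloc).get();  // owning smart pointer
#else
        in_names[i] = session.GetInputName(i, alloc);                 // pre-1.12 raw-pointer API
#endif
        in_c_names[i] = in_names[i].c_str();
    }

    std::vector<std::string> out_names(session.GetOutputCount());
    std::vector<const char*> out_c_names(session.GetOutputCount());
    for (size_t i = 0; i < out_names.size(); ++i)
    {
#if ORT_API_VERSION >= 12
        out_names[i] = session.GetOutputNameAllocated(i, alloc).get();
#else
        out_names[i] = session.GetOutputName(i, alloc);
#endif
        out_c_names[i] = out_names[i].c_str();
    }

    session.Run(Ort::RunOptions{nullptr},
                in_c_names.data(), input_val.data(), input_val.size(),
                out_c_names.data(), output_val.data(), output_val.size());
}

Sizing both vectors before filling them mirrors the resize() calls in the patch; because the strings are never reallocated afterwards, the c_str() pointers handed to Run() remain valid.
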
diff --git a/tracker-neuralnet/model_adapters.h b/tracker-neuralnet/model_adapters.h
index 3fbfb861..820330cf 100644
--- a/tracker-neuralnet/model_adapters.h
+++ b/tracker-neuralnet/model_adapters.h
@@ -3,6 +3,7 @@
#include <optional>
#include <array>
#include <vector>
+#include <string>
#include <onnxruntime_cxx_api.h>
#include <opencv2/core.hpp>
@@ -21,7 +22,7 @@ class Localizer
public:
Localizer(Ort::MemoryInfo &allocator_info,
Ort::Session &&session);
-
+
// Returns bounding wrt image coordinate of the input image
// The preceeding float is the score for being a face normalized to [0,1].
std::pair<float, cv::Rect2f> run(
@@ -68,13 +69,16 @@ class PoseEstimator
bool has_uncertainty() const { return has_uncertainty_; }
private:
+ std::string get_network_input_name(size_t i) const;
+ std::string get_network_output_name(size_t i) const;
int64_t model_version_ = 0; // Queried meta data from the ONNX file
Ort::Session session_{nullptr}; // ONNX's runtime context for running the model
Ort::Allocator allocator_; // Memory allocator for tensors
// Inputs
cv::Mat scaled_frame_{}, input_mat_{}; // Input. One is the original crop, the other is rescaled (?)
std::vector<Ort::Value> input_val_; // Tensors to put into the model
- std::vector<const char*> input_names_; // Refers to the names in the onnx model.
+ std::vector<std::string> input_names_; // Refers to the names in the onnx model.
+ std::vector<const char *> input_c_names_; // Refers to the C names in the onnx model.
// Outputs
cv::Vec<float, 3> output_coord_{}; // 2d Coordinate and head size output.
cv::Vec<float, 4> output_quat_{}; // Quaternion output
@@ -83,7 +87,8 @@ class PoseEstimator
cv::Vec<float, 2> output_eyes_{};
cv::Vec<float, 3> output_coord_scales_{};
std::vector<Ort::Value> output_val_; // Tensors to put the model outputs in.
- std::vector<const char*> output_names_; // Refers to the names in the onnx model.
+ std::vector<std::string> output_names_; // Refers to the names in the onnx model.
+ std::vector<const char *> output_c_names_; // Refers to the C names in the onnx model.
// More bookkeeping
size_t num_recurrent_states_ = 0;
double last_inference_time_ = 0;
@@ -99,4 +104,4 @@ int find_input_intensity_quantile(const cv::Mat& frame, float percentage);
void normalize_brightness(const cv::Mat& frame, cv::Mat& out);
-} // namespace neuralnet_tracker_ns
\ No newline at end of file
+} // namespace neuralnet_tracker_ns