Diffstat (limited to 'tracker-neuralnet')
-rw-r--r--  tracker-neuralnet/lang/zh_CN.ts     | 42
-rw-r--r--  tracker-neuralnet/model_adapters.h  |  2
2 files changed, 23 insertions, 21 deletions
diff --git a/tracker-neuralnet/lang/zh_CN.ts b/tracker-neuralnet/lang/zh_CN.ts
index c3a91211..cf12f304 100644
--- a/tracker-neuralnet/lang/zh_CN.ts
+++ b/tracker-neuralnet/lang/zh_CN.ts
@@ -1,35 +1,36 @@
<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE TS>
+
<TS version="2.1" language="zh_CN">
-<context>
+ <context>
<name>Form</name>
<message>
<source>Tracker settings</source>
- <translation type="unfinished"></translation>
+ <translation >追踪器设置</translation>
</message>
<message>
<source>Diagonal FOV</source>
- <translation type="unfinished"></translation>
+ <translation >对角FOV</translation>
</message>
<message>
<source>Camera name</source>
- <translation type="unfinished"></translation>
+ <translation >相机名</translation>
</message>
<message>
<source>Frames per second</source>
- <translation type="unfinished"></translation>
+ <translation >FPS</translation>
</message>
<message>
<source>Camera settings</source>
- <translation type="unfinished"></translation>
+ <translation >相机设置</translation>
</message>
<message>
<source>Camera Configuration</source>
- <translation type="unfinished"></translation>
+ <translation >相机配置</translation>
</message>
<message>
<source>Head Center Offset</source>
- <translation type="unfinished"></translation>
+ <translation >头部归中补偿</translation>
</message>
<message>
<source> mm</source>
@@ -38,27 +39,28 @@
<message>
<source>Use only yaw and pitch while calibrating.
Don&apos;t roll or change position.</source>
- <translation type="unfinished"></translation>
+ <translation >在校准时只使用偏航和俯仰,
+不要滚转或是改变位置. </translation>
</message>
<message>
<source>Start calibration</source>
- <translation type="unfinished"></translation>
+ <translation >开始校准</translation>
</message>
<message>
<source>Right</source>
- <translation type="unfinished"></translation>
+ <translation >向右</translation>
</message>
<message>
<source>Forward</source>
- <translation type="unfinished"></translation>
+ <translation >向前</translation>
</message>
<message>
<source>Up</source>
- <translation type="unfinished"></translation>
+ <translation >向上</translation>
</message>
<message>
<source>Show Network Input</source>
- <translation type="unfinished"></translation>
+ <translation >展示神经网络输入</translation>
</message>
<message>
<source>MJPEG</source>
@@ -112,20 +114,20 @@ Don&apos;t roll or change position.</source>
<source>Zoom factor for the face region. Applied before the patch is fed into the pose estimation model. There is a sweet spot near 1.</source>
<translation type="unfinished"></translation>
</message>
-</context>
+ </context>
<context>
<name>neuralnet_tracker_ns::NeuralNetDialog</name>
<message>
<source>Default</source>
- <translation type="unfinished"></translation>
+ <translation >默认</translation>
</message>
<message>
<source>Tracker Offline</source>
- <translation type="unfinished"></translation>
+ <translation >追踪器离线</translation>
</message>
<message>
<source>%1x%2 @ %3 FPS / Inference: %4 ms</source>
- <translation type="unfinished"></translation>
+ <translation >%1x%2 @ %3 FPS / 推理: %4 ms</translation>
</message>
<message>
<source>%1 yaw samples. Yaw more to %2 samples for stable calibration.</source>
@@ -141,11 +143,11 @@ Don&apos;t roll or change position.</source>
</message>
<message>
<source>Stop calibration</source>
- <translation type="unfinished"></translation>
+ <translation >结束校准</translation>
</message>
<message>
<source>Start calibration</source>
- <translation type="unfinished"></translation>
+ <translation >开始校准</translation>
</message>
</context>
</TS>
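
The zh_CN.ts changes above follow Qt Linguist's convention: removing the type="unfinished" attribute and filling in the <translation> element marks a string as translated. For context, below is a minimal sketch of how such a translation is typically consumed at runtime, assuming the .ts source is compiled to a .qm catalog with lrelease; the file name and directory used here are illustrative, not opentrack's actual resource layout.

#include <QApplication>
#include <QTranslator>

int main(int argc, char** argv)
{
    QApplication app(argc, argv);

    // Assumed build step (not part of this diff):
    //   lrelease tracker-neuralnet/lang/zh_CN.ts -qm zh_CN.qm
    QTranslator translator;
    if (translator.load("zh_CN", "lang"))       // hypothetical output directory
        app.installTranslator(&translator);     // Form strings now resolve to the Chinese text

    return app.exec();
}
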
diff --git a/tracker-neuralnet/model_adapters.h b/tracker-neuralnet/model_adapters.h
index 820330cf..48f2fa2c 100644
--- a/tracker-neuralnet/model_adapters.h
+++ b/tracker-neuralnet/model_adapters.h
@@ -73,7 +73,7 @@ class PoseEstimator
std::string get_network_output_name(size_t i) const;
int64_t model_version_ = 0; // Queried meta data from the ONNX file
Ort::Session session_{nullptr}; // ONNX's runtime context for running the model
- Ort::Allocator allocator_; // Memory allocator for tensors
+ mutable Ort::Allocator allocator_; // Memory allocator for tensors
// Inputs
cv::Mat scaled_frame_{}, input_mat_{}; // Input. One is the original crop, the other is rescaled (?)
std::vector<Ort::Value> input_val_; // Tensors to put into the model
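
The model_adapters.h hunk marks the ONNX Runtime allocator as mutable, presumably because it is handed to the runtime as a non-const OrtAllocator* from const accessors such as get_network_output_name(). Below is a minimal sketch of that pattern, assuming a recent ONNX Runtime C++ API (GetOutputNameAllocated); the class name and constructor details are illustrative, not the project's actual implementation.

#include <onnxruntime_cxx_api.h>
#include <cstddef>
#include <string>
#include <utility>

class PoseEstimatorSketch
{
public:
    explicit PoseEstimatorSketch(Ort::Session&& session)
        : session_{std::move(session)},
          allocator_{session_, Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeDefault)}
    {}

    std::string get_network_output_name(std::size_t i) const
    {
        // GetOutputNameAllocated() takes a non-const OrtAllocator*. Without
        // `mutable`, allocator_ would be const inside this const method and
        // the conversion to OrtAllocator* would not compile.
        return std::string(session_.GetOutputNameAllocated(i, allocator_).get());
    }

private:
    Ort::Session session_;
    mutable Ort::Allocator allocator_;  // mirrors the change in this commit
};
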