-rw-r--r--	src/argaze/utils/export_tobii_segment_aruco_visual_scan.py	13
-rw-r--r--	src/argaze/utils/live_tobii_aruco_aoi_ivy_controller.py	11
2 files changed, 15 insertions(+), 9 deletions(-)
diff --git a/src/argaze/utils/export_tobii_segment_aruco_visual_scan.py b/src/argaze/utils/export_tobii_segment_aruco_visual_scan.py
index 2adf773..4825c9e 100644
--- a/src/argaze/utils/export_tobii_segment_aruco_visual_scan.py
+++ b/src/argaze/utils/export_tobii_segment_aruco_visual_scan.py
@@ -191,6 +191,7 @@ def main():
tobii_accuracy = 1.42 # degree
tobii_precision = 0.34 # degree
tobii_camera_hfov = 82 # degree
+ tobii_visual_hfov = 160 # degree
# Video and data replay loop
try:
@@ -223,8 +224,8 @@ def main():
gaze_position_pixel = (int(nearest_gaze_position.value[0] * video_frame.width), int(nearest_gaze_position.value[1] * video_frame.height))
- gaze_accuracy_mm = numpy.sin(numpy.deg2rad(tobii_accuracy)) * nearest_gaze_position_3d.value[2]
- tobii_camera_hfov_mm = numpy.sin(numpy.deg2rad(tobii_camera_hfov)) * nearest_gaze_position_3d.value[2]
+ gaze_accuracy_mm = numpy.tan(numpy.deg2rad(tobii_accuracy)) * nearest_gaze_position_3d.value[2]
+ tobii_camera_hfov_mm = numpy.tan(numpy.deg2rad(tobii_camera_hfov / 2)) * nearest_gaze_position_3d.value[2]
gaze_accuracy_pixel = round(video_frame.width * float(gaze_accuracy_mm) / float(tobii_camera_hfov_mm))
# Draw gaze position and accuracy
@@ -259,7 +260,7 @@ def main():
for (i, marker_id) in enumerate(aruco_tracker.get_markers_ids()):
# Copy 3D scene related to detected marker
- aoi3D_scene = aoi3D_scene_selector(marker_id).copy()
+ aoi3D_scene = aoi3D_scene_selector(marker_id)
if aoi3D_scene == None:
continue
@@ -275,8 +276,10 @@ def main():
aoi3D_camera = aoi3D_scene.transform(aruco_tracker.get_marker_translation(i), aruco_tracker.get_marker_rotation(i))
# Get aoi inside vision cone field
- # The vision cone tip is positionned behind the head
- aoi3D_inside, aoi3D_outside = aoi3D_camera.vision_cone(300, 150, cone_tip=[0., 0., -20.])
+ cone_vision_height_cm = nearest_gaze_position_3d.value[2]/10 # cm
+ cone_vision_radius_cm = numpy.tan(numpy.deg2rad(tobii_visual_hfov / 2)) * cone_vision_height_cm
+
+ aoi3D_inside, aoi3D_outside = aoi3D_camera.vision_cone(cone_vision_radius_cm, cone_vision_height_cm)
# Keep only aoi inside vision cone field
aoi3D_scene = aoi3D_scene.copy(exclude=aoi3D_outside.keys())
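A minimal standalone sketch of the corrected projection, using hypothetical gaze depth and frame width values: a ray at angle θ from the optical axis meets a plane at depth d at a distance d·tan(θ) from the axis, so both the accuracy cone and the camera field of view have to be projected with tan rather than sin, and the full hfov has to be halved to get the half-width covered on each side of the axis.

    import numpy

    tobii_accuracy = 1.42      # degree
    tobii_camera_hfov = 82     # degree
    gaze_depth_mm = 650.       # hypothetical 3D gaze distance (mm)
    frame_width_px = 1920      # hypothetical scene camera frame width (px)

    # Projected sizes on the plane at gaze depth
    gaze_accuracy_mm = numpy.tan(numpy.deg2rad(tobii_accuracy)) * gaze_depth_mm
    tobii_camera_hfov_mm = numpy.tan(numpy.deg2rad(tobii_camera_hfov / 2)) * gaze_depth_mm

    # Accuracy expressed in pixels, mirroring the patched scripts
    gaze_accuracy_px = round(frame_width_px * gaze_accuracy_mm / tobii_camera_hfov_mm)
    print(gaze_accuracy_px)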
diff --git a/src/argaze/utils/live_tobii_aruco_aoi_ivy_controller.py b/src/argaze/utils/live_tobii_aruco_aoi_ivy_controller.py
index cba466b..1ab02e4 100644
--- a/src/argaze/utils/live_tobii_aruco_aoi_ivy_controller.py
+++ b/src/argaze/utils/live_tobii_aruco_aoi_ivy_controller.py
@@ -84,6 +84,7 @@ def main():
tobii_accuracy = 1.42 # degree
tobii_precision = 0.34 # degree
tobii_camera_hfov = 82 # degree
+ tobii_visual_hfov = 160 # degree
# Start streaming
tobii_controller.start_streaming()
@@ -120,8 +121,8 @@ def main():
gaze_position_pixel = (int(nearest_gaze_position.value[0] * video_frame.width), int(nearest_gaze_position.value[1] * video_frame.height))
- gaze_accuracy_mm = numpy.sin(numpy.deg2rad(tobii_accuracy)) * nearest_gaze_position_3d.value[2]
- tobii_camera_hfov_mm = numpy.sin(numpy.deg2rad(tobii_camera_hfov)) * nearest_gaze_position_3d.value[2]
+ gaze_accuracy_mm = numpy.tan(numpy.deg2rad(tobii_accuracy)) * nearest_gaze_position_3d.value[2]
+ tobii_camera_hfov_mm = numpy.tan(numpy.deg2rad(tobii_camera_hfov / 2)) * nearest_gaze_position_3d.value[2]
gaze_accuracy_pixel = round(video_frame.width * float(gaze_accuracy_mm) / float(tobii_camera_hfov_mm))
# Draw gaze position and accuracy
@@ -168,8 +169,10 @@ def main():
aoi3D_camera = aoi3D_scene.transform(aruco_tracker.get_marker_translation(i), aruco_tracker.get_marker_rotation(i))
# Get aoi inside vision cone field
- # The vision cone tip is positionned behind the head
- aoi3D_inside, aoi3D_outside = aoi3D_camera.vision_cone(300, 150, cone_tip=[0., 0., -20.])
+ cone_vision_height_cm = nearest_gaze_position_3d.value[2]/10 # cm
+ cone_vision_radius_cm = numpy.tan(numpy.deg2rad(tobii_visual_hfov / 2)) * cone_vision_height_cm
+
+ aoi3D_inside, aoi3D_outside = aoi3D_camera.vision_cone(cone_vision_radius_cm, cone_vision_height_cm)
# Keep only aoi inside vision cone field
aoi3D_scene = aoi3D_scene.copy(exclude=aoi3D_outside.keys())
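The depth-driven cone sizing can be checked the same way with hypothetical values: the cone height is the current 3D gaze depth converted to centimetres, and the radius follows from half of the 160° visual field assumed by the patch.

    import numpy

    tobii_visual_hfov = 160    # degree, visual field assumed in the patch
    gaze_depth_mm = 650.       # hypothetical 3D gaze distance (mm)

    cone_vision_height_cm = gaze_depth_mm / 10
    cone_vision_radius_cm = numpy.tan(numpy.deg2rad(tobii_visual_hfov / 2)) * cone_vision_height_cm

    # aoi3D_camera.vision_cone(cone_vision_radius_cm, cone_vision_height_cm) then
    # splits the scene into AOI inside and outside this cone.
    print(cone_vision_radius_cm, cone_vision_height_cm)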