aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--src/argaze/utils/export_tobii_segment_aruco_visual_scan.py106
-rw-r--r--src/argaze/utils/live_tobii_aruco_aoi_ivy_controller.py86
2 files changed, 102 insertions, 90 deletions
diff --git a/src/argaze/utils/export_tobii_segment_aruco_visual_scan.py b/src/argaze/utils/export_tobii_segment_aruco_visual_scan.py
index 7817a39..d301f55 100644
--- a/src/argaze/utils/export_tobii_segment_aruco_visual_scan.py
+++ b/src/argaze/utils/export_tobii_segment_aruco_visual_scan.py
@@ -235,13 +235,17 @@ def main():
if head_moving and head_movement_norm < 10:
head_moving = False
- # Ignore frame where head is moving
+ # When head is moving, ArUco tracking could return bad pose estimation and so bad AOI scene projection
if head_moving:
- raise UserWarning('Head is moving')
+ raise AOIFeatures.AOISceneMissing('Head is moving')
# Get nearest gaze position before video timestamp and remove all gaze positions before
_, nearest_gaze_position = tobii_ts_gaze_positions.pop_first_until(video_ts)
+ # Ignore frame when gaze position is not valid
+ if nearest_gaze_position.validity == 1:
+ raise GazeFeatures.InvalidGazeData('Unvalid gaze position')
+
gaze_position_pixel = (int(nearest_gaze_position.value[0] * visu_frame.width), int(nearest_gaze_position.value[1] * visu_frame.height))
# Draw gaze position
@@ -250,9 +254,9 @@ def main():
# Get nearest gaze position 3D before video timestamp and remove all gaze positions before
_, nearest_gaze_position_3d = tobii_ts_gaze_positions_3d.pop_first_until(video_ts)
- # Ignore frame when gaze precison can't be evaluated
- if nearest_gaze_position_3d.value[2] <= 0:
- raise UserWarning('Negative Z gaze position 3D')
+ # Ignore frame when gaze position 3D is not valid
+ if nearest_gaze_position_3d.validity == 1:
+ raise GazeFeatures.InvalidGazeData('Unvalid gaze position 3D')
gaze_accuracy_mm = numpy.tan(numpy.deg2rad(tobii_accuracy)) * nearest_gaze_position_3d.value[2]
tobii_camera_hfov_mm = numpy.tan(numpy.deg2rad(tobii_camera_hfov / 2)) * nearest_gaze_position_3d.value[2]
@@ -261,7 +265,7 @@ def main():
# Draw gaze accuracy
cv.circle(visu_frame.matrix, gaze_position_pixel, gaze_accuracy_pixel, (0, 255, 255), 1)
- # Store gaze position and precision at this time in millisecond
+ # Store gaze position and accuracy in milliseconds for further visual scan processing
ts_gaze_positions[round(video_ts_ms)] = gaze_position_pixel
ts_gaze_accuracies[round(video_ts_ms)] = gaze_accuracy_pixel
@@ -273,8 +277,9 @@ def main():
aruco_tracker.track(video_frame.matrix)
aruco_tracker.draw(visu_frame.matrix)
+ # When no marker is detected, no AOI scene projection can be done
if aruco_tracker.get_markers_number() == 0:
- raise UserWarning('No marker detected')
+ raise AOIFeatures.AOISceneMissing('No marker detected')
# Store aoi 2D video for further scene merging
aoi2D_dict = {}
@@ -282,57 +287,47 @@ def main():
# Project 3D scene on each video frame and the visualisation frame
for (i, marker_id) in enumerate(aruco_tracker.get_markers_ids()):
- # Process marker pose
- try:
-
- # Copy 3D scene related to detected marker
- aoi3D_scene = aoi3D_scene_selector(marker_id)
-
- if aoi3D_scene == None:
- raise UserWarning('No AOI 3D scene')
-
- # Transform scene into camera referential
- aoi3D_camera = aoi3D_scene.transform(aruco_tracker.get_marker_translation(i), aruco_tracker.get_marker_rotation(i))
+ # Copy 3D scene related to detected marker
+ aoi3D_scene = aoi3D_scene_selector(marker_id)
+
+ if aoi3D_scene == None:
+ continue
- # Get aoi inside vision cone field
- cone_vision_height_cm = nearest_gaze_position_3d.value[2]/10 # cm
- cone_vision_radius_cm = numpy.tan(numpy.deg2rad(tobii_visual_hfov / 2)) * cone_vision_height_cm
+ # Transform scene into camera referential
+ aoi3D_camera = aoi3D_scene.transform(aruco_tracker.get_marker_translation(i), aruco_tracker.get_marker_rotation(i))
- aoi3D_inside, aoi3D_outside = aoi3D_camera.vision_cone(cone_vision_radius_cm, cone_vision_height_cm)
+ # Get aoi inside vision cone field
+ cone_vision_height_cm = nearest_gaze_position_3d.value[2]/10 # cm
+ cone_vision_radius_cm = numpy.tan(numpy.deg2rad(tobii_visual_hfov / 2)) * cone_vision_height_cm
- # Keep only aoi inside vision cone field
- aoi3D_scene = aoi3D_scene.copy(exclude=aoi3D_outside.keys())
+ aoi3D_inside, aoi3D_outside = aoi3D_camera.vision_cone(cone_vision_radius_cm, cone_vision_height_cm)
- # DON'T APPLY CAMERA DISTORSION : it projects points which are far from the frame into it
- # This hack isn't realistic but as the gaze will mainly focus on centered AOI, where the distorsion is low, it is acceptable.
- aoi2D_video_scene = aoi3D_scene.project(aruco_tracker.get_marker_translation(i), aruco_tracker.get_marker_rotation(i), aruco_camera.get_K())
+ # Keep only aoi inside vision cone field
+ aoi3D_scene = aoi3D_scene.copy(exclude=aoi3D_outside.keys())
- # Store each 2D aoi for further scene merging
- for name, aoi in aoi2D_video_scene.items():
+ # DON'T APPLY CAMERA DISTORSION : it projects points which are far from the frame into it
+ # This hack isn't realistic but as the gaze will mainly focus on centered AOI, where the distorsion is low, it is acceptable.
+ aoi2D_video_scene = aoi3D_scene.project(aruco_tracker.get_marker_translation(i), aruco_tracker.get_marker_rotation(i), aruco_camera.get_K())
- if name not in aoi2D_dict.keys():
- aoi2D_dict[name] = []
+ # Store each 2D aoi for further scene merging
+ for name, aoi in aoi2D_video_scene.items():
- aoi2D_dict[name].append(aoi.clockwise())
+ if name not in aoi2D_dict.keys():
+ aoi2D_dict[name] = []
- # Select 2D visu scene if there is one for the detected marker
- aoi2D_visu_scene = aoi2D_visu_scene_selector(marker_id)
- aoi2D_visu_frame = aoi2D_visu_frame_selector(marker_id)
-
- if aoi2D_visu_scene == None:
- continue
-
- look_at = aoi2D_video_scene['Visualisation_Plan'].look_at(gaze_position_pixel)
+ aoi2D_dict[name].append(aoi.clockwise())
- visu_gaze_pixel = aoi2D_visu_scene['Visualisation_Plan'].looked_pixel(look_at)
- cv.circle(aoi2D_visu_frame, visu_gaze_pixel, 4, (0, 0, 255), -1)
+ # Select 2D visu scene if there is one for the detected marker
+ aoi2D_visu_scene = aoi2D_visu_scene_selector(marker_id)
+ aoi2D_visu_frame = aoi2D_visu_frame_selector(marker_id)
+
+ if aoi2D_visu_scene == None:
+ continue
+
+ look_at = aoi2D_video_scene['Visualisation_Plan'].look_at(gaze_position_pixel)
- # Write warning related to marker pose processing
- except UserWarning as e:
-
- top = int(visu_frame.height) - (marker_id+1) * 40
- cv.rectangle(visu_frame.matrix, (0, top), (550, top+40), (127, 127, 127), -1)
- cv.putText(visu_frame.matrix, f'Marker {marker_id}: {e}', (20, top + 30), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
+ visu_gaze_pixel = aoi2D_visu_scene['Visualisation_Plan'].looked_pixel(look_at)
+ cv.circle(aoi2D_visu_frame, visu_gaze_pixel, 4, (0, 0, 255), -1)
# Merge all 2D aoi into a single 2D scene
aoi2D_merged_scene = AOI2DScene.AOI2DScene()
@@ -340,16 +335,27 @@ def main():
aoi2D_merged_scene[name] = numpy.sum(aoi_array, axis=0) / len(aoi_array)
aoi2D_merged_scene.draw(visu_frame.matrix, gaze_position_pixel, gaze_accuracy_pixel, exclude=['Visualisation_Plan'])
+
+ # When the merged scene is empty
+ if len(aoi2D_merged_scene.keys()) == 0:
+ raise AOIFeatures.AOISceneMissing('Scene is empty')
# Store 2D merged scene at this time in millisecond
ts_aois_scenes[round(video_ts_ms)] = aoi2D_merged_scene
- # Write warning related to video and data frame processing
- except UserWarning as e:
+ # Raised when gaze data can't be processed
+ except GazeFeatures.InvalidGazeData as e:
+
+ cv.rectangle(visu_frame.matrix, (0, 50), (550, 100), (127, 127, 127), -1)
+ cv.putText(visu_frame.matrix, str(e), (20, 80), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
+
+ # Raised when aoi scene is missing
+ except AOIFeatures.AOISceneMissing as e:
cv.rectangle(visu_frame.matrix, (0, 50), (550, 100), (127, 127, 127), -1)
cv.putText(visu_frame.matrix, str(e), (20, 80), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
+ # Raised when buffer is empty
except ValueError:
pass
diff --git a/src/argaze/utils/live_tobii_aruco_aoi_ivy_controller.py b/src/argaze/utils/live_tobii_aruco_aoi_ivy_controller.py
index 8462be8..b794054 100644
--- a/src/argaze/utils/live_tobii_aruco_aoi_ivy_controller.py
+++ b/src/argaze/utils/live_tobii_aruco_aoi_ivy_controller.py
@@ -134,13 +134,17 @@ def main():
if head_moving and head_movement_norm < 10:
head_moving = False
- # Ignore frame where head is moving
+ # When head is moving, ArUco tracking could return bad pose estimation and so bad AOI scene projection
if head_moving:
- raise UserWarning('Head is moving')
+ raise AOIFeatures.AOISceneMissing('Head is moving')
# Get nearest gaze position before video timestamp and remove all gaze positions before
_, nearest_gaze_position = tobii_ts_gaze_positions.pop_first_until(video_ts)
+ # Ignore frame when gaze position is not valid
+ if nearest_gaze_position.validity == 1:
+ raise GazeFeatures.InvalidGazeData('Unvalid gaze position')
+
gaze_position_pixel = (int(nearest_gaze_position.value[0] * visu_frame.width), int(nearest_gaze_position.value[1] * visu_frame.height))
# Draw gaze position
@@ -149,9 +153,9 @@ def main():
# Get nearest gaze position 3D before video timestamp and remove all gaze positions before
_, nearest_gaze_position_3d = tobii_ts_gaze_positions_3d.pop_first_until(video_ts)
- # Ignore frame when gaze precison can't be evaluated
- if nearest_gaze_position_3d.value[2] <= 0:
- raise UserWarning('Negative Z gaze position 3D value')
+ # Ignore frame when gaze position 3D is not valid
+ if nearest_gaze_position_3d.validity == 1:
+ raise GazeFeatures.InvalidGazeData('Unvalid gaze position 3D')
gaze_position_pixel = (int(nearest_gaze_position.value[0] * visu_frame.width), int(nearest_gaze_position.value[1] * visu_frame.height))
@@ -171,8 +175,9 @@ def main():
aruco_tracker.track(video_frame.matrix)
aruco_tracker.draw(visu_frame.matrix)
+ # When no marker is detected, no AOI scene projection can be done
if aruco_tracker.get_markers_number() == 0:
- raise UserWarning('No marker detected')
+ raise AOIFeatures.AOISceneMissing('No marker detected')
# Store aoi 2D video for further scene merging
aoi2D_dict = {}
@@ -180,45 +185,35 @@ def main():
# Project 3D scenes related to each aruco markers
for (i, marker_id) in enumerate(aruco_tracker.get_markers_ids()):
- # Process marker pose
- try:
-
- # Select 3D scene related to detected marker
- aoi3D_scene = aoi3D_scene_selector(marker_id)
-
- if aoi3D_scene == None:
- raise UserWarning('No AOI 3D scene')
-
- # Transform scene into camera referential
- aoi3D_camera = aoi3D_scene.transform(aruco_tracker.get_marker_translation(i), aruco_tracker.get_marker_rotation(i))
+ # Select 3D scene related to detected marker
+ aoi3D_scene = aoi3D_scene_selector(marker_id)
+
+ if aoi3D_scene == None:
+ continue
- # Get aoi inside vision cone field
- cone_vision_height_cm = nearest_gaze_position_3d.value[2]/10 # cm
- cone_vision_radius_cm = numpy.tan(numpy.deg2rad(tobii_visual_hfov / 2)) * cone_vision_height_cm
+ # Transform scene into camera referential
+ aoi3D_camera = aoi3D_scene.transform(aruco_tracker.get_marker_translation(i), aruco_tracker.get_marker_rotation(i))
- aoi3D_inside, aoi3D_outside = aoi3D_camera.vision_cone(cone_vision_radius_cm, cone_vision_height_cm)
+ # Get aoi inside vision cone field
+ cone_vision_height_cm = nearest_gaze_position_3d.value[2]/10 # cm
+ cone_vision_radius_cm = numpy.tan(numpy.deg2rad(tobii_visual_hfov / 2)) * cone_vision_height_cm
- # Keep only aoi inside vision cone field
- aoi3D_scene = aoi3D_scene.copy(exclude=aoi3D_outside.keys())
+ aoi3D_inside, aoi3D_outside = aoi3D_camera.vision_cone(cone_vision_radius_cm, cone_vision_height_cm)
- # DON'T APPLY CAMERA DISTORSION : it projects points which are far from the frame into it
- # This hack isn't realistic but as the gaze will mainly focus on centered AOI, where the distorsion is low, it is acceptable.
- aoi2D_video_scene = aoi3D_scene.project(aruco_tracker.get_marker_translation(i), aruco_tracker.get_marker_rotation(i), aruco_camera.get_K())
+ # Keep only aoi inside vision cone field
+ aoi3D_scene = aoi3D_scene.copy(exclude=aoi3D_outside.keys())
- # Store each 2D aoi for further scene merging
- for name, aoi in aoi2D_video_scene.items():
+ # DON'T APPLY CAMERA DISTORSION : it projects points which are far from the frame into it
+ # This hack isn't realistic but as the gaze will mainly focus on centered AOI, where the distorsion is low, it is acceptable.
+ aoi2D_video_scene = aoi3D_scene.project(aruco_tracker.get_marker_translation(i), aruco_tracker.get_marker_rotation(i), aruco_camera.get_K())
- if name not in aoi2D_dict.keys():
- aoi2D_dict[name] = []
+ # Store each 2D aoi for further scene merging
+ for name, aoi in aoi2D_video_scene.items():
- aoi2D_dict[name].append(aoi.clockwise())
+ if name not in aoi2D_dict.keys():
+ aoi2D_dict[name] = []
- # Write warning related to marker pose processing
- except UserWarning as e:
-
- top = int(visu_frame.height) - (marker_id+1) * 40
- cv.rectangle(visu_frame.matrix, (0, top), (550, top+40), (127, 127, 127), -1)
- cv.putText(visu_frame.matrix, f'Marker {marker_id}: {e}', (20, top + 30), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
+ aoi2D_dict[name].append(aoi.clockwise())
# Merge all 2D aoi into a single 2D scene
aoi2D_merged_scene = AOI2DScene.AOI2DScene()
@@ -227,6 +222,10 @@ def main():
aoi2D_merged_scene.draw(visu_frame.matrix, video_gaze_pixel, gaze_accuracy_pixel, exclude=['Visualisation_Plan'])
+ # When the merged scene is empty
+ if len(aoi2D_merged_scene.keys()) == 0:
+ raise AOIFeatures.AOISceneMissing('Scene is empty')
+
# Send look at aoi pointer
for name, aoi in aoi2D_merged_scene.items():
@@ -238,12 +237,19 @@ def main():
else:
IvySendMsg(f'looking {name}')
- # Write warning related to video and data frame processing
- except UserWarning as e:
-
+ # Raised when gaze data can't be processed
+ except GazeFeatures.InvalidGazeData as e:
+
+ cv.rectangle(visu_frame.matrix, (0, 50), (550, 100), (127, 127, 127), -1)
+ cv.putText(visu_frame.matrix, str(e), (20, 80), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
+
+ # Raised when aoi scene is missing
+ except AOIFeatures.AOISceneMissing as e:
+
cv.rectangle(visu_frame.matrix, (0, 50), (550, 100), (127, 127, 127), -1)
cv.putText(visu_frame.matrix, str(e), (20, 80), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
+ # Raised when buffer is empty
except ValueError:
pass