author    Théo de la Hogue    2022-09-19 22:32:13 +0200
committer Théo de la Hogue    2022-09-19 22:32:13 +0200
commit    0aa22b0a04fe0b145a813ea8416acb74c6b8d901 (patch)
tree      e64f294b124e0ce6bbcb44ef32b833e8d8c4d59a /src
parent    c2dc0a7f51e6a15a89f79e1ea44696ec5c64cbae (diff)
Refactoring code like in export_tobii_segment_aruco_visual_scan.py.
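
This commit applies one frame-handling pattern across all three utilities: the incoming video frame is copied so that annotations are drawn on a separate visualisation frame, the left and right sixths of the tracking frame are blacked out so that markers outside the focus area are ignored, and skip conditions raise UserWarning, caught once per frame to write the warning onto the visualisation. A minimal sketch of that pattern, assuming a hypothetical Frame wrapper exposing the .matrix, .width and .height attributes the scripts rely on (the actual Tobii frame class is not shown in this diff):

    import cv2 as cv
    import numpy

    class Frame:
        """Hypothetical stand-in for the Tobii video frame wrapper."""
        def __init__(self, matrix):
            self.matrix = matrix
            self.height, self.width = matrix.shape[:2]

        def copy(self):
            return Frame(self.matrix.copy())

    def prepare_frames(video_frame):
        # Copy first: all drawing goes on the copy so that annotations
        # never disturb marker tracking on the original matrix.
        visu_frame = video_frame.copy()

        # Black out the left and right sixths of the tracking frame so
        # the ArUco tracker ignores markers outside the focus area.
        w, h = video_frame.width, video_frame.height
        cv.rectangle(video_frame.matrix, (0, 0), (w // 6, h), (0, 0, 0), -1)
        cv.rectangle(video_frame.matrix, (w - w // 6, 0), (w, h), (0, 0, 0), -1)

        # The focus area outline goes on the visualisation copy only.
        cv.rectangle(visu_frame.matrix, (w // 6, 0), (w - w // 6, h), (255, 150, 150), 1)

        return video_frame, visu_frame

    tracking_frame, visu_frame = prepare_frames(Frame(numpy.zeros((1080, 1920, 3), numpy.uint8)))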
Diffstat (limited to 'src')
-rw-r--r--  src/argaze/utils/edit_tobii_segment_aruco_pose.py          | 161
-rw-r--r--  src/argaze/utils/export_tobii_segment_aruco_visual_scan.py | 112
-rw-r--r--  src/argaze/utils/live_tobii_aruco_aoi_ivy_controller.py    | 160
3 files changed, 242 insertions, 191 deletions
diff --git a/src/argaze/utils/edit_tobii_segment_aruco_pose.py b/src/argaze/utils/edit_tobii_segment_aruco_pose.py
index 238a0b7..72232fe 100644
--- a/src/argaze/utils/edit_tobii_segment_aruco_pose.py
+++ b/src/argaze/utils/edit_tobii_segment_aruco_pose.py
@@ -178,7 +178,7 @@ def main():
# Frame selector loop
frame_index = 0
last_frame_index = -1
- last_frame_matrix = video_frame.matrix.copy()
+ last_frame = video_frame.copy()
selected_marker_id = -1
@@ -193,50 +193,60 @@ def main():
video_ts_ms = video_ts / 1000
last_frame_index = frame_index
- last_frame_matrix = video_frame.matrix.copy()
+ last_frame = video_frame.copy()
+
+ # Hide frame left and right borders before tracking to ignore markers outside focus area
+ cv.rectangle(video_frame.matrix, (0, 0), (int(video_frame.width/6), int(video_frame.height)), (0, 0, 0), -1)
+ cv.rectangle(video_frame.matrix, (int(video_frame.width*(1 - 1/6)), 0), (int(video_frame.width), int(video_frame.height)), (0, 0, 0), -1)
+
+ # Track markers with pose estimation
+ aruco_tracker.track(video_frame.matrix)
else:
- video_frame.matrix = last_frame_matrix.copy()
+ video_frame = last_frame.copy()
+
+            # Copy video frame to edit visualisation on it without disrupting aruco tracking
+ visu_frame = video_frame.copy()
- # Track markers with pose estimation and draw them
- aruco_tracker.track(video_frame.matrix)
- aruco_tracker.draw(video_frame.matrix)
+ # Draw markers and pose estimation
+ aruco_tracker.draw(visu_frame.matrix)
# Write segment timing
- cv.putText(video_frame.matrix, f'Segment time: {int(video_ts_ms)} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
+ cv.rectangle(visu_frame.matrix, (0, 0), (550, 50), (63, 63, 63), -1)
+ cv.putText(visu_frame.matrix, f'Segment time: {int(video_ts_ms)} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
# Draw focus area
- cv.circle(video_frame.matrix, (int(video_frame.width/2), int(video_frame.height/2)), int(video_frame.width/3), (255, 150, 150), 1)
-
- # Draw focus area center
- cv.line(video_frame.matrix, (int(video_frame.width/2) - 50, int(video_frame.height/2)), (int(video_frame.width/2) + 50, int(video_frame.height/2)), (255, 150, 150), 1)
- cv.line(video_frame.matrix, (int(video_frame.width/2), int(video_frame.height/2) - 50), (int(video_frame.width/2), int(video_frame.height/2) + 50), (255, 150, 150), 1)
+ cv.rectangle(visu_frame.matrix, (int(visu_frame.width/6), 0), (int(visu_frame.width*(1-1/6)), int(visu_frame.height)), (255, 150, 150), 1)
+
+ # Draw center
+ cv.line(visu_frame.matrix, (int(visu_frame.width/2) - 50, int(visu_frame.height/2)), (int(visu_frame.width/2) + 50, int(visu_frame.height/2)), (255, 150, 150), 1)
+ cv.line(visu_frame.matrix, (int(visu_frame.width/2), int(visu_frame.height/2) - 50), (int(visu_frame.width/2), int(visu_frame.height/2) + 50), (255, 150, 150), 1)
# Draw pointer
- cv.circle(video_frame.matrix, pointer, 2, (0, 255, 255), -1)
+ cv.circle(visu_frame.matrix, pointer, 2, (0, 255, 255), -1)
# Write selected marker id
if selected_marker_id >= 0:
if edit_trans:
- cv.putText(video_frame.matrix, f'Marker {selected_marker_id} R Axis {edit_coord + 1} selected', (20, 80), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv.LINE_AA)
+ cv.putText(visu_frame.matrix, f'Marker {selected_marker_id}: R Axis {edit_coord + 1} selected', (20, 80), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv.LINE_AA)
else:
- cv.putText(video_frame.matrix, f'Marker {selected_marker_id} T Axis {edit_coord + 1} selected', (20, 80), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv.LINE_AA)
+ cv.putText(visu_frame.matrix, f'Marker {selected_marker_id}: T Axis {edit_coord + 1} selected', (20, 80), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv.LINE_AA)
# Write documentation
else:
- cv.putText(video_frame.matrix, f'Left click on marker to select scene', (20, 80), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
- cv.putText(video_frame.matrix, f'T to translate, R to rotate', (20, 120), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
- cv.putText(video_frame.matrix, f'Shift+num to select axis', (20, 160), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
- cv.putText(video_frame.matrix, f'Right click and drag to edit axis', (20, 200), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
- cv.putText(video_frame.matrix, f'Ctrl+s to save scene', (20, 240), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
+ cv.putText(visu_frame.matrix, f'Left click on marker to select scene', (20, 80), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
+ cv.putText(visu_frame.matrix, f'T to translate, R to rotate', (20, 120), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
+ cv.putText(visu_frame.matrix, f'Shift+num to select axis', (20, 160), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
+ cv.putText(visu_frame.matrix, f'Right click and drag to edit axis', (20, 200), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
+ cv.putText(visu_frame.matrix, f'Ctrl+s to save scene', (20, 240), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
# Project 3D scene on each video frame and the visualisation frame
if aruco_tracker.get_markers_number():
# Write detected marker ids
- cv.putText(video_frame.matrix, f'Detected markers : {aruco_tracker.get_markers_ids()}', (20, video_frame.height - 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
+ cv.putText(visu_frame.matrix, f'Detected markers: {aruco_tracker.get_markers_ids()}', (20, visu_frame.height - 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
# Update selected marker id by left_clicking on marker
for (i, marker_id) in enumerate(aruco_tracker.get_markers_ids()):
@@ -256,86 +266,79 @@ def main():
            # Retrieve marker index
selected_marker_index = aruco_tracker.get_marker_index(selected_marker_id)
- # If AOI scene is found
- if aoi3D_scene != None:
-
- # Is the marker out of focus area ?
- marker_x, marker_y = aruco_tracker.get_marker_center(selected_marker_index)
- distance_to_center = ( (video_frame.width/2 - marker_x)**2 + (video_frame.height/2 - marker_y)**2 )**0.5
+ if aoi3D_scene == None:
+ raise UserWarning('No AOI 3D scene')
- if distance_to_center > int(video_frame.width/3):
+ # Select scene edit
+ aoi3D_scene_edit = aoi3D_scene_edit_selector(selected_marker_id)
- # Write warning
- cv.putText(video_frame.matrix, f'Out of focus area', (20, 120), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
+ # Edit scene
+ if aoi3D_scene_edit != None:
- # Select scene edit
- aoi3D_scene_edit = aoi3D_scene_edit_selector(selected_marker_id)
-
- # Edit scene
- if aoi3D_scene_edit != None:
+ marker_x, marker_y = aruco_tracker.get_marker_center(selected_marker_index)
- if right_button:
+ if right_button:
- pointer_delta_x, pointer_delta_y = (right_click[0] - marker_x) / (video_frame.width/3), (marker_y - right_click[1]) / (video_frame.width/3)
+ pointer_delta_x, pointer_delta_y = (right_click[0] - marker_x) / (visu_frame.width/3), (marker_y - right_click[1]) / (visu_frame.width/3)
- if edit_trans:
-
- # Edit scene rotation
- if edit_coord == 0:
- aoi3D_scene_edit['rotation'] = numpy.array([pointer_delta_y, aoi3D_scene_edit['rotation'][1], aoi3D_scene_edit['rotation'][2]])
+ if edit_trans:
+
+ # Edit scene rotation
+ if edit_coord == 0:
+ aoi3D_scene_edit['rotation'] = numpy.array([pointer_delta_y, aoi3D_scene_edit['rotation'][1], aoi3D_scene_edit['rotation'][2]])
- elif edit_coord == 1:
- aoi3D_scene_edit['rotation'] = numpy.array([aoi3D_scene_edit['rotation'][0], pointer_delta_x, aoi3D_scene_edit['rotation'][2]])
+ elif edit_coord == 1:
+ aoi3D_scene_edit['rotation'] = numpy.array([aoi3D_scene_edit['rotation'][0], pointer_delta_x, aoi3D_scene_edit['rotation'][2]])
- elif edit_coord == 2:
- aoi3D_scene_edit['rotation'] = numpy.array([aoi3D_scene_edit['rotation'][0], aoi3D_scene_edit['rotation'][1], -1*pointer_delta_y])
+ elif edit_coord == 2:
+ aoi3D_scene_edit['rotation'] = numpy.array([aoi3D_scene_edit['rotation'][0], aoi3D_scene_edit['rotation'][1], -1*pointer_delta_y])
- else:
+ else:
- # Edit scene translation
- if edit_coord == 0:
- aoi3D_scene_edit['translation'] = numpy.array([pointer_delta_x, aoi3D_scene_edit['translation'][1], aoi3D_scene_edit['translation'][2]])
+ # Edit scene translation
+ if edit_coord == 0:
+ aoi3D_scene_edit['translation'] = numpy.array([pointer_delta_x, aoi3D_scene_edit['translation'][1], aoi3D_scene_edit['translation'][2]])
- elif edit_coord == 1:
- aoi3D_scene_edit['translation'] = numpy.array([aoi3D_scene_edit['translation'][0], pointer_delta_y, aoi3D_scene_edit['translation'][2]])
+ elif edit_coord == 1:
+ aoi3D_scene_edit['translation'] = numpy.array([aoi3D_scene_edit['translation'][0], pointer_delta_y, aoi3D_scene_edit['translation'][2]])
- elif edit_coord == 2:
- aoi3D_scene_edit['translation'] = numpy.array([aoi3D_scene_edit['translation'][0], aoi3D_scene_edit['translation'][1], 2*pointer_delta_y])
+ elif edit_coord == 2:
+ aoi3D_scene_edit['translation'] = numpy.array([aoi3D_scene_edit['translation'][0], aoi3D_scene_edit['translation'][1], 2*pointer_delta_y])
- # Apply transformation
- aoi3D_scene_edited = aoi3D_scene.transform(aoi3D_scene_edit['translation'], aoi3D_scene_edit['rotation'])
+ # Apply transformation
+ aoi3D_scene_edited = aoi3D_scene.transform(aoi3D_scene_edit['translation'], aoi3D_scene_edit['rotation'])
- # Write rotation matrix
- R, _ = cv.Rodrigues(aoi3D_scene_edit['rotation'])
- cv.putText(video_frame.matrix, f'Rotation matrix:', (20, 160), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
- cv.putText(video_frame.matrix, f'{R[0][0]:.3f} {R[0][1]:.3f} {R[0][2]:.3f}', (40, 200), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv.LINE_AA)
- cv.putText(video_frame.matrix, f'{R[1][0]:.3f} {R[1][1]:.3f} {R[1][2]:.3f}', (40, 240), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv.LINE_AA)
- cv.putText(video_frame.matrix, f'{R[2][0]:.3f} {R[2][1]:.3f} {R[2][2]:.3f}', (40, 280), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv.LINE_AA)
-
- # Write translation vector
- T = aoi3D_scene_edit['translation']
- cv.putText(video_frame.matrix, f'Translation vector:', (20, 320), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
- cv.putText(video_frame.matrix, f'{T[0]:.3f}', (40, 360), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv.LINE_AA)
- cv.putText(video_frame.matrix, f'{T[1]:.3f}', (40, 400), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv.LINE_AA)
- cv.putText(video_frame.matrix, f'{T[2]:.3f}', (40, 440), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv.LINE_AA)
+ # Write rotation matrix
+ R, _ = cv.Rodrigues(aoi3D_scene_edit['rotation'])
+ cv.putText(visu_frame.matrix, f'Rotation matrix:', (20, 160), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
+ cv.putText(visu_frame.matrix, f'{R[0][0]:.3f} {R[0][1]:.3f} {R[0][2]:.3f}', (40, 200), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv.LINE_AA)
+ cv.putText(visu_frame.matrix, f'{R[1][0]:.3f} {R[1][1]:.3f} {R[1][2]:.3f}', (40, 240), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv.LINE_AA)
+ cv.putText(visu_frame.matrix, f'{R[2][0]:.3f} {R[2][1]:.3f} {R[2][2]:.3f}', (40, 280), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv.LINE_AA)
- # DON'T APPLY CAMERA DISTORSION : it projects points which are far from the frame into it
- # This hack isn't realistic but as the gaze will mainly focus on centered AOI, where the distorsion is low, it is acceptable.
- aoi2D_video_scene = aoi3D_scene_edited.project(aruco_tracker.get_marker_translation(selected_marker_index), aruco_tracker.get_marker_rotation(selected_marker_index), aruco_camera.get_K())
+ # Write translation vector
+ T = aoi3D_scene_edit['translation']
+ cv.putText(visu_frame.matrix, f'Translation vector:', (20, 320), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
+ cv.putText(visu_frame.matrix, f'{T[0]:.3f}', (40, 360), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv.LINE_AA)
+ cv.putText(visu_frame.matrix, f'{T[1]:.3f}', (40, 400), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv.LINE_AA)
+ cv.putText(visu_frame.matrix, f'{T[2]:.3f}', (40, 440), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv.LINE_AA)
+
+            # DON'T APPLY CAMERA DISTORTION: it projects points which are far from the frame into it
+            # This hack isn't realistic but as the gaze will mainly focus on centered AOI, where the distortion is low, it is acceptable.
+ aoi2D_video_scene = aoi3D_scene_edited.project(aruco_tracker.get_marker_translation(selected_marker_index), aruco_tracker.get_marker_rotation(selected_marker_index), aruco_camera.get_K())
- # Draw scene
- aoi2D_video_scene.draw(video_frame.matrix, pointer, 2, exclude=['Visualisation_Plan'])
+ # Draw scene
+ aoi2D_video_scene.draw(visu_frame.matrix, pointer, 2, exclude=['Visualisation_Plan'])
- else:
+ # Write warning related to marker pose processing
+ except UserWarning as e:
- # Write error
- cv.putText(video_frame.matrix, f'Marker {selected_marker_id} have no AOI scene', (20, 120), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv.LINE_AA)
+ cv.putText(visu_frame.matrix, f'Marker {selected_marker_id}: {e}', (20, 120), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv.LINE_AA)
except ValueError:
# Write error
if selected_marker_id >= 0:
- cv.putText(video_frame.matrix, f'Marker {selected_marker_id} not found', (20, 120), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv.LINE_AA)
+ cv.putText(visu_frame.matrix, f'Marker {selected_marker_id} not found', (20, 120), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv.LINE_AA)
# Reset left_click
left_click = (0, 0)
@@ -390,7 +393,7 @@ def main():
load_configuration_file()
# Display video
- cv.imshow(f'Segment {tobii_segment.get_id()} ArUco marker editor', video_frame.matrix)
+ cv.imshow(f'Segment {tobii_segment.get_id()} ArUco marker editor', visu_frame.matrix)
# Wait 1 second
time.sleep(1)
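
For reference, the editing rule that the hunks above rearrange: a right-click drag is measured from the selected marker's centre, normalised by a third of the frame width, and written into a single coordinate of the scene rotation or translation. A minimal sketch under those assumptions (the edit dict mirrors how aoi3D_scene_edit is used; note that in the script edit_trans being true selects rotation, the 'R' mode):

    import numpy

    def apply_drag(edit, click, marker_center, frame_width, edit_trans, edit_coord):
        # Normalise the pointer delta by a third of the frame width; the
        # y axis is inverted so an upward drag gives a positive delta.
        dx = (click[0] - marker_center[0]) / (frame_width / 3)
        dy = (marker_center[1] - click[1]) / (frame_width / 3)

        if edit_trans:
            # Rotation: axis 1 follows x, axes 0 and 2 follow y (axis 2 negated).
            edit['rotation'][edit_coord] = (dy, dx, -dy)[edit_coord]
        else:
            # Translation: axis 2 moves twice as fast along y.
            edit['translation'][edit_coord] = (dx, dy, 2 * dy)[edit_coord]
        return edit

    edit = {'rotation': numpy.zeros(3), 'translation': numpy.zeros(3)}
    apply_drag(edit, click=(1200, 400), marker_center=(960, 540),
               frame_width=1920, edit_trans=True, edit_coord=1)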
diff --git a/src/argaze/utils/export_tobii_segment_aruco_visual_scan.py b/src/argaze/utils/export_tobii_segment_aruco_visual_scan.py
index 18b932d..f8e1dbc 100644
--- a/src/argaze/utils/export_tobii_segment_aruco_visual_scan.py
+++ b/src/argaze/utils/export_tobii_segment_aruco_visual_scan.py
@@ -209,13 +209,16 @@ def main():
for video_ts, video_frame in tobii_segment_video.frames():
video_ts_ms = video_ts / 1000
+
+ # Copy video frame to edit visualisation on it without disrupting aruco tracking
visu_frame = video_frame.copy()
# Write segment timing
- cv.putText(visu_frame.matrix, f'Segment time: {int(video_ts_ms)} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (127, 127, 127), 1, cv.LINE_AA)
+ cv.rectangle(visu_frame.matrix, (0, 0), (550, 50), (63, 63, 63), -1)
+ cv.putText(visu_frame.matrix, f'Segment time: {int(video_ts_ms)} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
# Draw focus area
- cv.rectangle(visu_frame.matrix, (int(video_frame.width/6), 0), (int(video_frame.width*(1-1/6)), int(video_frame.height)), (255, 150, 150), 1)
+        cv.rectangle(visu_frame.matrix, (int(visu_frame.width/6), 0), (int(visu_frame.width*(1-1/6)), int(visu_frame.height)), (255, 150, 150), 1)
# Draw center
cv.line(visu_frame.matrix, (int(visu_frame.width/2) - 50, int(visu_frame.height/2)), (int(visu_frame.width/2) + 50, int(visu_frame.height/2)), (255, 150, 150), 1)
@@ -281,77 +284,74 @@ def main():
aruco_tracker.track(video_frame.matrix)
aruco_tracker.draw(visu_frame.matrix)
- # Project 3D scene on each video frame and the visualisation frame
- if aruco_tracker.get_markers_number():
-
- # Store aoi 2D video for further scene merging
- aoi2D_dict = {}
-
- for (i, marker_id) in enumerate(aruco_tracker.get_markers_ids()):
+ if aruco_tracker.get_markers_number() == 0:
+ raise UserWarning('No marker detected')
- # Process marker pose
- try:
+ # Store aoi 2D video for further scene merging
+ aoi2D_dict = {}
- # Copy 3D scene related to detected marker
- aoi3D_scene = aoi3D_scene_selector(marker_id)
-
- if aoi3D_scene == None:
- raise UserWarning('No AOI 3D scene')
+ # Project 3D scene on each video frame and the visualisation frame
+ for (i, marker_id) in enumerate(aruco_tracker.get_markers_ids()):
- # Transform scene into camera referential
- aoi3D_camera = aoi3D_scene.transform(aruco_tracker.get_marker_translation(i), aruco_tracker.get_marker_rotation(i))
+ # Process marker pose
+ try:
- # Get aoi inside vision cone field
- cone_vision_height_cm = nearest_gaze_position_3d.value[2]/10 # cm
- cone_vision_radius_cm = numpy.tan(numpy.deg2rad(tobii_visual_hfov / 2)) * cone_vision_height_cm
+ # Copy 3D scene related to detected marker
+ aoi3D_scene = aoi3D_scene_selector(marker_id)
+
+ if aoi3D_scene == None:
+ raise UserWarning('No AOI 3D scene')
- aoi3D_inside, aoi3D_outside = aoi3D_camera.vision_cone(cone_vision_radius_cm, cone_vision_height_cm)
+ # Transform scene into camera referential
+ aoi3D_camera = aoi3D_scene.transform(aruco_tracker.get_marker_translation(i), aruco_tracker.get_marker_rotation(i))
- # Keep only aoi inside vision cone field
- aoi3D_scene = aoi3D_scene.copy(exclude=aoi3D_outside.keys())
+ # Get aoi inside vision cone field
+ cone_vision_height_cm = nearest_gaze_position_3d.value[2]/10 # cm
+ cone_vision_radius_cm = numpy.tan(numpy.deg2rad(tobii_visual_hfov / 2)) * cone_vision_height_cm
- # DON'T APPLY CAMERA DISTORSION : it projects points which are far from the frame into it
- # This hack isn't realistic but as the gaze will mainly focus on centered AOI, where the distorsion is low, it is acceptable.
- aoi2D_video_scene = aoi3D_scene.project(aruco_tracker.get_marker_translation(i), aruco_tracker.get_marker_rotation(i), aruco_camera.get_K())
+ aoi3D_inside, aoi3D_outside = aoi3D_camera.vision_cone(cone_vision_radius_cm, cone_vision_height_cm)
- # Store each 2D aoi for further scene merging
- for name, aoi in aoi2D_video_scene.items():
+ # Keep only aoi inside vision cone field
+ aoi3D_scene = aoi3D_scene.copy(exclude=aoi3D_outside.keys())
- if name not in aoi2D_dict.keys():
- aoi2D_dict[name] = []
+                # DON'T APPLY CAMERA DISTORTION: it projects points which are far from the frame into it
+                # This hack isn't realistic but as the gaze will mainly focus on centered AOI, where the distortion is low, it is acceptable.
+ aoi2D_video_scene = aoi3D_scene.project(aruco_tracker.get_marker_translation(i), aruco_tracker.get_marker_rotation(i), aruco_camera.get_K())
- aoi2D_dict[name].append(aoi.clockwise())
+ # Store each 2D aoi for further scene merging
+ for name, aoi in aoi2D_video_scene.items():
- # Select 2D visu scene if there is one for the detected marker
- aoi2D_visu_scene = aoi2D_visu_scene_selector(marker_id)
- aoi2D_visu_frame = aoi2D_visu_frame_selector(marker_id)
-
- if aoi2D_visu_scene == None:
- continue
-
- look_at = aoi2D_video_scene['Visualisation_Plan'].look_at(gaze_position_pixel)
+ if name not in aoi2D_dict.keys():
+ aoi2D_dict[name] = []
- visu_gaze_pixel = aoi2D_visu_scene['Visualisation_Plan'].looked_pixel(look_at)
- cv.circle(aoi2D_visu_frame, visu_gaze_pixel, 4, (0, 0, 255), -1)
+ aoi2D_dict[name].append(aoi.clockwise())
- # Write warning related to marker pose processing
- except UserWarning as e:
-
- cv.putText(visu_frame.matrix, f'Marker {marker_id}: {e}', (20, int(visu_frame.height) - (marker_id+1) * 40), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
+ # Select 2D visu scene if there is one for the detected marker
+ aoi2D_visu_scene = aoi2D_visu_scene_selector(marker_id)
+ aoi2D_visu_frame = aoi2D_visu_frame_selector(marker_id)
+
+ if aoi2D_visu_scene == None:
+ continue
+
+ look_at = aoi2D_video_scene['Visualisation_Plan'].look_at(gaze_position_pixel)
- # Merge all 2D aoi into a single 2D scene
- aoi2D_merged_scene = AOI2DScene.AOI2DScene()
- for name, aoi_array in aoi2D_dict.items():
- aoi2D_merged_scene[name] = numpy.sum(aoi_array, axis=0) / len(aoi_array)
+ visu_gaze_pixel = aoi2D_visu_scene['Visualisation_Plan'].looked_pixel(look_at)
+ cv.circle(aoi2D_visu_frame, visu_gaze_pixel, 4, (0, 0, 255), -1)
- aoi2D_merged_scene.draw(visu_frame.matrix, gaze_position_pixel, gaze_accuracy_pixel, exclude=['Visualisation_Plan'])
-
- # Store 2D merged scene at this time in millisecond
- ts_aois_scenes[round(video_ts_ms)] = aoi2D_merged_scene
+ # Write warning related to marker pose processing
+ except UserWarning as e:
+
+ cv.putText(visu_frame.matrix, f'Marker {marker_id}: {e}', (20, int(visu_frame.height) - (marker_id+1) * 40), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
- else:
+ # Merge all 2D aoi into a single 2D scene
+ aoi2D_merged_scene = AOI2DScene.AOI2DScene()
+ for name, aoi_array in aoi2D_dict.items():
+ aoi2D_merged_scene[name] = numpy.sum(aoi_array, axis=0) / len(aoi_array)
- raise UserWarning('No marker detected')
+ aoi2D_merged_scene.draw(visu_frame.matrix, gaze_position_pixel, gaze_accuracy_pixel, exclude=['Visualisation_Plan'])
+
+ # Store 2D merged scene at this time in millisecond
+ ts_aois_scenes[round(video_ts_ms)] = aoi2D_merged_scene
# Write warning related to video and data frame processing
except UserWarning as e:
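
Two geometric conversions recur in the hunks above and in the live controller below: the vision cone that filters AOI takes the gaze distance (the Z component of the 3D gaze position, in millimetres) as cone height and derives its radius from half the visual horizontal field of view, while the gaze accuracy circle converts an angular accuracy into pixels through the camera field of view. A minimal sketch of both, with example values that are assumptions rather than Tobii specifications:

    import numpy

    def vision_cone_cm(gaze_z_mm, visual_hfov_deg):
        # Cone height is the gaze distance converted to centimetres; the
        # radius follows from half the visual horizontal field of view.
        height_cm = gaze_z_mm / 10
        radius_cm = numpy.tan(numpy.deg2rad(visual_hfov_deg / 2)) * height_cm
        return radius_cm, height_cm

    def gaze_accuracy_px(gaze_z_mm, accuracy_deg, camera_hfov_deg, frame_width):
        # Project the angular accuracy onto the scene at gaze distance, then
        # rescale from millimetres to pixels via the camera field of view.
        accuracy_mm = numpy.tan(numpy.deg2rad(accuracy_deg)) * gaze_z_mm
        hfov_mm = numpy.tan(numpy.deg2rad(camera_hfov_deg / 2)) * gaze_z_mm
        return round(frame_width * accuracy_mm / hfov_mm)

    print(vision_cone_cm(gaze_z_mm=600, visual_hfov_deg=160))
    print(gaze_accuracy_px(gaze_z_mm=600, accuracy_deg=1.42,
                           camera_hfov_deg=82, frame_width=1920))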
diff --git a/src/argaze/utils/live_tobii_aruco_aoi_ivy_controller.py b/src/argaze/utils/live_tobii_aruco_aoi_ivy_controller.py
index 1ab02e4..3fea974 100644
--- a/src/argaze/utils/live_tobii_aruco_aoi_ivy_controller.py
+++ b/src/argaze/utils/live_tobii_aruco_aoi_ivy_controller.py
@@ -93,108 +93,148 @@ def main():
try:
past_gaze_positions = DataStructures.TimeStampedBuffer()
+ past_head_rotations = DataStructures.TimeStampedBuffer()
+
+ head_moving = False
+ head_movement_last = 0.
while tobii_video_stream.is_alive():
video_ts, video_frame = tobii_video_stream.read()
- # Track markers with pose estimation and draw them
- aruco_tracker.track(video_frame.matrix)
- aruco_tracker.draw(video_frame.matrix)
+ # Copy video frame to edit visualisation on it without disrupting aruco tracking
+ visu_frame = video_frame.copy()
+ # Write stream timing
+ cv.rectangle(visu_frame.matrix, (0, 0), (550, 50), (63, 63, 63), -1)
+            cv.putText(visu_frame.matrix, f'Stream time: {int(video_ts / 1000)} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
+
+ # Draw focus area
+            cv.rectangle(visu_frame.matrix, (int(visu_frame.width/6), 0), (int(visu_frame.width*(1-1/6)), int(visu_frame.height)), (255, 150, 150), 1)
+
+ # Draw center
+ cv.line(visu_frame.matrix, (int(visu_frame.width/2) - 50, int(visu_frame.height/2)), (int(visu_frame.width/2) + 50, int(visu_frame.height/2)), (255, 150, 150), 1)
+ cv.line(visu_frame.matrix, (int(visu_frame.width/2), int(visu_frame.height/2) - 50), (int(visu_frame.width/2), int(visu_frame.height/2) + 50), (255, 150, 150), 1)
+
+ # Process video and data frame
try:
# Read data stream
data_stream = tobii_data_stream.read()
- # Store received gaze positions
+ # Store last received data
+ past_head_rotations.append(data_stream['Gyroscope'])
past_gaze_positions.append(data_stream['GazePosition'])
+ # Get nearest head rotation before video timestamp and remove all head rotations before
+                _, nearest_head_rotation = past_head_rotations.pop_first_until(video_ts)
+
+ # Calculate head movement considering only head yaw and pitch
+ head_movement = numpy.array(nearest_head_rotation.value)
+ head_movement_px = head_movement.astype(int)
+ head_movement_norm = numpy.linalg.norm(head_movement[0:2])
+
+ # Draw movement vector
+ cv.line(visu_frame.matrix, (int(visu_frame.width/2), int(visu_frame.height/2)), (int(visu_frame.width/2) + head_movement_px[1], int(visu_frame.height/2) - head_movement_px[0]), (150, 150, 150), 3)
+
+ # Head movement detection hysteresis
+ # TODO : pass the threshold value as argument
+ if not head_moving and head_movement_norm > 50:
+ head_moving = True
+
+ if head_moving and head_movement_norm < 10:
+ head_moving = False
+
+ # Ignore frame where head is moving
+ if head_moving:
+ raise UserWarning('Head is moving')
+
# Get nearest gaze position before video timestamp and remove all gaze positions before
_, nearest_gaze_position = tobii_ts_gaze_positions.pop_first_until(video_ts)
- # Get nearest gaze position 3D before video timestamp and remove all gaze positions before
- _, nearest_gaze_position_3d = tobii_ts_gaze_positions_3d.pop_first_until(video_ts)
-
- # Consider gaze position if gaze precision can be evaluated
- if nearest_gaze_position_3d.value[2] > 0:
+ gaze_position_pixel = (int(nearest_gaze_position.value[0] * visu_frame.width), int(nearest_gaze_position.value[1] * visu_frame.height))
- gaze_position_pixel = (int(nearest_gaze_position.value[0] * video_frame.width), int(nearest_gaze_position.value[1] * video_frame.height))
+ # Draw gaze position
+ cv.circle(visu_frame.matrix, gaze_position_pixel, 2, (0, 255, 255), -1)
- gaze_accuracy_mm = numpy.tan(numpy.deg2rad(tobii_accuracy)) * nearest_gaze_position_3d.value[2]
- tobii_camera_hfov_mm = numpy.tan(numpy.deg2rad(tobii_camera_hfov / 2)) * nearest_gaze_position_3d.value[2]
- gaze_accuracy_pixel = round(video_frame.width * float(gaze_accuracy_mm) / float(tobii_camera_hfov_mm))
+ # Get nearest gaze position 3D before video timestamp and remove all gaze positions before
+ _, nearest_gaze_position_3d = tobii_ts_gaze_positions_3d.pop_first_until(video_ts)
- # Draw gaze position and accuracy
- cv.circle(video_frame.matrix, gaze_position_pixel, 2, (0, 255, 255), -1)
- cv.circle(video_frame.matrix, gaze_position_pixel, gaze_accuracy_pixel, (0, 255, 255), 1)
+                # Ignore frame when gaze precision can't be evaluated
+ if nearest_gaze_position_3d.value[2] <= 0:
+ raise UserWarning('Negative Z gaze position 3D value')
- else:
+ gaze_position_pixel = (int(nearest_gaze_position.value[0] * visu_frame.width), int(nearest_gaze_position.value[1] * visu_frame.height))
- ValueError('Unable to evaluate gaze precision')
+ gaze_accuracy_mm = numpy.tan(numpy.deg2rad(tobii_accuracy)) * nearest_gaze_position_3d.value[2]
+ tobii_camera_hfov_mm = numpy.tan(numpy.deg2rad(tobii_camera_hfov / 2)) * nearest_gaze_position_3d.value[2]
+ gaze_accuracy_pixel = round(visu_frame.width * float(gaze_accuracy_mm) / float(tobii_camera_hfov_mm))
- # Wait for gaze position
- except (AttributeError, ValueError):
- continue
+ # Draw gaze position and accuracy
+ cv.circle(visu_frame.matrix, gaze_position_pixel, 2, (0, 255, 255), -1)
+ cv.circle(visu_frame.matrix, gaze_position_pixel, gaze_accuracy_pixel, (0, 255, 255), 1)
- # Draw focus area
- cv.circle(video_frame.matrix, (int(video_frame.width/2), int(video_frame.height/2)), int(video_frame.width/3), (255, 150, 150), 1)
+ # Hide frame left and right borders before tracking to ignore markers outside focus area
+ cv.rectangle(video_frame.matrix, (0, 0), (int(video_frame.width/6), int(video_frame.height)), (0, 0, 0), -1)
+ cv.rectangle(video_frame.matrix, (int(video_frame.width*(1 - 1/6)), 0), (int(video_frame.width), int(video_frame.height)), (0, 0, 0), -1)
- # Draw focus area center
- cv.line(video_frame.matrix, (int(video_frame.width/2) - 50, int(video_frame.height/2)), (int(video_frame.width/2) + 50, int(video_frame.height/2)), (255, 150, 150), 1)
- cv.line(video_frame.matrix, (int(video_frame.width/2), int(video_frame.height/2) - 50), (int(video_frame.width/2), int(video_frame.height/2) + 50), (255, 150, 150), 1)
+ # Track markers with pose estimation and draw them
+ aruco_tracker.track(video_frame.matrix)
+ aruco_tracker.draw(visu_frame.matrix)
- # Project 3D scenes related to each aruco markers
- if aruco_tracker.get_markers_number():
+ if aruco_tracker.get_markers_number() == 0:
+ raise UserWarning('No marker detected')
# Store aoi 2D video for further scene merging
aoi2D_dict = {}
+ # Project 3D scenes related to each aruco markers
for (i, marker_id) in enumerate(aruco_tracker.get_markers_ids()):
- # Select 3D scene related to detected marker
- aoi3D_scene = aoi3D_scene_selector(marker_id)
-
- if aoi3D_scene == None:
- continue
+ # Process marker pose
+ try:
- # Ignore marker out of focus area
- marker_x, marker_y = aruco_tracker.get_marker_center(i)
- distance_to_center = ( (video_frame.width/2 - marker_x)**2 + (video_frame.height/2 - marker_y)**2 )**0.5
+ # Select 3D scene related to detected marker
+ aoi3D_scene = aoi3D_scene_selector(marker_id)
+
+ if aoi3D_scene == None:
+ raise UserWarning('No AOI 3D scene')
- if distance_to_center > int(video_frame.width/3):
- continue
+ # Transform scene into camera referential
+ aoi3D_camera = aoi3D_scene.transform(aruco_tracker.get_marker_translation(i), aruco_tracker.get_marker_rotation(i))
- # Transform scene into camera referential
- aoi3D_camera = aoi3D_scene.transform(aruco_tracker.get_marker_translation(i), aruco_tracker.get_marker_rotation(i))
+ # Get aoi inside vision cone field
+ cone_vision_height_cm = nearest_gaze_position_3d.value[2]/10 # cm
+ cone_vision_radius_cm = numpy.tan(numpy.deg2rad(tobii_visual_hfov / 2)) * cone_vision_height_cm
- # Get aoi inside vision cone field
- cone_vision_height_cm = nearest_gaze_position_3d.value[2]/10 # cm
- cone_vision_radius_cm = numpy.tan(numpy.deg2rad(tobii_visual_hfov / 2)) * cone_vision_height_cm
+ aoi3D_inside, aoi3D_outside = aoi3D_camera.vision_cone(cone_vision_radius_cm, cone_vision_height_cm)
- aoi3D_inside, aoi3D_outside = aoi3D_camera.vision_cone(cone_vision_radius_cm, cone_vision_height_cm)
+ # Keep only aoi inside vision cone field
+ aoi3D_scene = aoi3D_scene.copy(exclude=aoi3D_outside.keys())
- # Keep only aoi inside vision cone field
- aoi3D_scene = aoi3D_scene.copy(exclude=aoi3D_outside.keys())
+                        # DON'T APPLY CAMERA DISTORTION: it projects points which are far from the frame into it
+                        # This hack isn't realistic but as the gaze will mainly focus on centered AOI, where the distortion is low, it is acceptable.
+ aoi2D_video_scene = aoi3D_scene.project(aruco_tracker.get_marker_translation(i), aruco_tracker.get_marker_rotation(i), aruco_camera.get_K())
- # DON'T APPLY CAMERA DISTORSION : it projects points which are far from the frame into it
- # This hack isn't realistic but as the gaze will mainly focus on centered AOI, where the distorsion is low, it is acceptable.
- aoi2D_video_scene = aoi3D_scene.project(aruco_tracker.get_marker_translation(i), aruco_tracker.get_marker_rotation(i), aruco_camera.get_K())
+ # Store each 2D aoi for further scene merging
+ for name, aoi in aoi2D_video_scene.items():
- # Store each 2D aoi for further scene merging
- for name, aoi in aoi2D_video_scene.items():
+ if name not in aoi2D_dict.keys():
+ aoi2D_dict[name] = []
- if name not in aoi2D_dict.keys():
- aoi2D_dict[name] = []
+ aoi2D_dict[name].append(aoi.clockwise())
- aoi2D_dict[name].append(aoi.clockwise())
+ # Write warning related to marker pose processing
+ except UserWarning as e:
+
+ cv.putText(visu_frame.matrix, f'Marker {marker_id}: {e}', (20, int(visu_frame.height) - (marker_id+1) * 40), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
# Merge all 2D aoi into a single 2D scene
aoi2D_merged_scene = AOI2DScene.AOI2DScene()
for name, aoi_array in aoi2D_dict.items():
aoi2D_merged_scene[name] = numpy.sum(aoi_array, axis=0) / len(aoi_array)
- aoi2D_merged_scene.draw(video_frame.matrix, video_gaze_pixel, gaze_accuracy_pixel, exclude=['Visualisation_Plan'])
+ aoi2D_merged_scene.draw(visu_frame.matrix, video_gaze_pixel, gaze_accuracy_pixel, exclude=['Visualisation_Plan'])
# Send look at aoi pointer
for name, aoi in aoi2D_merged_scene.items():
@@ -207,11 +247,19 @@ def main():
else:
IvySendMsg(f'looking {name}')
+ # Write warning related to video and data frame processing
+ except UserWarning as e:
+
+ cv.putText(visu_frame.matrix, str(e), (20, 80), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
+
+ except ValueError:
+ pass
+
# Close window using 'Esc' key
if cv.waitKey(1) == 27:
break
- cv.imshow('Live Scene', video_frame.matrix)
+ cv.imshow('Live Scene', visu_frame.matrix)
# Exit on 'ctrl+C' interruption
except KeyboardInterrupt:
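
The head-movement gate added to the live controller is a plain hysteresis on the norm of the gyroscope's yaw and pitch components: tracking is suspended once the norm exceeds a high threshold and resumes only after it falls back under a lower one, the frame being skipped by raising UserWarning like the other skip conditions. A minimal standalone sketch using the hard-coded thresholds from the diff (a TODO there notes they should become arguments):

    import numpy

    class HeadMovementGate:
        def __init__(self, start_threshold=50, stop_threshold=10):
            self.start_threshold = start_threshold
            self.stop_threshold = stop_threshold
            self.moving = False

        def update(self, rotation):
            # Consider only head yaw and pitch (first two gyroscope components).
            norm = numpy.linalg.norm(numpy.asarray(rotation)[0:2])
            if not self.moving and norm > self.start_threshold:
                self.moving = True
            if self.moving and norm < self.stop_threshold:
                self.moving = False
            return self.moving

    gate = HeadMovementGate()
    for sample in ([60, 5, 0], [30, 4, 0], [5, 2, 0], [3, 1, 0]):
        if gate.update(sample):
            # In the script this raises UserWarning('Head is moving').
            print('skip frame')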