-rw-r--r--  src/argaze/utils/environment_edit.py  206
1 file changed, 128 insertions, 78 deletions
diff --git a/src/argaze/utils/environment_edit.py b/src/argaze/utils/environment_edit.py
index ae45769..038c569 100644
--- a/src/argaze/utils/environment_edit.py
+++ b/src/argaze/utils/environment_edit.py
@@ -23,7 +23,7 @@ def main():
parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0])
parser.add_argument('environment', metavar='ENVIRONMENT', type=str, help='ar environment filepath')
parser.add_argument('movie', metavar='MOVIE', type=str, default=None, help='movie path')
- parser.add_argument('-s','--start', metavar='START', type=float, default=None, help='start time in second')
+ parser.add_argument('-s','--start', metavar='START', type=float, default=0., help='start time in second')
parser.add_argument('-o', '--output', metavar='OUT', type=str, default='environment.json', help='edited ar environment file path')
args = parser.parse_args()
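For reference, a minimal sketch of what the new `--start` default buys (argument name and help text taken from the hunk above, frame rate value made up): with `default=0.` the start time can feed straight into seek arithmetic, whereas `default=None` needed a guard first.

```python
import argparse

# Minimal sketch of the --start default change above; not the script's full parser.
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--start', metavar='START', type=float, default=0.,
                    help='start time in second')
args = parser.parse_args([])  # no flags given

# With default=0. the start time converts directly to a frame index
# (frame rate is a hypothetical value for illustration).
frame_rate = 25
start_frame_index = int(args.start * frame_rate)  # 0 when --start is omitted
```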
@@ -46,6 +46,7 @@ def main():
right_button = False
edit_trans = False # translate
edit_z = False
+ hide = False
# Update pointer position
def on_mouse_event(event, x, y, flags, param):
@@ -110,7 +111,7 @@ def main():
gaze_position = GazeFeatures.GazePosition(pointer, precision=2)
# Select a new frame and detect markers once
- if next_frame_index != current_frame_index or refresh_detection:
+ if next_frame_index != current_frame_index or refresh_detection or hide:
video_capture.set(cv2.CAP_PROP_POS_FRAMES, next_frame_index)
@@ -120,6 +121,12 @@ def main():
current_frame_index = video_capture.get(cv2.CAP_PROP_POS_FRAMES) - 1
current_frame_time = video_capture.get(cv2.CAP_PROP_POS_MSEC)
+
+ # Hide zone
+ if hide:
+
+ # Draw black circle under pointer
+ cv2.circle(video_frame, pointer, 50, (0, 0, 0), -1)
# Detect markers
ar_environment.aruco_detector.detect_markers(video_frame)
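A self-contained sketch of the hide feature introduced above: before detection runs, a filled black circle is drawn under the pointer so any marker sitting there is occluded and therefore ignored. Frame size, pointer position and radius are arbitrary example values.

```python
import cv2
import numpy as np

def hide_zone(frame: np.ndarray, pointer: tuple, radius: int = 50) -> np.ndarray:
    """Black out a disc under the pointer, as the `hide` branch above does."""
    cv2.circle(frame, pointer, radius, (0, 0, 0), -1)  # filled circle, drawn in place
    return frame

# Synthetic white frame for illustration.
frame = np.full((480, 640, 3), 255, dtype=np.uint8)
hide_zone(frame, pointer=(320, 240))
# ...marker detection then runs on the occluded frame, e.g.
# ar_environment.aruco_detector.detect_markers(frame)
```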
@@ -131,7 +138,7 @@ def main():
cv2.rectangle(video_frame, (int(frame_width/6), 0), (int(frame_width*(1-1/6)), int(frame_height)), (255, 150, 150), 1)
# Write timing
- cv2.rectangle(video_frame, (0, 0), (700, 50), (63, 63, 63), -1)
+ cv2.rectangle(video_frame, (0, 0), (frame_width, 50), (63, 63, 63), -1)
cv2.putText(video_frame, f'Time: {int(current_frame_time)} ms', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
# Copy frame
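One caveat on the widened timing banner: OpenCV drawing functions expect integer coordinates, and `cv2.CAP_PROP_FRAME_WIDTH` is returned as a float, so `frame_width` is assumed to be cast to int where the capture is opened (that part of the script is not shown here). A minimal sketch of that assumption:

```python
import cv2

video_capture = cv2.VideoCapture('movie.mp4')  # hypothetical movie path

# CAP_PROP_* getters return floats; casting once keeps drawing calls such as
# cv2.rectangle(frame, (0, 0), (frame_width, 50), ...) happy with int coordinates.
frame_width = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
```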
@@ -144,10 +151,7 @@ def main():
# Draw detected markers
ar_environment.aruco_detector.draw_detected_markers(video_frame)
-
- # Write detected marker ids
- cv2.putText(video_frame, f'Detected markers: {list(ar_environment.aruco_detector.detected_markers.keys())}', (20, frame_height - 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
-
+
# Handle marker selection on left click
if len(ar_environment.aruco_detector.detected_markers) > 0:
@@ -160,95 +164,132 @@ def main():
selected_marker_id = marker_id
- # If a marker is selected
try:
-
- # Retreive selected marker
- selected_marker = ar_environment.aruco_detector.detected_markers[selected_marker_id]
- # Estimate selected marker pose
- ar_environment.aruco_detector.estimate_markers_pose([selected_marker_id])
+ # A marker is selected
+ if selected_marker_id >= 0:
- # Retreive selected marker place
- selected_place = ar_scene.aruco_scene.places[selected_marker_id]
+ try:
+
+ # Retrieve selected marker
+ selected_marker = ar_environment.aruco_detector.detected_markers[selected_marker_id]
- # On right click
- if right_button:
+ # Estimate selected marker pose
+ ar_environment.aruco_detector.estimate_markers_pose([selected_marker_id])
- pointer_delta_x, pointer_delta_y = right_drag[0] / frame_width, right_drag[1] / frame_height
+ # Retrieve selected marker place
+ selected_place = ar_scene.aruco_scene.places[selected_marker_id]
- place_edit[selected_marker_id] = {'rotation': (0, 0, 0), 'translation': (0, 0, 0)}
+ # On right click
+ if right_button:
- if edit_trans:
-
- # Edit place rotation
- if edit_z:
- place_edit[selected_marker_id]['rotation'] = (0, 0, -pointer_delta_y)
- else:
- place_edit[selected_marker_id]['rotation'] = (pointer_delta_y, pointer_delta_x, 0)
+ pointer_delta_x, pointer_delta_y = right_drag[0] / frame_width, right_drag[1] / frame_height
+
+ place_edit[selected_marker_id] = {'rotation': (0, 0, 0), 'translation': (0, 0, 0)}
+
+ if edit_trans:
+
+ # Edit place rotation
+ if edit_z:
+ place_edit[selected_marker_id]['rotation'] = (0, 0, -pointer_delta_y)
+ else:
+ place_edit[selected_marker_id]['rotation'] = (pointer_delta_y, pointer_delta_x, 0)
+
+ else:
+
+ # Edit place translation
+ if edit_z:
+ place_edit[selected_marker_id]['translation'] = (0, 0, pointer_delta_y)
+ else:
+ place_edit[selected_marker_id]['translation'] = (-pointer_delta_x, pointer_delta_y, 0)
+
+ # Apply transformations
+ R = selected_place.rotation.dot(ArUcoScene.make_rotation_matrix(*place_edit[selected_marker_id]['rotation']).T)
+ T = selected_place.translation + numpy.array(place_edit[selected_marker_id]['translation'])
- else:
+ edited_place = ArUcoScene.Place(T, R, selected_marker)
- # Edit place translation
- if edit_z:
- place_edit[selected_marker_id]['translation'] = (0, 0, pointer_delta_y)
else:
- place_edit[selected_marker_id]['translation'] = (-pointer_delta_x, pointer_delta_y, 0)
-
- # Apply transformations
- R = selected_place.rotation.dot(ArUcoScene.make_rotation_matrix(*place_edit[selected_marker_id]['rotation']).T)
- T = selected_place.translation + numpy.array(place_edit[selected_marker_id]['translation'])
- edited_place = ArUcoScene.Place(T, R, selected_marker)
+ edited_place = selected_place
+
+ cv2.rectangle(video_frame, (0, 130), (460, 450), (127, 127, 127), -1)
+
+ # Write edited rotation matrix
+ R = edited_place.rotation
+ cv2.putText(video_frame, f'Rotation matrix:', (20, 160), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(video_frame, f'{R[0][0]:.3f} {R[0][1]:.3f} {R[0][2]:.3f}', (40, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
+ cv2.putText(video_frame, f'{R[1][0]:.3f} {R[1][1]:.3f} {R[1][2]:.3f}', (40, 240), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
+ cv2.putText(video_frame, f'{R[2][0]:.3f} {R[2][1]:.3f} {R[2][2]:.3f}', (40, 280), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
+
+ # Write edited translation vector
+ T = edited_place.translation
+ cv2.putText(video_frame, f'Translation vector:', (20, 320), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(video_frame, f'{T[0]:.3f}', (40, 360), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
+ cv2.putText(video_frame, f'{T[1]:.3f}', (40, 400), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
+ cv2.putText(video_frame, f'{T[2]:.3f}', (40, 440), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
+
+ # Replace selected place by edited place
+ ar_scene.aruco_scene.places[selected_marker_id] = edited_place
+
+ # Estimate scene pose considering only selected marker
+ tvec, rmat, strategy, _ = ar_scene.estimate_pose({selected_marker_id: selected_marker})
+ strategy = strategy.replace('_', ' ')
+
+ # Write pose estimation strategy
+ cv2.putText(video_frame, f'{strategy} with marker {selected_marker_id}', (20, frame_height - 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+
+ # Draw expected marker places
+ ar_scene.draw_places(video_frame)
+
+ # Project AOI scene into frame according to estimated pose
+ aoi_scene_projection = ar_scene.project(tvec, rmat, visual_hfov=TobiiSpecifications.VISUAL_HFOV)
+
+ # Draw AOI scene projection with gaze
+ aoi_scene_projection.draw_circlecast(video_frame, gaze_position)
+ # Catch missing selected marker
+ except KeyError:
+
+ cv2.putText(video_frame, f'Marker {selected_marker_id} not found', (20, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
+
+ # No marker selected
else:
- edited_place = selected_place
-
- cv2.rectangle(video_frame, (0, 130), (460, 450), (127, 127, 127), -1)
-
- # Write edited rotation matrix
- R = edited_place.rotation
- cv2.putText(video_frame, f'Rotation matrix:', (20, 160), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
- cv2.putText(video_frame, f'{R[0][0]:.3f} {R[0][1]:.3f} {R[0][2]:.3f}', (40, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
- cv2.putText(video_frame, f'{R[1][0]:.3f} {R[1][1]:.3f} {R[1][2]:.3f}', (40, 240), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
- cv2.putText(video_frame, f'{R[2][0]:.3f} {R[2][1]:.3f} {R[2][2]:.3f}', (40, 280), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
-
- # Write edited translation vector
- T = edited_place.translation
- cv2.putText(video_frame, f'Translation vector:', (20, 320), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
- cv2.putText(video_frame, f'{T[0]:.3f}', (40, 360), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
- cv2.putText(video_frame, f'{T[1]:.3f}', (40, 400), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
- cv2.putText(video_frame, f'{T[2]:.3f}', (40, 440), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
+ try:
- # Replace selected place by edited place
- ar_scene.aruco_scene.places[selected_marker_id] = edited_place
+ # Try to build AOI scene from detected ArUco marker corners
+ aoi_scene_projection = ar_scene.build_aruco_aoi_scene(ar_environment.aruco_detector.detected_markers)
- # Estimate scene pose considering only selected marker
- tvec, rmat, _ = ar_scene.estimate_pose({selected_marker_id: selected_marker})
+ # Write pose estimation strategy
+ cv2.putText(video_frame, f'build aruco aoi scene with markers {list(ar_environment.aruco_detector.detected_markers.keys())}', (20, frame_height - 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
- # Draw expected marker places
- ar_scene.draw_places(video_frame)
+ except:
- # Project AOI scene into frame according estimated pose
- aoi_scene_projection = ar_scene.project(tvec, rmat, visual_hfov=TobiiSpecifications.VISUAL_HFOV)
+ # Estimate all markers' poses
+ ar_environment.aruco_detector.estimate_markers_pose()
- # Draw AOI scene projection with gaze
- aoi_scene_projection.draw_circlecast(video_frame, gaze_position)
+ # Estimate scene pose considering all markers
+ tvec, rmat, strategy, consistent_markers = ar_scene.estimate_pose(ar_environment.aruco_detector.detected_markers)
+ strategy = strategy.replace('_', ' ')
- # Catch missing selected marker
- except KeyError:
+ # Write pose estimation strategy
+ cv2.putText(video_frame, f'{strategy} with markers {list(consistent_markers.keys())}', (20, frame_height - 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
- # Write error
- if selected_marker_id >= 0:
+ # Draw expected marker places
+ ar_scene.draw_places(video_frame)
+
+ # Project AOI scene into frame according to estimated pose
+ aoi_scene_projection = ar_scene.project(tvec, rmat, visual_hfov=TobiiSpecifications.VISUAL_HFOV)
- cv2.putText(video_frame, f'Marker {selected_marker_id} not found', (20, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
+ # Draw AOI scene projection with gaze
+ aoi_scene_projection.draw_circlecast(video_frame, gaze_position)
# Catch exceptions raised by estimate_pose and project methods
except (ArFeatures.PoseEstimationFailed, ArFeatures.SceneProjectionFailed) as e:
- cv2.rectangle(video_frame, (0, 50), (700, 100), (127, 127, 127), -1)
- cv2.putText(video_frame, f'Error: {e}', (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.rectangle(video_frame, (0, 90), (700, 130), (127, 127, 127), -1)
+ cv2.putText(video_frame, f'Error: {e}', (20, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
# Draw frame
cv2.imshow(ar_environment.name, video_frame)
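The right-drag editing above reduces to composing the stored place pose with a small Euler-angle rotation and a translation offset. Below is a self-contained sketch of that arithmetic; `make_rotation_matrix` is a stand-in whose axis order and angle unit are only inferred from its call site, not taken from ArUcoScene.

```python
import numpy as np

def make_rotation_matrix(rx: float, ry: float, rz: float) -> np.ndarray:
    """Stand-in for ArUcoScene.make_rotation_matrix: Rz @ Ry @ Rx from
    Euler angles in radians (the real convention may differ)."""
    cx, sx = np.cos(rx), np.sin(rx)
    cy, sy = np.cos(ry), np.sin(ry)
    cz, sz = np.cos(rz), np.sin(rz)
    Rx = np.array([[1, 0, 0], [0, cx, -sx], [0, sx, cx]])
    Ry = np.array([[cy, 0, sy], [0, 1, 0], [-sy, 0, cy]])
    Rz = np.array([[cz, -sz, 0], [sz, cz, 0], [0, 0, 1]])
    return Rz @ Ry @ Rx

# Existing place pose (arbitrary example values).
place_rotation = np.eye(3)
place_translation = np.array([0.0, 0.0, 10.0])

# Drag deltas normalised by frame size, as in the hunk above.
rotation_edit = (0.02, 0.05, 0.0)      # used when edit_trans is set
translation_edit = (-0.05, 0.02, 0.0)  # used otherwise

# Same composition as the edited place built above.
R = place_rotation.dot(make_rotation_matrix(*rotation_edit).T)
T = place_translation + np.array(translation_edit)
```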
@@ -259,7 +300,7 @@ def main():
# Write selected marker id
if selected_marker_id >= 0:
- cv2.rectangle(video_frame, (0, 50), (550, 90), (127, 127, 127), -1)
+ cv2.rectangle(video_frame, (0, 50), (700, 90), (127, 127, 127), -1)
# Select color
if edit_z:
@@ -276,12 +317,13 @@ def main():
# Write documentation
else:
- cv2.rectangle(video_frame, (0, 50), (650, 250), (127, 127, 127), -1)
- cv2.putText(video_frame, f'> Left click on marker: select scene', (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
- cv2.putText(video_frame, f'> T: translate, R: rotate', (20, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
- cv2.putText(video_frame, f'> Z: switch Z axis edition', (20, 160), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
- cv2.putText(video_frame, f'> Right click and drag: edit XY axis', (20, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
- cv2.putText(video_frame, f'> Ctrl + S: save scene', (20, 240), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.rectangle(video_frame, (0, 130), (700, 380), (127, 127, 127), -1)
+ cv2.putText(video_frame, f'> Left click on marker: select marker', (20, 160), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(video_frame, f'> Backspace: unselect marker', (20, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(video_frame, f'> T: translate, R: rotate', (20, 240), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(video_frame, f'> Z: switch Z axis edition', (20, 280), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(video_frame, f'> Right click and drag: edit XY axis', (20, 320), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(video_frame, f'> Ctrl + S: save scene', (20, 360), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
# Reset left_click
left_click = (0, 0)
@@ -289,7 +331,7 @@ def main():
key_pressed = cv2.waitKey(10)
#if key_pressed != -1:
- # print(key_pressed)
+ # print(key_pressed)
# Select previous frame with left arrow
if key_pressed == 2:
@@ -303,6 +345,10 @@ def main():
if next_frame_index < 0:
next_frame_index = 0
+ # Unselect marker with backspace key
+ if key_pressed == 127:
+ selected_marker_id = -1
+
# Edit rotation with r key
if key_pressed == 114:
edit_trans = True
@@ -315,6 +361,10 @@ def main():
if key_pressed == 122:
edit_z = not edit_z
+ # Switch hide mode
+ if key_pressed == 104:
+ hide = not hide
+
# Save selected marker edition using 'Ctrl + s'
if key_pressed == 19:
ar_environment.to_json(args.output)
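The new key bindings compare raw `cv2.waitKey` codes: 104, 114 and 122 are simply `ord('h')`, `ord('r')` and `ord('z')`, while 127 (backspace), 19 (Ctrl + S) and the 2/3 arrow codes are backend and platform dependent. A toy dispatcher mirroring the handlers above (the state dictionary and its keys are made up for illustration):

```python
KEY_BACKSPACE = 127    # unselect marker (platform dependent)
KEY_CTRL_S = 19        # save scene (platform dependent)
KEY_HIDE = ord('h')    # 104, toggle hide mode
KEY_ROTATE = ord('r')  # 114
KEY_Z_AXIS = ord('z')  # 122, switch Z axis edition

def handle_key(key_pressed: int, state: dict) -> None:
    """Toy dispatcher mirroring the waitKey handling above."""
    if key_pressed == KEY_BACKSPACE:
        state['selected_marker_id'] = -1
    elif key_pressed == KEY_HIDE:
        state['hide'] = not state['hide']
    elif key_pressed == KEY_ROTATE:
        state['edit_trans'] = True
    elif key_pressed == KEY_Z_AXIS:
        state['edit_z'] = not state['edit_z']

state = {'selected_marker_id': 3, 'hide': False, 'edit_trans': False, 'edit_z': False}
handle_key(ord('h'), state)       # hide becomes True
handle_key(KEY_BACKSPACE, state)  # selected_marker_id becomes -1
```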