author    Théo de la Hogue    2024-03-27 18:08:30 +0100
committer Théo de la Hogue    2024-03-27 18:08:30 +0100
commit    58ef2e8276318d76fa2b11a257ed65850352c7fd (patch)
tree      6979db7d6ebecb7ce8a95f787f342154e264dc7d /src
parent    8e5dba2318e86f6dbd787ad2c59ab86686e19215 (diff)
Updating demonstration.
Diffstat (limited to 'src')
-rw-r--r--  src/argaze/ArFeatures.py | 13
-rw-r--r--  src/argaze/DataFeatures.py | 14
-rw-r--r--  src/argaze/utils/aruco_camera_configuration_edit.py | 735
-rw-r--r--  src/argaze/utils/context_run.py (renamed from src/argaze/utils/pipeline_run.py) | 2
-rw-r--r--  src/argaze/utils/contexts/OpenCV.py | 62
-rw-r--r--  src/argaze/utils/contexts/TobiiProGlasses2.py (renamed from src/argaze/utils/eyetrackers/TobiiProGlasses2.py) | 0
-rw-r--r--  src/argaze/utils/contexts/__init__.py (renamed from src/argaze/utils/eyetrackers/__init__.py) | 0
-rw-r--r--  src/argaze/utils/demo/demo.mov | bin 13345258 -> 0 bytes
-rw-r--r--  src/argaze/utils/demo/opencv_window_context_setup.json | 10
-rw-r--r--  src/argaze/utils/demo/tobii_live_stream_context_setup.json (renamed from src/argaze/utils/demo/eyetracker_setup.json) | 2
12 files changed, 90 insertions, 1225 deletions
diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py
index d78d80a..6b5589e 100644
--- a/src/argaze/ArFeatures.py
+++ b/src/argaze/ArFeatures.py
@@ -1372,7 +1372,9 @@ class ArContext(DataFeatures.PipelineStepObject):
self.__pipeline = None
self.__exceptions = DataFeatures.TimestampedExceptions()
+ # Init gaze position processing assessment
self.__process_gaze_position_chrono = UtilsFeatures.TimeProbe()
+ self.__process_gaze_position_frequency = 0
# Init protected attributes
self._image_parameters = DEFAULT_ARCONTEXT_IMAGE_PARAMETERS
@@ -1423,7 +1425,7 @@ class ArContext(DataFeatures.PipelineStepObject):
return self
@DataFeatures.PipelineStepEnter
- def __exit__(self, type, value, traceback):
+ def __exit__(self, exception_type, exception_value, exception_traceback):
"""Exit from ArContext."""
pass
@@ -1432,12 +1434,13 @@ class ArContext(DataFeatures.PipelineStepObject):
logging.debug('%s._process_gaze_position', type(self).__name__)
- lap_time, nb_laps, elapsed_time = gaze_position_processing_chrono.lap()
+ # Assess gaze position processing frequency
+ lap_time, nb_laps, elapsed_time = self.__process_gaze_position_chrono.lap()
if elapsed_time > 1e3:
- gaze_positions_frequency = nb_laps
- gaze_position_processing_chrono.restart()
+ self.__process_gaze_position_frequency = nb_laps
+ self.__process_gaze_position_chrono.restart()
if issubclass(type(self.__pipeline), ArFrame):
@@ -1537,7 +1540,7 @@ class ArContext(DataFeatures.PipelineStepObject):
look_time = math.nan
info_stack += 1
- cv2.putText(image, f'Look {look_time:.2f}ms', (20, info_stack * 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(image, f'Look {look_time:.2f}ms at {self.__process_gaze_position_frequency}Hz', (20, info_stack * 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
if draw_exceptions:
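
Note: the hunks above replace stale local names (the old code read gaze_position_processing_chrono, which was undefined in this scope) with the private probe attribute, and publish a frequency: each processed gaze position counts as one lap, and once more than a second has elapsed the lap count becomes the Hz value drawn by cv2.putText. A minimal self-contained sketch of this lap-counting pattern, with FrequencyProbe as a hypothetical stand-in for UtilsFeatures.TimeProbe:

    import time

    class FrequencyProbe:
        """Count laps and report the count as a frequency once per second."""

        def __init__(self):
            self.restart()

        def restart(self):
            self._start = time.time()
            self._laps = 0

        def lap(self):
            """Register one lap and return (elapsed ms, lap count) since restart."""
            self._laps += 1
            return (time.time() - self._start) * 1e3, self._laps

    probe = FrequencyProbe()
    frequency = 0  # published Hz value, like __process_gaze_position_frequency above

    def process_gaze_position():
        global frequency
        elapsed_ms, laps = probe.lap()
        if elapsed_ms > 1e3:  # one second elapsed: laps ~ calls per second
            frequency = laps
            probe.restart()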
diff --git a/src/argaze/DataFeatures.py b/src/argaze/DataFeatures.py
index 674a8d9..6ae0603 100644
--- a/src/argaze/DataFeatures.py
+++ b/src/argaze/DataFeatures.py
@@ -660,22 +660,22 @@ def PipelineStepEnter(method):
def PipelineStepExit(method):
"""Define a decorator use into PipelineStepObject class to wrap pipeline step __exit__ method."""
- def wrapper(self, exception_type, exception_value, exception_traceback):
+ def wrapper(self, *args):
"""Wrap pipeline step __exit__ method to call super, observers and children __exit__ method."""
- PipelineStepObject.__exit__(self, exception_type, exception_value, exception_traceback)
+ PipelineStepObject.__exit__(self, *args)
# Stop observers
for observer in self.observers:
- observer.__exit__(exception_type, exception_value, exception_traceback)
+ observer.__exit__(*args)
# Stop children pipeline step objects
for child in self.children:
- child.__exit__(exception_type, exception_value, exception_traceback)
+ child.__exit__(*args)
- method(self, exception_type, exception_value, exception_traceback)
+ method(self, *args)
return wrapper
@@ -787,7 +787,7 @@ class PipelineStepObject():
return self
- def __exit__(self, type, value, traceback):
+ def __exit__(self, exception_type, exception_value, exception_traceback):
"""Define default method to exit from pipeline step object context."""
logging.debug('PipelineStepObject.__exit__')
@@ -1077,7 +1077,7 @@ class PipelineStepObserver():
"""
return self
- def __exit__(self, type, value, traceback):
+ def __exit__(self, exception_type, exception_value, exception_traceback):
"""
Define abstract __exit__ method to use observer as a context.
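
Note: loosening the wrapper signature to *args lets PipelineStepExit forward whatever its caller provides, whether the full (type, value, traceback) triple from a with statement or an argument-free manual call. A generic sketch of the forwarding pattern, independent of argaze:

    class Node:
        """Context manager that forwards __exit__ to child context managers."""

        def __init__(self, *children):
            self.children = children

        def __enter__(self):
            for child in self.children:
                child.__enter__()
            return self

        def __exit__(self, *args):
            # args is the (exc_type, exc_value, exc_traceback) triple, or empty
            for child in self.children:
                child.__exit__(*args)

    with Node(Node(), Node()) as root:
        pass  # every child receives the same exit arguments as root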
diff --git a/src/argaze/utils/aruco_camera_configuration_edit.py b/src/argaze/utils/aruco_camera_configuration_edit.py
deleted file mode 100644
index 686f25e..0000000
--- a/src/argaze/utils/aruco_camera_configuration_edit.py
+++ /dev/null
@@ -1,735 +0,0 @@
-#!/usr/bin/env python
-
-"""
-
-This program is free software: you can redistribute it and/or modify it under
-the terms of the GNU General Public License as published by the Free Software
-Foundation, either version 3 of the License, or (at your option) any later
-version.
-This program is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-You should have received a copy of the GNU General Public License along with
-this program. If not, see <http://www.gnu.org/licenses/>.
-"""
-
-__author__ = "Théo de la Hogue"
-__credits__ = []
-__copyright__ = "Copyright 2023, Ecole Nationale de l'Aviation Civile (ENAC)"
-__license__ = "GPLv3"
-
-import argparse
-import time
-import itertools
-
-from argaze import ArFeatures, GazeFeatures
-from argaze.AreaOfInterest import AOIFeatures
-from argaze.ArUcoMarkers import ArUcoCamera
-from argaze.utils import UtilsFeatures
-
-from tobiiproglasses2 import *
-
-import cv2
-import numpy
-
-def main():
- """
- Load ArUco camera configuration from .json file, detect ArUco markers in movie images and estimate scene pose.
- Edit configuration to improve pose estimation.
- """
-
- # Manage arguments
- parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0])
- parser.add_argument('movie', metavar='MOVIE', type=str, default=None, help='movie path')
- parser.add_argument('configuration', metavar='CONFIGURATION', type=str, help='argaze configuration filepath')
-
- parser.add_argument('-s','--start', metavar='START', type=float, default=0., help='start time in seconds')
- parser.add_argument('-o', '--output', metavar='OUT', type=str, default='edited_configuration.json', help='edited configuration file path')
- parser.add_argument('-v', '--verbose', action='store_true', default=False, help='enable verbose mode to print information in console')
-
- args = parser.parse_args()
-
- # Load configuration
- aruco_camera = ArUcoCamera.ArUcoCamera.from_json(args.configuration)
-
- if args.verbose:
-
- print(aruco_camera)
-
- # Select first AR scene
- ar_scene = list(aruco_camera.scenes.values())[0]
-
- # Create a window to display AR environment
- cv2.namedWindow(aruco_camera.name, cv2.WINDOW_AUTOSIZE)
-
- # Init mouse interaction
- pointer = (0, 0)
- left_click = (0, 0)
- right_click = (0, 0)
- right_drag = (0, 0)
- right_button = False
- edit_trans = False # translate
- edit_z = False
- snap = False
- draw_help = False
- draw_grid = False
- draw_cover = False
- pose_mode = 0
- z_grid = 100.
-
- # Update pointer position
- def on_mouse_event(event, x, y, flags, param):
-
- nonlocal pointer
- nonlocal left_click
- nonlocal right_click
- nonlocal right_drag
- nonlocal right_button
-
- # Update pointer
- pointer = (x, y)
-
- # Update left_click
- if event == cv2.EVENT_LBUTTONUP:
-
- left_click = pointer
-
- # Update right_button
- elif event == cv2.EVENT_RBUTTONDOWN and not right_button:
-
- right_button = True
- right_click = pointer
-
- elif event == cv2.EVENT_RBUTTONUP and right_button:
-
- right_button = False
-
- # Update right_drag
- if right_button:
-
- right_drag = (pointer[0] - right_click[0], pointer[1] - right_click[1])
-
- # Attach mouse callback to window
- cv2.setMouseCallback(aruco_camera.name, on_mouse_event)
-
- # Enable movie video capture
- video_capture = cv2.VideoCapture(args.movie)
-
- video_fps = video_capture.get(cv2.CAP_PROP_FPS)
- video_width = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
- video_height = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
-
- # Enable exit signal handler
- exit = UtilsFeatures.ExitSignalHandler()
-
- # Init image selection
- current_image_index = -1
- _, current_image = video_capture.read()
- next_image_index = int(args.start * video_fps)
- refresh = False
-
- # Init marker selection
- scene_markers = {}
- selected_marker_id = -1
- hovered_marker_id = -1
-
- # Init place edition
- place_edit = {}
-
- while not exit.status():
-
- # Edit fake gaze position from pointer
- gaze_position = GazeFeatures.GazePosition(pointer, precision=2)
-
- # Reset info image
- info_image = numpy.full((850, 1500, 3), 0, dtype=numpy.uint8)
-
- # Select a new image and detect markers once
- if next_image_index != current_image_index or refresh or draw_cover:
-
- video_capture.set(cv2.CAP_PROP_POS_FRAMES, next_image_index)
-
- success, video_image = video_capture.read()
-
- if success:
-
- # Refresh once
- refresh = False
-
- current_image_index = video_capture.get(cv2.CAP_PROP_POS_FRAMES) - 1
- current_image_time = video_capture.get(cv2.CAP_PROP_POS_MSEC)
-
- # Keep central square
- cv2.rectangle(video_image, (0, 0), (int((video_width-video_height)/2), int(video_height)), (0, 0, 0), -1)
- cv2.rectangle(video_image, (int(video_width-(video_width-video_height)/2), 0), (int(video_width), int(video_height)), (0, 0, 0), -1)
-
- # Hide zone
- if draw_cover:
-
- # Draw black circle under pointer
- cv2.circle(video_image, pointer, 50, (0, 0, 0), -1)
-
- # Process video image
- try:
-
- aruco_camera.watch(current_image_time, video_image)
- exception = None
-
- except Exception as e:
-
- exception = e
-
- # Update video image
- video_image = aruco_camera.image()
-
- # Write exception
- if exception is not None:
-
- cv2.rectangle(video_image, (0, video_height-50), (video_width, video_height), (0, 0, 127), -1)
- cv2.putText(video_image, f'{exception}', (20, video_height-10), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
-
- # Draw gray panel on top
- cv2.rectangle(video_image, (0, 0), (video_width, 50), (63, 63, 63), -1)
-
- # Draw camera calibration
- if draw_grid:
-
- cv2.putText(video_image, f'Grid at {z_grid} cm', (500, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
- aruco_camera.aruco_detector.optic_parameters.draw(video_image, video_width/10, video_height/10, z_grid, color=(127, 127, 127))
-
- # Write timing
- cv2.putText(video_image, f'Time: {int(current_image_time)} ms', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
-
- # Copy image
- current_image = video_image.copy()
-
- # Keep last image
- else:
-
- video_image = current_image.copy()
- '''
- # Handle scene marker selection on left click
- if len(scene_markers) > 0:
-
- # Update selected and hovered markers id
- selected_marker_id = -1
- hovered_marker_id = -1
- for (marker_id, marker) in scene_markers.items():
-
- marker_aoi = marker.corners.reshape(4, 2).view(AOIFeatures.AreaOfInterest)
-
- # Select by left clicking on marker
- if marker_aoi.contains_point(left_click):
-
- selected_marker_id = marker_id
-
- # Hover by pointing on marker
- if marker_aoi.contains_point(pointer):
-
- hovered_marker_id = marker_id
-
- # Edit marker's color
- color_list = list(itertools.permutations([0, 255, 255]))
-
- for i, m in scene_markers.items():
-
- m.color = color_list[i%len(color_list)]
-
- if i == selected_marker_id or i == hovered_marker_id:
- continue
-
- if hovered_marker_id > 0:
- m.color = (0, 0, 0)
- else:
- m.color = (127, 127, 127)
-
- # Draw center
- cv2.circle(video_image, m.center.astype(int), 5, m.color, -1)
-
- try:
-
- # A marker is selected
- if selected_marker_id >= 0:
-
- try:
-
- # Retrieve selected marker
- selected_marker = scene_markers[selected_marker_id]
-
- # Write selected marker id
- cv2.rectangle(info_image, (0, 0), (500, 50), selected_marker.color, -1)
- cv2.putText(info_image, f'Selected marker #{selected_marker.identifier}', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
- cv2.rectangle(info_image, (0, 50), (500, video_height), (255, 255, 255), -1)
-
- # Write selected marker rotation matrix
- R = ArUcoScene.make_euler_rotation_vector(selected_marker.rotation)
- cv2.putText(info_image, f'Rotation (camera axis)', (20, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
- cv2.putText(info_image, f'{R[0]:.3f}', (40, 160), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
- cv2.putText(info_image, f'{R[1]:.3f}', (40, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
- cv2.putText(info_image, f'{R[2]:.3f}', (40, 240), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
-
- # Write selected marker translation vector
- T = selected_marker.translation
- cv2.putText(info_image, f'Translation (camera axis):', (20, 320), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
- cv2.putText(info_image, f'{T[0]:.3f}', (40, 360), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
- cv2.putText(info_image, f'{T[1]:.3f}', (40, 400), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
- cv2.putText(info_image, f'{T[2]:.3f}', (40, 440), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
-
- # Retrieve selected marker place
- selected_place = ar_scene.aruco_scene.places[selected_marker_id]
-
- # On right click
- if right_button:
-
- pointer_delta_x, pointer_delta_y = right_drag[0] / video_width, right_drag[1] / video_height
-
- place_edit[selected_marker_id] = {'rotation': (0, 0, 0), 'translation': (0, 0, 0)}
-
- if edit_trans:
-
- # Edit place rotation
- if edit_z:
- place_edit[selected_marker_id]['rotation'] = (0, 0, -pointer_delta_y)
- else:
- place_edit[selected_marker_id]['rotation'] = (pointer_delta_y, pointer_delta_x, 0)
-
- else:
-
- # Edit place translation
- if edit_z:
- place_edit[selected_marker_id]['translation'] = (0, 0, pointer_delta_y)
- else:
- place_edit[selected_marker_id]['translation'] = (-pointer_delta_x, pointer_delta_y, 0)
-
- # Edit transformations
- R = ArUcoScene.make_rotation_matrix(*place_edit[selected_marker_id]['rotation']).T
- T = numpy.array(place_edit[selected_marker_id]['translation'])
-
- # Apply transformations
- edited_place = ArUcoScene.Place(selected_place.translation + T, selected_place.rotation.dot(R), selected_marker)
-
- else:
-
- edited_place = selected_place
-
- # A marker is hovered while another is selected
- if hovered_marker_id >= 0 and hovered_marker_id != selected_marker_id:
-
- # Retrieve hovered marker
- hovered_marker = scene_markers[hovered_marker_id]
-
- # Write hovered marker id
- cv2.rectangle(info_image, (500, 0), (1000, 50), hovered_marker.color, -1)
- cv2.putText(info_image, f'Hovered marker #{hovered_marker.identifier}', (520, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
- cv2.rectangle(info_image, (500, 50), (1000, video_height), (255, 255, 255), -1)
-
- # Write hovered marker rotation matrix
- R = ArUcoScene.make_euler_rotation_vector(hovered_marker.rotation)
- cv2.putText(info_image, f'Rotation (camera axis)', (520, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
- cv2.putText(info_image, f'{R[0]:.3f}', (540, 160), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
- cv2.putText(info_image, f'{R[1]:.3f}', (540, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
- cv2.putText(info_image, f'{R[2]:.3f}', (540, 240), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
-
- # Write hovered marker translation vector
- T = hovered_marker.translation
- cv2.putText(info_image, f'Translation (camera axis):', (520, 320), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
- cv2.putText(info_image, f'{T[0]:.3f}', (540, 360), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
- cv2.putText(info_image, f'{T[1]:.3f}', (540, 400), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
- cv2.putText(info_image, f'{T[2]:.3f}', (540, 440), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
-
- # Retrieve hovered marker place
- hovered_place = ar_scene.aruco_scene.places[hovered_marker_id]
-
- # Write hovered place rotation matrix
- R = ArUcoScene.make_euler_rotation_vector(hovered_place.rotation)
- cv2.putText(info_image, f'Rotation (scene axis):', (520, 500), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
- cv2.putText(info_image, f'{R[0]:.3f}', (540, 540), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
- cv2.putText(info_image, f'{R[1]:.3f}', (540, 580), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
- cv2.putText(info_image, f'{R[2]:.3f}', (540, 620), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
-
- # Write hovered place translation vector
- T = hovered_place.translation
- cv2.putText(info_image, f'Translation (scene axis):', (520, 700), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
- cv2.putText(info_image, f'{T[0]:.3f}', (540, 740), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
- cv2.putText(info_image, f'{T[1]:.3f}', (540, 780), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
- cv2.putText(info_image, f'{T[2]:.3f}', (540, 820), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
-
- # Rotation between markers and places
- markers_rotation_matrix = hovered_marker.rotation.dot(selected_marker.rotation.T)
- places_rotation_matrix = hovered_place.rotation.dot(selected_place.rotation.T)
-
- markers_rotation_vector = ArUcoScene.make_euler_rotation_vector(markers_rotation_matrix)
- places_rotation_vector = ArUcoScene.make_euler_rotation_vector(places_rotation_matrix)
-
- # Translation info between markers and places
- markers_translation = hovered_marker.translation - selected_marker.translation
- places_translation = hovered_place.translation - selected_place.translation
-
- # Write selected/hovered markers id
- cv2.rectangle(info_image, (1000, 0), (1500, 50), (63, 63, 63), -1)
- cv2.putText(info_image, f'#{selected_marker.identifier} -> #{hovered_marker.identifier}', (1020, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
- cv2.rectangle(info_image, (1000, 50), (1500, video_height), (190, 190, 190), -1)
-
- # Write selected/hovered markers rotation matrix
- R = markers_rotation_vector
- cv2.putText(info_image, f'Rotation (camera axis)', (1020, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
- cv2.putText(info_image, f'{R[0]:.3f}', (1040, 160), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
- cv2.putText(info_image, f'{R[1]:.3f}', (1040, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
- cv2.putText(info_image, f'{R[2]:.3f}', (1040, 240), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
-
- # Write selected/hovered markers translation vector
- T = markers_translation
- cv2.putText(info_image, f'Translation (camera axis):', (1020, 320), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
- cv2.putText(info_image, f'{T[0]:.3f}', (1040, 360), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
- cv2.putText(info_image, f'{T[1]:.3f}', (1040, 400), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
- cv2.putText(info_image, f'{T[2]:.3f}', (1040, 440), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
-
- # Write selected/hovered places rotation matrix
- R = places_rotation_vector
- cv2.putText(info_image, f'Rotation (scene axis):', (1020, 500), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
- cv2.putText(info_image, f'{R[0]:.3f}', (1040, 540), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
- cv2.putText(info_image, f'{R[1]:.3f}', (1040, 580), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
- cv2.putText(info_image, f'{R[2]:.3f}', (1040, 620), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
-
- # Write selected/hovered places translation vector
- T = places_translation
- cv2.putText(info_image, f'Translation (scene axis):', (1020, 700), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
- cv2.putText(info_image, f'{T[0]:.3f}', (1040, 740), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
- cv2.putText(info_image, f'{T[1]:.3f}', (1040, 780), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
- cv2.putText(info_image, f'{T[2]:.3f}', (1040, 820), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
-
- if snap:
-
- # Snap once
- snap = False
-
- print(f'******* SNAP {selected_marker_id} / {hovered_marker_id} *******')
-
- # Edit rotation transformation
- #R = places_rotation_matrix.dot(rmat.T).dot(markers_rotation_matrix.T).dot(rmat)
-
- #rmat_places_rotation_vector = ArUcoScene.make_euler_rotation_vector(places_rotation_matrix.dot(rmat.T))
- rdiff = places_rotation_vector - markers_rotation_vector
- R = ArUcoScene.make_rotation_matrix(*rdiff)
-
- print(f'markers_rotation_vector: {markers_rotation_vector}')
- print(f'places_rotation_vector: {places_rotation_vector}')
- print(f'rdiff: {rdiff}')
- print(f'R: {ArUcoScene.make_euler_rotation_vector(R)}')
-
- # Edit translation transformation
- T = (places_translation.dot(rmat.T) - markers_translation).dot(rmat)
-
- print(f'markers_translation: {markers_translation} ({numpy.linalg.norm(markers_translation)})')
- print(f'places_translation: {places_translation} ({numpy.linalg.norm(places_translation)})')
- print(f'T: {T} ({numpy.linalg.norm(T)})')
-
- # Apply transformations
- edited_place = ArUcoScene.Place(selected_place.translation + T, selected_place.rotation, selected_marker)
-
- # Write edited place rotation matrix
- R = ArUcoScene.make_euler_rotation_vector(edited_place.rotation)
- cv2.putText(info_image, f'Rotation (scene axis):', (20, 500), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
- cv2.putText(info_image, f'{R[0]:.3f}', (40, 540), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
- cv2.putText(info_image, f'{R[1]:.3f}', (40, 580), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
- cv2.putText(info_image, f'{R[2]:.3f}', (40, 620), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
-
- # Write edited place translation vector
- T = edited_place.translation
- cv2.putText(info_image, f'Translation (scene axis):', (20, 700), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
- cv2.putText(info_image, f'{T[0]:.3f}', (40, 740), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
- cv2.putText(info_image, f'{T[1]:.3f}', (40, 780), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
- cv2.putText(info_image, f'{T[2]:.3f}', (40, 820), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
-
- # Replace selected place by edited place
- ar_scene.aruco_scene.places[selected_marker_id] = edited_place
-
- # Refresh places consistency
- ar_scene.aruco_scene.init_places_consistency()
-
- # Estimate scene pose from each marker
- cv2.putText(video_image, f'Single marker scene pose estimation', (20, video_height - 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
-
- for i, m in scene_markers.items():
-
- tvec, rmat = ar_scene.aruco_scene.estimate_pose_from_single_marker(m)
-
- # Project AOI scene into image according to the estimated pose
- aoi_scene_projection = ar_scene.project(tvec, rmat, visual_hfov=TobiiSpecifications.VISUAL_HFOV)
-
- if i == selected_marker_id:
-
- # Draw AOI scene projection with gaze
- aoi_scene_projection.draw_circlecast(video_image, gaze_position, 1, base_color=m.color, matching_color=(255, 255, 255))
-
- else:
-
- # Draw AOI scene
- aoi_scene_projection.draw(video_image, color=m.color)
-
- # Draw expected marker places
- ar_scene.draw_places(video_image)
-
- # Catch missing selected marker
- except KeyError:
-
- cv2.putText(video_image, f'Marker {selected_marker_id} not found', (20, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
-
- # No marker selected
- else:
-
- cv2.putText(info_image, f'Left click on marker to select it', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
-
- if len(scene_markers) > 1:
-
- # Check markers consistency
- consistent_markers, unconsistent_markers, unconsistencies = ar_scene.aruco_scene.check_markers_consistency(scene_markers, ar_scene.angle_tolerance, ar_scene.distance_tolerance)
-
- # No marker hovered
- if hovered_marker_id < 0:
-
- # Set inconsistent marker color to red
- for i, m in scene_markers.items():
- if i in list(unconsistent_markers.keys()) and i != hovered_marker_id:
- m.color = (0, 0, 255)
-
- # Write unconsistencies
- line = 0
- for i, (label, value) in enumerate(unconsistencies['rotation'].items()):
-
- current_rotation = value['current']
- expected_rotation = value['expected']
-
- cv2.putText(info_image, f'Inconsistent rotation {label}: [{current_rotation[0]:.3f} {current_rotation[1]:.3f} {current_rotation[2]:.3f}]', (20, 120+line*40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
- line += 1
-
- cv2.putText(info_image, f'Expected rotation {label}: [{expected_rotation[0]:.3f} {expected_rotation[1]:.3f} {expected_rotation[2]:.3f}]', (20, 120+line*40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
- line += 1
-
- for i, (label, value) in enumerate(unconsistencies['translation'].items()):
-
- current_translation = value['current']
- expected_translation = value['expected']
-
- cv2.putText(info_image, f'Inconsistent translation {label}: {current_translation:.3f}', (20, 120+ line*40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
- line += 1
-
- cv2.putText(info_image, f'Expected translation {label}: {expected_translation:.3f}', (20, 120+ line*40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
- line += 1
-
- # Force pose mode to single marker scene pose estimation
- else:
-
- pose_mode = 0
-
- # Single marker scene pose estimation
- if pose_mode == 0:
-
- cv2.putText(video_image, f'Single marker scene pose estimation', (20, video_height - 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
-
- for i, m in scene_markers.items():
-
- tvec, rmat = ar_scene.aruco_scene.estimate_pose_from_single_marker(m)
-
- # Project AOI scene into image according to the estimated pose
- aoi_scene_projection = ar_scene.project(tvec, rmat, visual_hfov=TobiiSpecifications.VISUAL_HFOV)
-
- # Draw AOI scene
- aoi_scene_projection.draw(video_image, color=m.color)
-
- # Consistent markers scene pose estimation
- if pose_mode == 1:
-
- cv2.putText(video_image, f'Consistent markers scene pose estimation', (20, video_height - 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
-
- tvec, rmat = ar_scene.aruco_scene.estimate_pose_from_markers(consistent_markers)
-
- # Project AOI scene into image according to the estimated pose
- aoi_scene_projection = ar_scene.project(tvec, rmat, visual_hfov=TobiiSpecifications.VISUAL_HFOV)
-
- # Draw AOI scene
- aoi_scene_projection.draw(video_image, color=(255, 255, 255))
-
- # ArUco marker axis scene pose estimation
- elif pose_mode == 2:
-
- # Write pose estimation strategy
- cv2.putText(video_image, f'ArUco marker axis scene pose estimation', (20, video_height - 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
-
- for axis_name, axis_markers in ar_scene.aruco_axis.items():
-
- try:
-
- origin_marker = scene_markers[axis_markers['origin_marker']]
- horizontal_axis_marker = scene_markers[axis_markers['horizontal_axis_marker']]
- vertical_axis_marker = scene_markers[axis_markers['vertical_axis_marker']]
-
- tvec, rmat = ar_scene.aruco_scene.estimate_pose_from_axis_markers(origin_marker, horizontal_axis_marker, vertical_axis_marker)
-
- # Project AOI scene into image according to the estimated pose
- aoi_scene_projection = ar_scene.project(tvec, rmat, visual_hfov=TobiiSpecifications.VISUAL_HFOV)
-
- # Draw AOI scene
- aoi_scene_projection.draw(video_image, color=(255, 255, 255))
-
- break
-
- except:
- pass
-
- # ArUco AOI scene building
- elif pose_mode == 3:
-
- # Write pose estimation strategy
- cv2.putText(video_image, f'ArUco AOI scene building', (20, video_height - 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
-
- try :
-
- # Try to build AOI scene from detected ArUco marker corners
- aoi_scene_projection = ar_scene.build_aruco_aoi_scene(scene_markers)
-
- # Draw AOI scene
- aoi_scene_projection.draw(video_image, color=(255, 255, 255))
-
- except:
- pass
-
- # Draw expected marker places
- #ar_scene.draw_places(video_image)
-
- # Catch exceptions raised by estimate_pose and project methods
- except (ArFeatures.PoseEstimationFailed) as e:
-
- cv2.rectangle(video_image, (0, 90), (700, 130), (127, 127, 127), -1)
- cv2.putText(video_image, f'Error: {e}', (20, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
-
- # Draw image
- cv2.imshow(aruco_camera.name, video_image)
- '''
-
- # Draw pointer
- gaze_position.draw(video_image)
-
- # Write documentation
- cv2.putText(video_image, f'Press \'h\' for help', (950, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
- '''
- if draw_help:
-
- cv2.rectangle(video_image, (0, 50), (700, 300), (127, 127, 127), -1)
- cv2.putText(video_image, f'> Left click on marker: select marker', (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
- cv2.putText(video_image, f'> Left click on image: unselect marker', (20, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
- cv2.putText(video_image, f'> T: translate, R: rotate, Z: select axis', (20, 160), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
- cv2.putText(video_image, f'> Right click and drag: edit axis', (20, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
- cv2.putText(video_image, f'> Ctrl + S: save environment', (20, 240), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
- cv2.putText(video_image, f'> Backspace: reload environment', (20, 280), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
-
- # Write selected marker id
- elif selected_marker_id >= 0:
-
- cv2.rectangle(video_image, (0, 50), (700, 90), (127, 127, 127), -1)
-
- # Select color
- if edit_z:
- str_axis = 'Z'
- color_axis = (255, 0, 0)
- else:
- str_axis = 'XY'
- color_axis = (0, 255, 255)
-
- if edit_trans:
- cv2.putText(video_image, f'Rotate marker {selected_marker_id} around axis {str_axis}', (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, color_axis, 1, cv2.LINE_AA)
- else:
- cv2.putText(video_image, f'Translate marker {selected_marker_id} along axis {str_axis}', (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, color_axis, 1, cv2.LINE_AA)
- '''
- key_pressed = cv2.waitKey(10)
-
- #if key_pressed != -1:
- # print(key_pressed)
-
- # Select previous image with left arrow
- if key_pressed == 2:
- next_image_index -= 1
-
- # Select next image with right arrow
- if key_pressed == 3:
- next_image_index += 1
-
- # Clip image index
- if next_image_index < 0:
- next_image_index = 0
-
- # Edit rotation with r key
- if key_pressed == 114:
- edit_trans = True
-
- # Edit translation with t key
- if key_pressed == 116:
- edit_trans = False
-
- # Switch Z axis edition
- if key_pressed == 122:
- edit_z = not edit_z
-
- # Snap hovered marker with s key
- if key_pressed == 115:
- snap = True
-
- # Switch help mode with h key
- if key_pressed == 104:
- draw_help = not draw_help
-
- # Switch grid mode with g key
- if key_pressed == 103:
- draw_grid = not draw_grid
- refresh = True
-
- # Raise z grid with down arrow
- if key_pressed == 0:
- z_grid += 10.
- refresh = True
-
- # Unraise z grid with up arrow
- if key_pressed == 1:
- z_grid -= 10.
- refresh = True
-
- # Switch draw_cover mode with c key
- if key_pressed == 99:
- draw_cover = not draw_cover
-
- # Switch pose estimation mode with m key
- if key_pressed == 109:
- pose_mode += 1
- if pose_mode > 3:
- pose_mode = 0
-
- # Save selected marker edition using 'Ctrl + s'
- if key_pressed == 19:
- aruco_camera.to_json(args.output)
- print(f'Environment saved into {args.output}')
-
- # Close window using 'Esc' key
- if key_pressed == 27:
- break
-
- # Reload configuration on 'Backspace' key
- if key_pressed == 127:
- aruco_camera = ArUcoCamera.ArUcoCamera.from_json(args.configuration)
- print(f'Configuration reloaded from {args.configuration}')
- refresh = True
-
- # Display video
- cv2.imshow(aruco_camera.name, video_image)
-
- # Display info
- cv2.imshow('Info', info_image)
-
- # Close movie capture
- video_capture.release()
-
- # Stop image display
- cv2.destroyAllWindows()
-
-if __name__ == '__main__':
-
- main()
\ No newline at end of file
diff --git a/src/argaze/utils/pipeline_run.py b/src/argaze/utils/context_run.py
index 3a8640f..ace7c54 100644
--- a/src/argaze/utils/pipeline_run.py
+++ b/src/argaze/utils/context_run.py
@@ -31,6 +31,8 @@ parser = argparse.ArgumentParser(description=__doc__.split('-')[0])
parser.add_argument('configuration', metavar='CONFIGURATION', type=str, help='JSON configuration filepath')
parser.add_argument('-p', '--patch', metavar='PATCH', type=str, help='JSON configuration patch filepath')
parser.add_argument('-v', '--verbose', action='store_true', default=False, help='enable verbose mode to print information in console')
+parser.add_argument('-m', '--mouse', action='store_true', default=False, help='use mouse pointer as gaze position')
+
args = parser.parse_args()
# Manage logging
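
Note: the new -m/--mouse option implies feeding the pipeline from pointer coordinates instead of eye-tracker data, the same trick the deleted demo scripts used. A minimal sketch of that pattern with plain OpenCV (window name and handler are illustrative, not the script's actual wiring):

    import time

    import cv2
    import numpy

    start_time = time.time()

    def on_mouse_event(event, x, y, flags, param):
        # Millisecond timestamp, as the contexts in this commit do
        timestamp = int((time.time() - start_time) * 1e3)
        print(f'fake gaze position ({x}, {y}) at {timestamp}ms')

    cv2.namedWindow('demo', cv2.WINDOW_AUTOSIZE)
    cv2.setMouseCallback('demo', on_mouse_event)

    while cv2.waitKey(10) != 27:  # Esc quits
        cv2.imshow('demo', numpy.zeros((480, 640, 3), dtype=numpy.uint8))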
diff --git a/src/argaze/utils/contexts/OpenCV.py b/src/argaze/utils/contexts/OpenCV.py
new file mode 100644
index 0000000..5a35fba
--- /dev/null
+++ b/src/argaze/utils/contexts/OpenCV.py
@@ -0,0 +1,62 @@
+"""Define OpenCV window display context
+
+This program is free software: you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation, either version 3 of the License, or (at your option) any later
+version.
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+You should have received a copy of the GNU General Public License along with
+this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+__author__ = "Théo de la Hogue"
+__credits__ = []
+__copyright__ = "Copyright 2023, Ecole Nationale de l'Aviation Civile (ENAC)"
+__license__ = "GPLv3"
+
+import sys
+import logging
+import time
+
+from argaze import ArFeatures, DataFeatures, GazeFeatures
+from argaze.utils import UtilsFeatures
+
+import numpy
+import cv2
+
+class Window(ArFeatures.ArContext):
+
+ @DataFeatures.PipelineStepInit
+ def __init__(self, **kwargs):
+
+ # Init parent classes
+ super().__init__()
+
+ @DataFeatures.PipelineStepEnter
+ def __enter__(self):
+
+ logging.info('OpenCV context starts...')
+
+ # Create a window to display context
+ cv2.namedWindow(self.name, cv2.WINDOW_AUTOSIZE)
+
+ # Init timestamp
+ self.__start_time = time.time()
+
+ # Attach mouse event callback to window
+ cv2.setMouseCallback(self.name, self.__on_mouse_event)
+
+ return self
+
+ def __on_mouse_event(self, event, x, y, flags, param):
+ """Process pointer position."""
+
+ logging.debug('Window.on_mouse_event %i %i', x, y)
+
+ # Process timestamped gaze position
+ self._process_gaze_position(
+ timestamp = int((time.time() - self.__start_time) * 1e3),
+ x = x,
+ y = y)
\ No newline at end of file
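
Note: entering this context opens the window and attaches the mouse callback, so every pointer move becomes a timestamped gaze position pushed through _process_gaze_position. Usage would presumably look like this (a sketch only; it assumes PipelineStepInit maps the name keyword onto the window title, as the JSON setup below suggests):

    from argaze.utils.contexts import OpenCV

    import cv2

    # Sketch: the 'name' keyword is an assumption based on the JSON setup file
    with OpenCV.Window(name='OpenCV Window') as window:

        while cv2.waitKey(10) != 27:  # Esc quits; mouse moves become gaze positions
            pass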
diff --git a/src/argaze/utils/eyetrackers/TobiiProGlasses2.py b/src/argaze/utils/contexts/TobiiProGlasses2.py
index 8b92fef..8b92fef 100644
--- a/src/argaze/utils/eyetrackers/TobiiProGlasses2.py
+++ b/src/argaze/utils/contexts/TobiiProGlasses2.py
diff --git a/src/argaze/utils/eyetrackers/__init__.py b/src/argaze/utils/contexts/__init__.py
index b76cd8b..b76cd8b 100644
--- a/src/argaze/utils/eyetrackers/__init__.py
+++ b/src/argaze/utils/contexts/__init__.py
diff --git a/src/argaze/utils/demo/demo.mov b/src/argaze/utils/demo/demo.mov
deleted file mode 100644
index bba7999..0000000
--- a/src/argaze/utils/demo/demo.mov
+++ /dev/null
Binary files differ
diff --git a/src/argaze/utils/demo/opencv_window_context_setup.json b/src/argaze/utils/demo/opencv_window_context_setup.json
new file mode 100644
index 0000000..da7dc78
--- /dev/null
+++ b/src/argaze/utils/demo/opencv_window_context_setup.json
@@ -0,0 +1,10 @@
+{
+ "argaze.utils.contexts.OpenCV.Window" : {
+ "name": "OpenCV Window",
+ "pipeline": "gaze_analysis_pipeline.json",
+ "image_parameters": {
+ "draw_times": true,
+ "draw_exceptions": true
+ }
+ }
+}
\ No newline at end of file
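
Note: the single top-level key is a dotted class path; a loader of this kind typically resolves it with importlib and passes the nested object as constructor keywords. A generic sketch of that mechanism (not argaze's actual loader):

    import importlib
    import json

    def load_context(filepath):
        """Instantiate the class named by the single top-level JSON key."""
        with open(filepath) as f:
            (class_path, kwargs), = json.load(f).items()

        module_path, class_name = class_path.rsplit('.', 1)
        cls = getattr(importlib.import_module(module_path), class_name)
        return cls(**kwargs)

    # context = load_context('opencv_window_context_setup.json')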
diff --git a/src/argaze/utils/demo/eyetracker_setup.json b/src/argaze/utils/demo/tobii_live_stream_context_setup.json
index 8d47542..275d77f 100644
--- a/src/argaze/utils/demo/eyetracker_setup.json
+++ b/src/argaze/utils/demo/tobii_live_stream_context_setup.json
@@ -1,5 +1,5 @@
{
- "argaze.utils.eyetrackers.TobiiProGlasses2.LiveStream" : {
+ "argaze.utils.contexts.TobiiProGlasses2.LiveStream" : {
"name": "Tobii Pro Glasses 2 live stream",
"address": "10.34.0.17",
"project": "MyProject",
diff --git a/src/argaze/utils/demo_aruco_markers_run.py b/src/argaze/utils/demo_aruco_markers_run.py
deleted file mode 100644
index cdd9184..0000000
--- a/src/argaze/utils/demo_aruco_markers_run.py
+++ /dev/null
@@ -1,203 +0,0 @@
-#!/usr/bin/env python
-
-"""Augmented Reality pipeline demo script.
-
-This program is free software: you can redistribute it and/or modify it under
-the terms of the GNU General Public License as published by the Free Software
-Foundation, either version 3 of the License, or (at your option) any later
-version.
-This program is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-You should have received a copy of the GNU General Public License along with
-this program. If not, see <http://www.gnu.org/licenses/>.
-"""
-
-__author__ = "Théo de la Hogue"
-__credits__ = []
-__copyright__ = "Copyright 2023, Ecole Nationale de l'Aviation Civile (ENAC)"
-__license__ = "GPLv3"
-
-import argparse
-import logging
-import contextlib
-import os
-import time
-
-from argaze import ArFeatures, GazeFeatures
-from argaze.ArUcoMarkers import ArUcoCamera
-from argaze.utils import UtilsFeatures
-
-import cv2
-
-current_directory = os.path.dirname(os.path.abspath(__file__))
-
-# Manage arguments
-parser = argparse.ArgumentParser(description=__doc__.split('-')[0])
-parser.add_argument('configuration', metavar='CONFIGURATION', type=str, help='configuration filepath')
-parser.add_argument('-s', '--source', metavar='SOURCE', type=str, default='0', help='video capture source (a number to select camera device or a filepath to load a movie)')
-parser.add_argument('-v', '--verbose', action='store_true', default=False, help='enable verbose mode to print information in console')
-
-args = parser.parse_args()
-
-# Manage logging
-logging.basicConfig(format = '%(levelname)s: %(message)s', level = logging.DEBUG if args.verbose else logging.INFO)
-
-def main():
-
- # Load ArUcoCamera
- with ArUcoCamera.ArUcoCamera.from_json(args.configuration) as aruco_camera:
-
- if args.verbose:
-
- print(aruco_camera)
-
- # Create a window to display ArUcoCamera
- cv2.namedWindow(aruco_camera.name, cv2.WINDOW_AUTOSIZE)
-
- # Init timestamp
- start_time = time.time()
-
- # Prepare gaze analysis assessment
- call_chrono = UtilsFeatures.TimeProbe()
- call_chrono.start()
-
- gaze_positions_frequency = 0
- gaze_analysis_time = 0
-
- # Fake gaze position with mouse pointer
- def on_mouse_event(event, x, y, flags, param):
-
- nonlocal gaze_positions_frequency
- nonlocal gaze_analysis_time
-
- # Assess gaze analysis
- lap_time, nb_laps, elapsed_time = call_chrono.lap()
-
- if elapsed_time > 1e3:
-
- gaze_positions_frequency = nb_laps
- call_chrono.restart()
-
- # Edit millisecond timestamp
- timestamp = int((time.time() - start_time) * 1e3)
-
- #try:
-
- # Project gaze position into camera
- aruco_camera.look(GazeFeatures.GazePosition((x, y), timestamp=timestamp))
-
- # Assess gaze analysis
- gaze_analysis_time = aruco_camera.execution_times['look']
-
- #except Exception as e:
-
- # print(e)
- # gaze_analysis_time = 0
-
- # Attach mouse callback to window
- cv2.setMouseCallback(aruco_camera.name, on_mouse_event)
-
- # Prepare video fps assessment
- video_fps = 0
- video_chrono = UtilsFeatures.TimeProbe()
- video_chrono.start()
-
- # Prepare visualisation time assessment
- visualisation_time = 0
-
- # Enable camera video capture into separate thread
- video_capture = cv2.VideoCapture(int(args.source) if args.source.isdecimal() else args.source)
-
- # Waiting for 'ctrl+C' interruption
- with contextlib.suppress(KeyboardInterrupt):
-
- # Assess capture time
- capture_start = time.time()
-
- # Capture images
- while video_capture.isOpened():
-
- # Read video image
- success, video_image = video_capture.read()
-
- # Assess capture time
- capture_time = int((time.time() - capture_start) * 1e3)
-
- if success:
-
- # Assess video fps
- lap_time, nb_laps, elapsed_time = video_chrono.lap()
-
- if elapsed_time > 1e3:
-
- video_fps = nb_laps
- video_chrono.restart()
-
- #try:
-
- # Detect and project AR features
- aruco_camera.watch(video_image, timestamp=capture_time)
-
- # Detection succeeded
- exception = None
-
- # Write errors
- #except Exception as e:
-
- # exception = e
-
- # Assess visualisation time
- visualisation_start = time.time()
-
- # Get ArUcoCamera frame image
- aruco_camera_image = aruco_camera.image()
-
- # Get execution times
- detection_time = aruco_camera.aruco_detector.execution_times['detect_markers']
- projection_time = aruco_camera.execution_times['watch'] - detection_time
-
- # Write time info
- cv2.rectangle(aruco_camera_image, (0, 0), (aruco_camera.size[0], 100), (63, 63, 63), -1)
- cv2.putText(aruco_camera_image, f'{video_fps} FPS | Capture {capture_time}ms | Detection {int(detection_time)}ms | Projection {int(projection_time)}ms | Visualisation {visualisation_time}ms', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
- cv2.putText(aruco_camera_image, f'{gaze_positions_frequency} gaze positions/s | Gaze analysis {gaze_analysis_time:.2f}ms', (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
-
- # Handle exceptions
- if exception is not None:
-
- cv2.rectangle(aruco_camera_image, (0, 100), (aruco_camera.size[0], 80), (127, 127, 127), -1)
- cv2.putText(aruco_camera_image, f'error: {exception}', (20, 140), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
-
- # Write hint
- cv2.putText(aruco_camera_image, 'Move mouse pointer over gray rectangle area', (20, aruco_camera.size[1]-40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
-
- # Display ArUcoCamera frame image
- cv2.imshow(aruco_camera.name, aruco_camera_image)
-
- # Draw and display each scene frames
- for scene_frame in aruco_camera.scene_frames():
-
- # Display scene frame
- cv2.imshow(f'{scene_frame.parent.name}:{scene_frame.name}', scene_frame.image())
-
- else:
-
- # Assess visualisation time
- visualisation_start = time.time()
-
- # Stop by pressing 'Esc' key
- # NOTE: on MacOS, cv2.waitKey(1) waits ~40ms
- if cv2.waitKey(1) == 27:
-
- # Close camera video capture
- video_capture.release()
-
- # Assess visualisation time
- visualisation_time = int((time.time() - visualisation_start) * 1e3)
-
- # Stop image display
- cv2.destroyAllWindows()
-
-if __name__ == '__main__':
-
- main()
diff --git a/src/argaze/utils/demo_gaze_analysis_run.py b/src/argaze/utils/demo_gaze_analysis_run.py
deleted file mode 100644
index 16644ce..0000000
--- a/src/argaze/utils/demo_gaze_analysis_run.py
+++ /dev/null
@@ -1,274 +0,0 @@
-#!/usr/bin/env python
-
-"""Gaze analysis pipeline demo script.
-
-This program is free software: you can redistribute it and/or modify it under
-the terms of the GNU General Public License as published by the Free Software
-Foundation, either version 3 of the License, or (at your option) any later
-version.
-This program is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-You should have received a copy of the GNU General Public License along with
-this program. If not, see <http://www.gnu.org/licenses/>.
-"""
-
-__author__ = "Théo de la Hogue"
-__credits__ = []
-__copyright__ = "Copyright 2023, Ecole Nationale de l'Aviation Civile (ENAC)"
-__license__ = "GPLv3"
-
-import argparse
-import logging
-import contextlib
-import os
-import time
-
-from argaze import ArFeatures, GazeFeatures
-from argaze.GazeAnalysis import *
-from argaze.utils import UtilsFeatures
-
-import cv2
-
-current_directory = os.path.dirname(os.path.abspath(__file__))
-
-# Manage arguments
-parser = argparse.ArgumentParser(description=__doc__.split('-')[0])
-parser.add_argument('configuration', metavar='CONFIGURATION', type=str, help='configuration filepath')
-parser.add_argument('-v', '--verbose', action='store_true', default=False, help='enable verbose mode to print information in console')
-args = parser.parse_args()
-
-# Manage logging
-logging.basicConfig(format = '%(levelname)s: %(message)s', level = logging.DEBUG if args.verbose else logging.INFO)
-
-def main():
-
- # Load ArFrame
- with ArFeatures.ArFrame.from_json(args.configuration) as ar_frame:
-
- if args.verbose:
-
- print(ar_frame)
-
- # Create a window to display ArCamera
- cv2.namedWindow(ar_frame.name, cv2.WINDOW_AUTOSIZE)
-
- # Heatmap buffer display option
- enable_heatmap_buffer = False
-
- # Init timestamp
- start_time = time.time()
-
- # Update pointer position
- def on_mouse_event(event, x, y, flags, param):
-
- #try:
-
- # Project gaze position into frame with millisecond timestamp
- ar_frame.look(GazeFeatures.GazePosition((x, y), timestamp=int((time.time() - start_time) * 1e3)))
-
- # Catch pipeline exception
- #except Exception as e:
-
- # print('Gaze projection error:', e)
-
- # Attach mouse callback to window
- cv2.setMouseCallback(ar_frame.name, on_mouse_event)
-
- # Waiting for 'ctrl+C' interruption
- with contextlib.suppress(KeyboardInterrupt):
-
- # Draw frame and mouse position analysis
- while True:
-
- # Get frame image
- frame_image = ar_frame.image()
-
- # Write heatmap buffer manual
- buffer_on_off = 'on' if enable_heatmap_buffer else 'off'
- buffer_display_disable = 'disable' if enable_heatmap_buffer else 'enable'
- cv2.putText(frame_image, f'Heatmap buffer: {buffer_on_off} (Press \'b\' key to {buffer_display_disable})', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255) if enable_heatmap_buffer else (255, 255, 255), 1, cv2.LINE_AA)
-
- # Write last 5 steps of aoi scan path
- path = ''
- for step in ar_frame.layers["demo_layer"].aoi_scan_path[-5:]:
-
- path += f'> {step.aoi} '
-
- path += f'> {ar_frame.layers["demo_layer"].aoi_scan_path.current_aoi}'
-
- cv2.putText(frame_image, path, (20, ar_frame.size[1]-40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
-
- # Display frame analysis
- analysis = ar_frame.analysis()
-
- # Display scan path K Coefficient analysis if loaded
- try:
-
- kc_analysis = analysis[KCoefficient.ScanPathAnalyzer]
-
- # Write raw Kc analysis
- if kc_analysis.K < 0.:
-
- cv2.putText(frame_image, f'K coefficient: Ambient attention', (20, ar_frame.size[1]-120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
-
- elif kc_analysis.K > 0.:
-
- cv2.putText(frame_image, f'K coefficient: Focal attention', (20, ar_frame.size[1]-120), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 1, cv2.LINE_AA)
-
- except KeyError:
- pass
-
- # Display Nearest Neighbor index analysis if loaded
- try:
-
- nni_analysis = analysis[NearestNeighborIndex.ScanPathAnalyzer]
-
- cv2.putText(frame_image, f'Nearest neighbor index: {nni_analysis.nearest_neighbor_index:.3f}', (20, ar_frame.size[1]-320), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
-
- except KeyError:
- pass
-
- # Display Explore/Exploit ratio analysis if loaded
- try:
-
- xxr_analyser = analysis[ExploreExploitRatio.ScanPathAnalyzer]
-
- cv2.putText(frame_image, f'Explore/Exploit ratio: {xxr_analyser.explore_exploit_ratio:.3f}', (20, ar_frame.size[1]-360), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
-
- except KeyError:
- pass
-
- # Display demo_layer analysis
- analysis = ar_frame.layers["demo_layer"].analysis()
-
- # Display Transition matrix analysis if loaded
- try:
-
- transition_matrix_analysis = analysis[TransitionMatrix.AOIScanPathAnalyzer]
-
- cv2.putText(frame_image, f'Transition matrix density: {transition_matrix_analysis.transition_matrix_density:.2f}', (20, ar_frame.size[1]-160), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
-
- # Iterate over indexes (departures)
- for from_aoi, row in transition_matrix_analysis.transition_matrix_probabilities.iterrows():
-
- # Iterate over columns (destinations)
- for to_aoi, probability in row.items():
-
- if from_aoi != GazeFeatures.OutsideAOI and to_aoi != GazeFeatures.OutsideAOI:
-
- if from_aoi != to_aoi and probability > 0.0:
-
- from_center = ar_frame.layers["demo_layer"].aoi_scene[from_aoi].center.astype(int)
- to_center = ar_frame.layers["demo_layer"].aoi_scene[to_aoi].center.astype(int)
- start_line = (0.5 * from_center + 0.5 * to_center).astype(int)
-
- color = [int(probability*200) + 55, int(probability*200) + 55, int(probability*200) + 55]
-
- cv2.line(frame_image, start_line, to_center, color, int(probability*10) + 2)
- cv2.line(frame_image, from_center, to_center, [55, 55, 55], 2)
-
- except KeyError:
- pass
-
- # Display aoi scan path basic metrics analysis if loaded
- try:
-
- basic_analysis = analysis[Basic.AOIScanPathAnalyzer]
-
- # Write basic analysis
- cv2.putText(frame_image, f'Step number: {basic_analysis.steps_number}', (20, ar_frame.size[1]-440), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
- cv2.putText(frame_image, f'Step fixation duration average: {int(basic_analysis.step_fixation_durations_average)} ms', (20, ar_frame.size[1]-400), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
-
- except KeyError:
- pass
-
- # Display aoi scan path K-modified coefficient analysis if loaded
- try:
-
- aoi_kc_analysis = analysis[KCoefficient.AOIScanPathAnalyzer]
-
- # Write aoi Kc analysis
- if aoi_kc_analysis.K < 0.:
-
- cv2.putText(frame_image, f'K-modified coefficient: Ambient attention', (20, ar_frame.size[1]-80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
-
- elif aoi_kc_analysis.K > 0.:
-
- cv2.putText(frame_image, f'K-modified coefficient: Focal attention', (20, ar_frame.size[1]-80), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 1, cv2.LINE_AA)
-
- except KeyError:
- pass
-
- # Display Lempel-Ziv complexity analysis if loaded
- try:
-
- lzc_analysis = analysis[LempelZivComplexity.AOIScanPathAnalyzer]
-
- cv2.putText(frame_image, f'Lempel-Ziv complexity: {lzc_analysis.lempel_ziv_complexity}', (20, ar_frame.size[1]-200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
-
- except KeyError:
- pass
-
- # Display N-Gram analysis if loaded
- try:
-
- ngram_analysis = analysis[NGram.AOIScanPathAnalyzer]
-
- # Display only 3-gram analysis
- start = ar_frame.size[1] - ((len(ngram_analysis.ngrams_count[3]) + 1) * 40)
- cv2.putText(frame_image, f'{ngram_analysis.n_max}-Gram:', (ar_frame.size[0]-700, start-40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
-
- for i, (ngram, count) in enumerate(ngram_analysis.ngrams_count[3].items()):
-
- ngram_string = f'{ngram[0]}'
- for g in range(1, 3):
- ngram_string += f'>{ngram[g]}'
-
- cv2.putText(frame_image, f'{ngram_string}: {count}', (ar_frame.size[0]-700, start+(i*40)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
-
- except KeyError:
- pass
-
- # Display Entropy analysis if loaded
- try:
-
- entropy_analysis = analysis[Entropy.AOIScanPathAnalyzer]
-
- cv2.putText(frame_image, f'Stationary entropy: {entropy_analysis.stationary_entropy:.3f},', (20, ar_frame.size[1]-280), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
- cv2.putText(frame_image, f'Transition entropy: {entropy_analysis.transition_entropy:.3f},', (20, ar_frame.size[1]-240), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
-
- except KeyError:
- pass
-
- # Display frame image
- cv2.imshow(ar_frame.name, frame_image)
-
- key_pressed = cv2.waitKey(10)
-
- #if key_pressed != -1:
- # print(key_pressed)
-
- # Reload environment with 'r' key
- if key_pressed == 114:
-
- ar_frame = ArFeatures.ArFrame.from_json(args.frame)
-
- # Enable heatmap buffer with 'b' key
- if key_pressed == 98:
-
- enable_heatmap_buffer = not enable_heatmap_buffer
-
- ar_frame.heatmap.buffer = 10 if enable_heatmap_buffer else 0
- ar_frame.heatmap.clear()
-
- # Stop by pressing 'Esc' key
- if key_pressed == 27:
- break
-
- # Stop frame image display
- cv2.destroyAllWindows()
-
-if __name__ == '__main__':
-
- main()