aboutsummaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorThéo de la Hogue2023-07-04 10:36:44 +0200
committerThéo de la Hogue2023-07-04 10:36:44 +0200
commit7fd88a965602a84ee4eafc3b07e974be0c83db29 (patch)
tree58d5bef3f40ff47304ea6fd0d32f07ae5f3a3998 /src
parent2b7261d32ad43ccac013cdc4e77e16ecb3560960 (diff)
downloadargaze-7fd88a965602a84ee4eafc3b07e974be0c83db29.zip
argaze-7fd88a965602a84ee4eafc3b07e974be0c83db29.tar.gz
argaze-7fd88a965602a84ee4eafc3b07e974be0c83db29.tar.bz2
argaze-7fd88a965602a84ee4eafc3b07e974be0c83db29.tar.xz
Adding new camera frame feature.
Diffstat (limited to 'src')
-rw-r--r--src/argaze/ArFeatures.py667
1 files changed, 370 insertions, 297 deletions
diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py
index 3e1a56f..62ce4d8 100644
--- a/src/argaze/ArFeatures.py
+++ b/src/argaze/ArFeatures.py
@@ -63,10 +63,10 @@ class ArEnvironment():
scene._environment = self
# Init AOI scene projections
- self.__aoi_2d_scenes = {}
+ self.__camera_frames = {}
# Init a lock to share AOI scene projections between multiple threads
- self.__aoi_2d_scenes_lock = threading.Lock()
+ self.__camera_frames_lock = threading.Lock()
@classmethod
def from_json(self, json_filepath: str) -> ArSceneType:
@@ -165,11 +165,16 @@ class ArEnvironment():
new_aoi_3d_scene = AOI3DScene.AOI3DScene(aoi_3d_scene_value)
- # Build frames
- new_frames = {}
- for frame_name, frame_data in scene_data.pop('frames').items():
+ # Define frame data processor
+ def frame_data_processor(frame_data, force_frame_size: list = []) -> ArFrame:
- new_frame_size = frame_data.pop('size')
+ if len(force_frame_size) == 2:
+
+ new_frame_size = force_frame_size
+
+ else:
+
+ new_frame_size = frame_data.pop('size')
# Load background image
try:
@@ -190,11 +195,11 @@ class ArEnvironment():
gaze_movement_identifier_type, gaze_movement_identifier_parameters = gaze_movement_identifier_value.popitem()
gaze_movement_identifier_module = importlib.import_module(f'argaze.GazeAnalysis.{gaze_movement_identifier_type}')
- gaze_movement_identifier = gaze_movement_identifier_module.GazeMovementIdentifier(**gaze_movement_identifier_parameters)
+ new_gaze_movement_identifier = gaze_movement_identifier_module.GazeMovementIdentifier(**gaze_movement_identifier_parameters)
except KeyError:
- gaze_movement_identifier = None
+ new_gaze_movement_identifier = None
# Load scan path analyzers
new_scan_path_analyzers = {}
@@ -284,11 +289,30 @@ class ArEnvironment():
pass
- # Append new frame
- new_frames[frame_name] = ArFrame.from_scene(new_aoi_3d_scene, frame_name, new_frame_size, new_frame_background, gaze_movement_identifier, new_scan_path_analyzers, new_aoi_scan_path_analyzers, **frame_data)
+ return new_frame_size, new_frame_background, new_gaze_movement_identifier, new_scan_path_analyzers, new_aoi_scan_path_analyzers
+
+ # Build camera frame as large as aruco detector optic parameters
+ try:
+
+ camera_frame_data = scene_data.pop('camera_frame')
+ new_frame_size, new_frame_background, new_gaze_movement_identifier, new_scan_path_analyzers, new_aoi_scan_path_analyzers = frame_data_processor(camera_frame_data, force_frame_size=new_optic_parameters.dimensions)
+ new_camera_frame = ArFrame.from_scene(new_aoi_3d_scene, None, new_frame_size, new_frame_background, new_gaze_movement_identifier, new_scan_path_analyzers, new_aoi_scan_path_analyzers, **camera_frame_data)
+
+ except KeyError:
+
+ new_camera_frame = None #ArFrame.from_scene(new_aoi_3d_scene, None, new_optic_parameters.dimensions)
+
+ # Build AOI frames
+ new_aoi_frames = {}
+ for aoi_name, aoi_frame_data in scene_data.pop('aoi_frames').items():
+
+ new_frame_size, new_frame_background, new_gaze_movement_identifier, new_scan_path_analyzers, new_aoi_scan_path_analyzers = frame_data_processor(aoi_frame_data)
+
+ # Append new AOI frame
+ new_aoi_frames[aoi_name] = ArFrame.from_scene(new_aoi_3d_scene, aoi_name, new_frame_size, new_frame_background, new_gaze_movement_identifier, new_scan_path_analyzers, new_aoi_scan_path_analyzers, **aoi_frame_data)
# Append new scene
- new_scenes[scene_name] = ArScene(new_aruco_scene, new_aoi_3d_scene, new_frames, **scene_data)
+ new_scenes[scene_name] = ArScene(new_aruco_scene, new_aoi_3d_scene, new_camera_frame, new_aoi_frames, **scene_data)
return ArEnvironment(new_name, new_aruco_detector, new_scenes)
@@ -313,26 +337,25 @@ class ArEnvironment():
# For each scene
for scene_name, scene in self.scenes.items():
- # For each frame
- for frame_name, frame in scene.frames.items():
+ # For each aoi frame
+ for frame_name, frame in scene.aoi_frames.items():
yield scene_name, frame_name, frame
def detect_and_project(self, image: numpy.array) -> dict:
- """Detect environment aruco markers from image and project scenes."""
+ """Detect environment aruco markers from image and project scenes into camera frame."""
# Detect aruco markers
self.aruco_detector.detect_markers(image)
- # Project each AOI scene
- new_aoi_2d_scenes = {}
+ # Project each aoi 3d scene into camera frame
for scene_name, scene in self.scenes.items():
# Project scene
try:
# Try to build AOI scene from detected ArUco marker corners
- new_aoi_2d_scenes[scene_name] = scene.build_aruco_aoi_scene(self.aruco_detector.detected_markers)
+ scene.build_aruco_aoi_scene(self.aruco_detector.detected_markers)
except SceneProjectionFailed:
@@ -342,63 +365,16 @@ class ArEnvironment():
# Estimate scene pose from detected scene markers
tvec, rmat, _, _ = scene.estimate_pose(self.aruco_detector.detected_markers)
- # Project AOI scene into video image according estimated pose
- new_aoi_2d_scenes[scene_name] = scene.project(tvec, rmat)
-
- # Lock scene projections exploitation
- self.__aoi_2d_scenes_lock.acquire()
-
- # Copy scene projections
- self.__aoi_2d_scenes = new_aoi_2d_scenes.copy()
-
- # Unlock scene projections exploitation
- self.__aoi_2d_scenes_lock.release()
+ # Project AOI scene into camera frame according to estimated pose
+ scene.project(tvec, rmat)
- def look(self, timestamp: int|float, gaze_position: GazeFeatures.GazePosition, data_generator: bool = False):
- """Project timestamped gaze position into current scene projections."""
-
- # Can't use scene projection when it is locked
- if self.__aoi_2d_scenes_lock.locked():
-
- #TODO: Store ignored timestamped gaze positions for further projections
- print('Ignoring ', timestamp, gaze_position)
- return
-
- # Lock scene projections
- self.__aoi_2d_scenes_lock.acquire()
+ def look(self, timestamp: int|float, gaze_position: GazeFeatures.GazePosition):
+ """Project timestamped gaze position into each scene."""
# For each aoi scene projection
for scene_name, scene in self.scenes.items():
- try:
-
- aoi_2d_scene = self.__aoi_2d_scenes[scene_name]
-
- # For each scene frames
- for frame_name, frame in scene.frames.items():
-
- # TODO: Add option to use gaze precision circle
- if aoi_2d_scene[frame.name].contains_point(gaze_position.value):
-
- inner_x, inner_y = self.__aoi_2d_scenes[scene_name][frame.name].clockwise().inner_axis(gaze_position.value)
-
- # QUESTION: How to project gaze precision?
- inner_gaze_position = GazeFeatures.GazePosition((inner_x, inner_y))
-
- gaze_movement, look_at, scan_step_analysis, aoi_scan_step_analysis = frame.look(timestamp, inner_gaze_position * frame.size)
-
- # Generate looking data
- if data_generator:
-
- yield scene_name, frame_name, frame, gaze_movement, look_at, scan_step_analysis, aoi_scan_step_analysis
-
- # Ignore missing aoi scene projection
- except KeyError:
-
- pass
-
- # Unlock scene projections
- self.__aoi_2d_scenes_lock.release()
+ scene.look(timestamp, gaze_position)
def to_json(self, json_filepath):
"""Save environment to .json file."""
@@ -413,11 +389,10 @@ class ArEnvironment():
# Draw detected markers
self.aruco_detector.draw_detected_markers(image)
- # Draw each AOI scene
- for scene_name, aoi_2d_scene in self.__aoi_2d_scenes.items():
+ # Draw each scene
+ for scene_name, scene in self.scenes.items():
- # Draw AOI scene projection
- aoi_2d_scene.draw(image, color=(255, 255, 255))
+ scene.draw(image)
class PoseEstimationFailed(Exception):
"""
@@ -440,228 +415,6 @@ class SceneProjectionFailed(Exception):
super().__init__(message)
@dataclass
-class ArScene():
- """
- Define an Augmented Reality scene with ArUco markers and AOI scenes.
-
- Parameters:
- aruco_scene: ArUco markers 3D scene description used to estimate scene pose from detected markers: see [estimate_pose][argaze.ArFeatures.ArScene.estimate_pose] function below.
-
- aoi_3d_scene: AOI 3D scene description that will be projected onto estimated scene once its pose will be estimated : see [project][argaze.ArFeatures.ArScene.project] function below.
-
- frames: All scene frames
-
- aruco_axis: Optional dictionary to define orthogonal axis where each axis is defined by list of 3 markers identifier (first is origin). \
- This pose estimation strategy is used by [estimate_pose][argaze.ArFeatures.ArScene.estimate_pose] function when at least 3 markers are detected.
-
- aruco_aoi: Optional dictionary of AOI defined by list of markers identifier and markers corners index tuples: see [build_aruco_aoi_scene][argaze.ArFeatures.ArScene.build_aruco_aoi_scene] function below.
-
- angle_tolerance: Optional angle error tolerance to validate marker pose in degree used into [estimate_pose][argaze.ArFeatures.ArScene.estimate_pose] function.
-
- distance_tolerance: Optional distance error tolerance to validate marker pose in centimeter used into [estimate_pose][argaze.ArFeatures.ArScene.estimate_pose] function.
- """
-
- aruco_scene: ArUcoScene.ArUcoScene = field(default_factory=ArUcoScene.ArUcoScene)
- aoi_3d_scene: AOI3DScene.AOI3DScene = field(default_factory=AOI3DScene.AOI3DScene)
- frames: dict = field(default_factory=dict)
- aruco_axis: dict = field(default_factory=dict)
- aruco_aoi: dict = field(default_factory=dict)
- angle_tolerance: float = field(default=0.)
- distance_tolerance: float = field(default=0.)
-
- def __post_init__(self):
-
- # Define environment attribute: it will be setup by parent environment later
- self._environment = None
-
- # Preprocess orthogonal projection to speed up further aruco aoi processings
- self.__orthogonal_projection_cache = self.aoi_3d_scene.orthogonal_projection
-
- # Setup frames scene after frame creation
- for name, frame in self.frames.items():
- frame._scene = self
-
- def __str__(self) -> str:
- """
- Returns:
- String representation
- """
-
- output = f'ArEnvironment:\n{self._environment.name}\n'
- output += f'ArUcoScene:\n{self.aruco_scene}\n'
- output += f'AOI3DScene:\n{self.aoi_3d_scene}\n'
-
- return output
-
- def estimate_pose(self, detected_markers) -> Tuple[numpy.array, numpy.array, str, dict]:
- """Estimate scene pose from detected ArUco markers.
-
- Returns:
- scene translation vector
- scene rotation matrix
- pose estimation strategy
- dict of markers used to estimate the pose
- """
-
- # Pose estimation fails when no marker is detected
- if len(detected_markers) == 0:
-
- raise PoseEstimationFailed('No marker detected')
-
- scene_markers, _ = self.aruco_scene.filter_markers(detected_markers)
-
- # Pose estimation fails when no marker belongs to the scene
- if len(scene_markers) == 0:
-
- raise PoseEstimationFailed('No marker belongs to the scene')
-
- # Estimate scene pose from unique marker transformations
- elif len(scene_markers) == 1:
-
- marker_id, marker = scene_markers.popitem()
- tvec, rmat = self.aruco_scene.estimate_pose_from_single_marker(marker)
-
- return tvec, rmat, 'estimate_pose_from_single_marker', {marker_id: marker}
-
- # Try to estimate scene pose from 3 markers defining an orthogonal axis
- elif len(scene_markers) >= 3 and len(self.aruco_axis) > 0:
-
- for axis_name, axis_markers in self.aruco_axis.items():
-
- try:
-
- origin_marker = scene_markers[axis_markers['origin_marker']]
- horizontal_axis_marker = scene_markers[axis_markers['horizontal_axis_marker']]
- vertical_axis_marker = scene_markers[axis_markers['vertical_axis_marker']]
-
- tvec, rmat = self.aruco_scene.estimate_pose_from_axis_markers(origin_marker, horizontal_axis_marker, vertical_axis_marker)
-
- return tvec, rmat, 'estimate_pose_from_axis_markers', {origin_marker.identifier: origin_marker, horizontal_axis_marker.identifier: horizontal_axis_marker, vertical_axis_marker.identifier: vertical_axis_marker}
-
- except:
- pass
-
- raise PoseEstimationFailed('No marker axis')
-
- # Otherwise, check markers consistency
- consistent_markers, unconsistent_markers, unconsistencies = self.aruco_scene.check_markers_consistency(scene_markers, self.angle_tolerance, self.distance_tolerance)
-
- # Pose estimation fails when no marker passes consistency checking
- if len(consistent_markers) == 0:
-
- raise PoseEstimationFailed('Unconsistent marker poses', unconsistencies)
-
- # Otherwise, estimate scene pose from all consistent markers pose
- tvec, rmat = self.aruco_scene.estimate_pose_from_markers(consistent_markers)
-
- return tvec, rmat, 'estimate_pose_from_markers', consistent_markers
-
- def project(self, tvec: numpy.array, rvec: numpy.array, visual_hfov: float = 0.) -> AOI2DScene.AOI2DScene:
- """Project AOI scene according estimated pose and optional horizontal field of view clipping angle.
-
- Parameters:
- tvec: translation vector
- rvec: rotation vector
- visual_hfov: horizontal field of view clipping angle
- """
-
- # Clip AOI out of the visual horizontal field of view (optional)
- if visual_hfov > 0:
-
- # Transform scene into camera referential
- aoi_3d_scene_camera_ref = self.aoi_3d_scene.transform(tvec, rvec)
-
- # Get aoi inside vision cone field
- cone_vision_height_cm = 200 # cm
- cone_vision_radius_cm = numpy.tan(numpy.deg2rad(visual_hfov / 2)) * cone_vision_height_cm
-
- _, aoi_outside = aoi_3d_scene_camera_ref.vision_cone(cone_vision_radius_cm, cone_vision_height_cm)
-
- # Keep only aoi inside vision cone field
- aoi_3d_scene_copy = self.aoi_3d_scene.copy(exclude=aoi_outside.keys())
-
- else:
-
- aoi_3d_scene_copy = self.aoi_3d_scene.copy()
-
- aoi_2d_scene = aoi_3d_scene_copy.project(tvec, rvec, self._environment.aruco_detector.optic_parameters.K)
-
- # Warn user when the projected scene is empty
- if len(aoi_2d_scene) == 0:
-
- raise SceneProjectionFailed('AOI projection is empty')
-
- return aoi_2d_scene
-
- def build_aruco_aoi_scene(self, detected_markers) -> AOI2DScene.AOI2DScene:
- """
- Build AOI scene from detected ArUco markers as defined in aruco_aoi dictionary.
-
- Returns:
- built AOI 2D scene
- """
-
- # Check aruco aoi is defined
- if len(self.aruco_aoi) == 0:
-
- raise SceneProjectionFailed('No aruco aoi is defined')
-
- # AOI projection fails when no marker is detected
- if len(detected_markers) == 0:
-
- raise SceneProjectionFailed('No marker detected')
-
- aruco_aoi_scene = {}
-
- for aruco_aoi_name, aoi in self.aruco_aoi.items():
-
- # Each aoi's corner is defined by a marker's corner
- aoi_corners = []
- for corner in ["upper_left_corner", "upper_right_corner", "lower_right_corner", "lower_left_corner"]:
-
- marker_identifier = aoi[corner]["marker_identifier"]
-
- try:
-
- aoi_corners.append(detected_markers[marker_identifier].corners[0][aoi[corner]["marker_corner_index"]])
-
- except Exception as e:
-
- raise SceneProjectionFailed(f'Missing marker #{e} to build ArUco AOI scene')
-
- aruco_aoi_scene[aruco_aoi_name] = AOIFeatures.AreaOfInterest(aoi_corners)
-
- # Then each inner aoi is projected from the current aruco aoi
- for inner_aoi_name, inner_aoi in self.aoi_3d_scene.items():
-
- if aruco_aoi_name != inner_aoi_name:
-
- aoi_corners = [numpy.array(aruco_aoi_scene[aruco_aoi_name].outter_axis(inner)) for inner in self.__orthogonal_projection_cache[inner_aoi_name]]
- aruco_aoi_scene[inner_aoi_name] = AOIFeatures.AreaOfInterest(aoi_corners)
-
- return AOI2DScene.AOI2DScene(aruco_aoi_scene)
-
- def draw_axis(self, image: numpy.array):
- """
- Draw scene axis into image.
-
- Parameters:
- image: where to draw
- """
-
- self.aruco_scene.draw_axis(image, self._environment.aruco_detector.optic_parameters.K, self._environment.aruco_detector.optic_parameters.D)
-
- def draw_places(self, image: numpy.array):
- """
- Draw scene places into image.
-
- Parameters:
- image: where to draw
- """
-
- self.aruco_scene.draw_places(image, self._environment.aruco_detector.optic_parameters.K, self._environment.aruco_detector.optic_parameters.D)
-
-@dataclass
class ArFrame():
"""
Define Augmented Reality frame as an AOI2DScene made from a projected then reframed parent AOI3DScene.
@@ -702,9 +455,15 @@ class ArFrame():
self.__looking_lock = threading.Lock()
@classmethod
- def from_scene(self, aoi_3d_scene, aoi_name, size, background, gaze_movement_identifier, scan_path_analyzers: list = [], aoi_scan_path_analyzers: list = [], heatmap: bool = False) -> ArFrameType:
+ def from_scene(self, aoi_3d_scene, aoi_name, size, background: numpy.array = numpy.empty((0, 0)), gaze_movement_identifier: GazeFeatures.GazeMovementIdentifier = None, scan_path_analyzers: list = [], aoi_scan_path_analyzers: list = [], heatmap: bool = False) -> ArFrameType:
- aoi_2d_scene = aoi_3d_scene.orthogonal_projection.reframe(aoi_name, size)
+ if aoi_name:
+
+ aoi_2d_scene = aoi_3d_scene.orthogonal_projection.reframe(aoi_name, size)
+
+ else:
+
+ aoi_2d_scene = AOI2DScene.AOI2DScene()
return ArFrame(aoi_name, \
size, \
@@ -885,3 +644,317 @@ class ArFrame():
# Return looking data
return gaze_movement, look_at, scan_step_analysis, aoi_scan_step_analysis
+
+ @property
+ def image(self,):
+ """
+ Get frame image
+ """
+
+ image = self.background.copy()
+
+ self.aoi_2d_scene.draw(image, color=(255, 255, 255))
+ self.current_gaze_position.draw(image, color=(255, 255, 255))
+
+ self.current_gaze_movement.draw(image, color=(0, 255, 255))
+ self.current_gaze_movement.draw_positions(image)
+
+ # Check frame fixation
+ if GazeFeatures.is_fixation(self.current_gaze_movement):
+
+ # Draw looked AOI
+ self.aoi_2d_scene.draw_circlecast(image, self.current_gaze_movement.focus, self.current_gaze_movement.deviation_max, base_color=(0, 0, 0), matching_color=(255, 255, 255))
+
+ return image
+
+@dataclass
+class ArScene():
+ """
+ Define an Augmented Reality scene with ArUco markers and AOI scenes.
+
+ Parameters:
+ aruco_scene: ArUco markers 3D scene description used to estimate scene pose from detected markers: see [estimate_pose][argaze.ArFeatures.ArScene.estimate_pose] function below.
+
+ aoi_3d_scene: AOI 3D scene description that will be projected onto estimated scene once its pose will be estimated : see [project][argaze.ArFeatures.ArScene.project] function below.
+
+ camera_frame: Where AOI 3D scene will be projected
+
+ aoi_frames: Optional dictionary to define AOI as ArFrame.
+
+ aruco_axis: Optional dictionary to define orthogonal axis where each axis is defined by list of 3 markers identifier (first is origin). \
+ This pose estimation strategy is used by [estimate_pose][argaze.ArFeatures.ArScene.estimate_pose] function when at least 3 markers are detected.
+
+ aruco_aoi: Optional dictionary of AOI defined by list of markers identifier and markers corners index tuples: see [build_aruco_aoi_scene][argaze.ArFeatures.ArScene.build_aruco_aoi_scene] function below.
+
+ angle_tolerance: Optional angle error tolerance to validate marker pose in degree used into [estimate_pose][argaze.ArFeatures.ArScene.estimate_pose] function.
+
+ distance_tolerance: Optional distance error tolerance to validate marker pose in centimeter used into [estimate_pose][argaze.ArFeatures.ArScene.estimate_pose] function.
+ """
+
+ aruco_scene: ArUcoScene.ArUcoScene = field(default_factory=ArUcoScene.ArUcoScene)
+ aoi_3d_scene: AOI3DScene.AOI3DScene = field(default_factory=AOI3DScene.AOI3DScene)
+ camera_frame: ArFrame = field(default_factory=ArFrame)
+ aoi_frames: dict = field(default_factory=dict)
+ aruco_axis: dict = field(default_factory=dict)
+ aruco_aoi: dict = field(default_factory=dict)
+ angle_tolerance: float = field(default=0.)
+ distance_tolerance: float = field(default=0.)
+
+ def __post_init__(self):
+
+ # Define environment attribute: it will be setup by parent environment later
+ self._environment = None
+
+ # Preprocess orthogonal projection to speed up further aruco aoi processings
+ self.__orthogonal_projection_cache = self.aoi_3d_scene.orthogonal_projection
+
+ # Setup ArFrame scene attribute after ArFrame creation
+ for aoi_name, frame in self.aoi_frames.items():
+ frame._scene = self
+
+ # Init lock to share camera frame with multiple threads
+ self.__camera_frame_lock = threading.Lock()
+
+ def __str__(self) -> str:
+ """
+ Returns:
+ String representation
+ """
+
+ output = f'ArEnvironment:\n{self._environment.name}\n'
+ output += f'ArUcoScene:\n{self.aruco_scene}\n'
+ output += f'AOI3DScene:\n{self.aoi_3d_scene}\n'
+
+ return output
+
+ def estimate_pose(self, detected_markers) -> Tuple[numpy.array, numpy.array, str, dict]:
+ """Estimate scene pose from detected ArUco markers.
+
+ Returns:
+ scene translation vector
+ scene rotation matrix
+ pose estimation strategy
+ dict of markers used to estimate the pose
+ """
+
+ # Pose estimation fails when no marker is detected
+ if len(detected_markers) == 0:
+
+ raise PoseEstimationFailed('No marker detected')
+
+ scene_markers, _ = self.aruco_scene.filter_markers(detected_markers)
+
+ # Pose estimation fails when no marker belongs to the scene
+ if len(scene_markers) == 0:
+
+ raise PoseEstimationFailed('No marker belongs to the scene')
+
+ # Estimate scene pose from unique marker transformations
+ elif len(scene_markers) == 1:
+
+ marker_id, marker = scene_markers.popitem()
+ tvec, rmat = self.aruco_scene.estimate_pose_from_single_marker(marker)
+
+ return tvec, rmat, 'estimate_pose_from_single_marker', {marker_id: marker}
+
+ # Try to estimate scene pose from 3 markers defining an orthogonal axis
+ elif len(scene_markers) >= 3 and len(self.aruco_axis) > 0:
+
+ for axis_name, axis_markers in self.aruco_axis.items():
+
+ try:
+
+ origin_marker = scene_markers[axis_markers['origin_marker']]
+ horizontal_axis_marker = scene_markers[axis_markers['horizontal_axis_marker']]
+ vertical_axis_marker = scene_markers[axis_markers['vertical_axis_marker']]
+
+ tvec, rmat = self.aruco_scene.estimate_pose_from_axis_markers(origin_marker, horizontal_axis_marker, vertical_axis_marker)
+
+ return tvec, rmat, 'estimate_pose_from_axis_markers', {origin_marker.identifier: origin_marker, horizontal_axis_marker.identifier: horizontal_axis_marker, vertical_axis_marker.identifier: vertical_axis_marker}
+
+ except:
+ pass
+
+ raise PoseEstimationFailed('No marker axis')
+
+ # Otherwise, check markers consistency
+ consistent_markers, unconsistent_markers, unconsistencies = self.aruco_scene.check_markers_consistency(scene_markers, self.angle_tolerance, self.distance_tolerance)
+
+ # Pose estimation fails when no marker passes consistency checking
+ if len(consistent_markers) == 0:
+
+ raise PoseEstimationFailed('Unconsistent marker poses', unconsistencies)
+
+ # Otherwise, estimate scene pose from all consistent markers pose
+ tvec, rmat = self.aruco_scene.estimate_pose_from_markers(consistent_markers)
+
+ return tvec, rmat, 'estimate_pose_from_markers', consistent_markers
+
+ def project(self, tvec: numpy.array, rvec: numpy.array, visual_hfov: float = 0.) -> ArFrame:
+ """Project AOI scene according estimated pose and optional horizontal field of view clipping angle.
+
+ Parameters:
+ tvec: translation vector
+ rvec: rotation vector
+ visual_hfov: horizontal field of view clipping angle
+ """
+
+ # Clip AOI out of the visual horizontal field of view (optional)
+ if visual_hfov > 0:
+
+ # Transform scene into camera referential
+ aoi_3d_scene_camera_ref = self.aoi_3d_scene.transform(tvec, rvec)
+
+ # Get aoi inside vision cone field
+ cone_vision_height_cm = 200 # cm
+ cone_vision_radius_cm = numpy.tan(numpy.deg2rad(visual_hfov / 2)) * cone_vision_height_cm
+
+ _, aoi_outside = aoi_3d_scene_camera_ref.vision_cone(cone_vision_radius_cm, cone_vision_height_cm)
+
+ # Keep only aoi inside vision cone field
+ aoi_3d_scene_copy = self.aoi_3d_scene.copy(exclude=aoi_outside.keys())
+
+ else:
+
+ aoi_3d_scene_copy = self.aoi_3d_scene.copy()
+
+ # Lock camera frame exploitation
+ self.__camera_frame_lock.acquire()
+
+ # Update camera frame
+ self.camera_frame.aoi_2d_scene = aoi_3d_scene_copy.project(tvec, rvec, self._environment.aruco_detector.optic_parameters.K)
+
+ # Unlock camera frame exploitation
+ self.__camera_frame_lock.release()
+
+ # Warn user when the projected scene is empty
+ if len(self.camera_frame.aoi_2d_scene) == 0:
+
+ raise SceneProjectionFailed('AOI projection is empty')
+
+ def build_aruco_aoi_scene(self, detected_markers) -> AOI2DScene.AOI2DScene:
+ """
+ Build AOI scene from detected ArUco markers as defined in aruco_aoi dictionary.
+
+ Returns:
+ built AOI 2D scene
+ """
+
+ # Check aruco aoi is defined
+ if len(self.aruco_aoi) == 0:
+
+ raise SceneProjectionFailed('No aruco aoi is defined')
+
+ # AOI projection fails when no marker is detected
+ if len(detected_markers) == 0:
+
+ raise SceneProjectionFailed('No marker detected')
+
+ aruco_aoi_scene = {}
+
+ for aruco_aoi_name, aoi in self.aruco_aoi.items():
+
+ # Each aoi's corner is defined by a marker's corner
+ aoi_corners = []
+ for corner in ["upper_left_corner", "upper_right_corner", "lower_right_corner", "lower_left_corner"]:
+
+ marker_identifier = aoi[corner]["marker_identifier"]
+
+ try:
+
+ aoi_corners.append(detected_markers[marker_identifier].corners[0][aoi[corner]["marker_corner_index"]])
+
+ except Exception as e:
+
+ raise SceneProjectionFailed(f'Missing marker #{e} to build ArUco AOI scene')
+
+ aruco_aoi_scene[aruco_aoi_name] = AOIFeatures.AreaOfInterest(aoi_corners)
+
+ # Then each inner aoi is projected from the current aruco aoi
+ for inner_aoi_name, inner_aoi in self.aoi_3d_scene.items():
+
+ if aruco_aoi_name != inner_aoi_name:
+
+ aoi_corners = [numpy.array(aruco_aoi_scene[aruco_aoi_name].outter_axis(inner)) for inner in self.__orthogonal_projection_cache[inner_aoi_name]]
+ aruco_aoi_scene[inner_aoi_name] = AOIFeatures.AreaOfInterest(aoi_corners)
+
+ # Lock camera frame exploitation
+ self.__camera_frame_lock.acquire()
+
+ # Update camera frame
+ self.camera_frame.aoi_2d_scene = AOI2DScene.AOI2DScene(aruco_aoi_scene)
+
+ # Unlock camera frame exploitation
+ self.__camera_frame_lock.release()
+
+ def look(self, timestamp: int|float, gaze_position: GazeFeatures.GazePosition):
+ """Project timestamped gaze position into camera frame."""
+
+ # Can't use camera frame when it is locked
+ if self.__camera_frame_lock.locked():
+
+ #TODO: Store ignored timestamped gaze positions for further projections
+ print('Ignoring ', timestamp, gaze_position)
+ return
+
+ # Lock camera frame exploitation
+ self.__camera_frame_lock.acquire()
+
+ # Project gaze position in camera frame
+ yield frame.look(timestamp, inner_gaze_position * frame.size)
+
+ # Project gaze position into each aoi frame if possible
+ for aoi_name, frame in self.aoi_frames.items():
+
+ # Is aoi frame projected into camera frame ?
+ try:
+
+ aoi_2d = self.camera_frame.aoi_2d_scene[frame.name]
+
+ # TODO: Add option to use gaze precision circle
+ if aoi_2d.contains_point(gaze_position.value):
+
+ inner_x, inner_y = aoi_2d.clockwise().inner_axis(gaze_position.value)
+
+ # QUESTION: How to project gaze precision?
+ inner_gaze_position = GazeFeatures.GazePosition((inner_x, inner_y))
+
+ yield frame.look(timestamp, inner_gaze_position * frame.size)
+
+ # Ignore missing aoi frame projection
+ except KeyError:
+
+ pass
+
+ # Unlock camera frame exploitation
+ self.__camera_frame_lock.release()
+
+ def draw(self, image: numpy.array):
+ """
+ Draw camera frame
+
+ Parameters:
+ image: where to draw
+ """
+
+ self.camera_frame.aoi_2d_scene.draw(image)
+
+ def draw_axis(self, image: numpy.array):
+ """
+ Draw scene axis into image.
+
+ Parameters:
+ image: where to draw
+ """
+
+ self.aruco_scene.draw_axis(image, self._environment.aruco_detector.optic_parameters.K, self._environment.aruco_detector.optic_parameters.D)
+
+ def draw_places(self, image: numpy.array):
+ """
+ Draw scene places into image.
+
+ Parameters:
+ image: where to draw
+ """
+
+ self.aruco_scene.draw_places(image, self._environment.aruco_detector.optic_parameters.K, self._environment.aruco_detector.optic_parameters.D)