-rw-r--r--    src/argaze/ArFeatures.py    228
1 file changed, 145 insertions(+), 83 deletions(-)
diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py
index 146325a..76d048d 100644
--- a/src/argaze/ArFeatures.py
+++ b/src/argaze/ArFeatures.py
@@ -69,40 +69,46 @@ class ArEnvironment():

         working_directory = os.path.dirname(json_filepath)

         new_name = data.pop('name')

-        new_detector_data = data.pop('aruco_detector')
-        new_aruco_dictionary = ArUcoMarkersDictionary.ArUcoMarkersDictionary(**new_detector_data.pop('dictionary'))
-        new_marker_size = new_detector_data.pop('marker_size')
-
-        # Check optic_parameters value type
-        optic_parameters_value = new_detector_data.pop('optic_parameters')
-
-        # str: relative path to .json file
-        if type(optic_parameters_value) == str:
-
-            optic_parameters_value = os.path.join(working_directory, optic_parameters_value)
-            new_optic_parameters = ArUcoOpticCalibrator.OpticParameters.from_json(optic_parameters_value)
-
-        # dict:
-        else:
-
-            new_optic_parameters = ArUcoOpticCalibrator.OpticParameters(**optic_parameters_value)
-
-        # Check detector parameters value type
-        detector_parameters_value = new_detector_data.pop('parameters')
-
-        # str: relative path to .json file
-        if type(detector_parameters_value) == str:
-
-            detector_parameters_value = os.path.join(working_directory, detector_parameters_value)
-            new_aruco_detector_parameters = ArUcoDetector.DetectorParameters.from_json(detector_parameters_value)
-
-        # dict:
-        else:
-
-            new_aruco_detector_parameters = ArUcoDetector.DetectorParameters(**detector_parameters_value)
-
-        new_aruco_detector = ArUcoDetector.ArUcoDetector(new_aruco_dictionary, new_marker_size, new_optic_parameters, new_aruco_detector_parameters)
+        try:
+
+            new_detector_data = data.pop('aruco_detector')
+
+            new_aruco_dictionary = ArUcoMarkersDictionary.ArUcoMarkersDictionary(**new_detector_data.pop('dictionary'))
+            new_marker_size = new_detector_data.pop('marker_size')
+
+            # Check optic_parameters value type
+            optic_parameters_value = new_detector_data.pop('optic_parameters')
+
+            # str: relative path to .json file
+            if type(optic_parameters_value) == str:
+
+                optic_parameters_value = os.path.join(working_directory, optic_parameters_value)
+                new_optic_parameters = ArUcoOpticCalibrator.OpticParameters.from_json(optic_parameters_value)
+
+            # dict:
+            else:
+
+                new_optic_parameters = ArUcoOpticCalibrator.OpticParameters(**optic_parameters_value)
+
+            # Check detector parameters value type
+            detector_parameters_value = new_detector_data.pop('parameters')
+
+            # str: relative path to .json file
+            if type(detector_parameters_value) == str:
+
+                detector_parameters_value = os.path.join(working_directory, detector_parameters_value)
+                new_aruco_detector_parameters = ArUcoDetector.DetectorParameters.from_json(detector_parameters_value)
+
+            # dict:
+            else:
+
+                new_aruco_detector_parameters = ArUcoDetector.DetectorParameters(**detector_parameters_value)
+
+            new_aruco_detector = ArUcoDetector.ArUcoDetector(new_aruco_dictionary, new_marker_size, new_optic_parameters, new_aruco_detector_parameters)
+
+        except KeyError:
+
+            new_aruco_detector = None

         # Build scenes
         new_scenes = {}
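
The hunk above makes the whole `aruco_detector` section optional by relying on `dict.pop()` raising `KeyError`, and the next hunk applies the same treatment to each scene's `aruco_scene` entry. A minimal, self-contained sketch of the pattern (the `detector` key and return shape are hypothetical, not the ArGaze schema):

    import json

    def load_environment(filepath: str):
        """Load a JSON environment where the 'detector' section is optional."""

        with open(filepath) as f:
            data = json.load(f)

        try:
            # dict.pop() raises KeyError when the section is missing,
            # which routes execution to the fallback below.
            detector = dict(data.pop('detector'))  # stand-in for real construction

        except KeyError:
            # A missing section is not an error: the detector stays disabled.
            detector = None

        return detector, data
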
@@ -111,19 +117,25 @@ class ArEnvironment():

             new_aruco_scene = None
             new_aoi_scene = None

-            # Check aruco_scene value type
-            aruco_scene_value = scene_data.pop('aruco_scene')
-
-            # str: relative path to .obj file
-            if type(aruco_scene_value) == str:
-
-                aruco_scene_value = os.path.join(working_directory, aruco_scene_value)
-                new_aruco_scene = ArUcoScene.ArUcoScene.from_obj(aruco_scene_value)
-
-            # dict:
-            else:
-
-                new_aruco_scene = ArUcoScene.ArUcoScene(**aruco_scene_value)
+            try:
+
+                # Check aruco_scene value type
+                aruco_scene_value = scene_data.pop('aruco_scene')
+
+                # str: relative path to .obj file
+                if type(aruco_scene_value) == str:
+
+                    aruco_scene_value = os.path.join(working_directory, aruco_scene_value)
+                    new_aruco_scene = ArUcoScene.ArUcoScene.from_obj(aruco_scene_value)
+
+                # dict:
+                else:
+
+                    new_aruco_scene = ArUcoScene.ArUcoScene(**aruco_scene_value)
+
+            except KeyError:
+
+                new_aruco_scene = None

             # Check aoi_scene value type
             aoi_scene_value = scene_data.pop('aoi_scene')
@@ -172,7 +184,7 @@ class ArEnvironment():

                     gaze_movement_identifier = None

                 # Append new screen
-                new_screens[screen_name] = ArScreen.from_scene(new_aoi_scene, screen_name, new_screen_size, new_screen_background, gaze_movement_identifier)
+                new_screens[screen_name] = ArScreen.from_scene(new_aoi_scene, screen_name, new_screen_size, new_screen_background, gaze_movement_identifier, **screen_data)

             # Append new scene
             new_scenes[scene_name] = ArScene(new_aruco_scene, new_aoi_scene, new_screens, **scene_data)
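
Passing `**screen_data` forwards whatever keys remain after the explicit ones were popped, the same pop-then-forward idiom used for `ArScene(**scene_data)` just above. A sketch with made-up keys:

    # Pop the keys handled explicitly, forward the rest as keyword arguments.
    screen_data = {'size': (1920, 1080), 'scan_path': True, 'heatmap': False}

    size = screen_data.pop('size')                    # consumed here

    def build_screen(name, size, **kwargs):
        # kwargs receives e.g. scan_path=True, heatmap=False
        return {'name': name, 'size': size, **kwargs}

    screen = build_screen('main', size, **screen_data)
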
@@ -195,7 +207,7 @@ class ArEnvironment():

     @property
     def image(self):
-        """Get environment image and screen images."""
+        """Get ArUco detection visualisation and scenes projections."""

         # Draw detected markers
         self.aruco_detector.draw_detected_markers(self.__image)
@@ -208,20 +220,17 @@ class ArEnvironment():

         return self.__image

-    def screens_image(self):
-        """Generate each screen image"""
+    @property
+    def screens(self):
+        """Iterate over all environment screens"""

-        # Draw each scene screens
+        # For each scene
         for scene_name, scene in self.scenes.items():

+            # For each screen
             for screen_name, screen in scene.screens.items():

-                screen.draw_background()
-                screen.draw_aoi()
-                screen.draw_gaze_position()
-                screen.draw_gaze_movement()
-
-                yield scene_name, screen_name, screen.image
+                yield scene_name, screen_name, screen

     def detect_and_project(self, image: numpy.array) -> dict:
         """Detect environment aruco markers from image and project scenes."""
@@ -230,28 +239,25 @@ class ArEnvironment():

         # Detect aruco markers
         self.aruco_detector.detect_markers(self.__image)
-
+
         # Project each AOI scene
         self.__aoi_scene_projections = {}
         for scene_name, scene in self.scenes.items():

-            # Filter scene markers
-            scene_markers, _ = scene.aruco_scene.filter_markers(self.aruco_detector.detected_markers)
-
             # Project scene
             try:

                 # Try to build AOI scene from detected ArUco marker corners
-                self.__aoi_scene_projections[scene_name] = scene.build_aruco_aoi_scene(scene_markers)
+                self.__aoi_scene_projections[scene_name] = scene.build_aruco_aoi_scene(self.aruco_detector.detected_markers)

             except:

                 # Estimate scene markers poses
                 self.aruco_detector.estimate_markers_pose(scene.aruco_scene.identifiers)
-
+
                 # Estimate scene pose from detected scene markers
-                tvec, rmat, _, _ = scene.estimate_pose(scene_markers)
-
+                tvec, rmat, _, _ = scene.estimate_pose(self.aruco_detector.detected_markers)
+
                 # Project AOI scene into video image according estimated pose
                 self.__aoi_scene_projections[scene_name] = scene.project(tvec, rmat)
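
With the `filter_markers` step gone, both projection tiers consume `detected_markers` directly: a cheap build from marker corners is attempted first, and any failure falls back to pose estimation. The control flow, reduced to a runnable sketch with placeholder functions and a placeholder pose:

    def build_from_corners(markers):
        if len(markers) < 4:
            raise ValueError('not enough marker corners')
        return 'projection-from-corners'

    def project_scene(markers):
        try:
            # Tier 1: direct build from detected marker corners
            return build_from_corners(markers)
        except Exception:
            # Tier 2: fall back to pose estimation and projection
            tvec, rmat = (0.0, 0.0, 0.0), 'identity'  # placeholder pose
            return f'projection-from-pose({tvec}, {rmat})'
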
@@ -276,7 +282,7 @@ class ArEnvironment():

                     # QUESTION: How to project gaze precision?
                     inner_gaze_position = GazeFeatures.GazePosition((inner_x, inner_y))

-                    screen.look(timestamp, inner_gaze_position)
+                    screen.look(timestamp, inner_gaze_position * screen.size)

             # Ignore missing aoi scene projection
             except KeyError:
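
`look()` now receives the position already scaled by `screen.size`, so each screen works purely in its own pixel space; `GazePosition * size` is presumably a component-wise product. A worked example under that assumption:

    inner = (0.5, 0.25)      # normalised (x, y) in [0, 1]
    size = (1920, 1080)      # screen size in pixels

    pixel = (inner[0] * size[0], inner[1] * size[1])
    print(pixel)             # (960.0, 270.0)
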
@@ -536,73 +542,129 @@ class ArScreen():

         name: name of the screen
         size: screen dimension in pixel.
         background: image to draw behind
-        aoi_screen: AOI 2D scene description ... : see [orthogonal_projection][argaze.ArFeatures.ArScene.orthogonal_projection] and [reframe][argaze.AreaOfInterest.AOI2DScene.reframe] functions.
+        aoi_2d_scene: AOI 2D scene description ... : see [orthogonal_projection][argaze.ArFeatures.ArScene.orthogonal_projection] and [reframe][argaze.AreaOfInterest.AOI2DScene.reframe] functions.
     """

     name: str
     size: tuple[int] = field(default=(1, 1))
     background: numpy.array = field(default_factory=numpy.array)
-    aoi_screen: AOI2DScene.AOI2DScene = field(default_factory=AOI2DScene.AOI2DScene)
+    aoi_2d_scene: AOI2DScene.AOI2DScene = field(default_factory=AOI2DScene.AOI2DScene)
     gaze_movement_identifier: GazeFeatures.GazeMovementIdentifier = field(default_factory=GazeFeatures.GazeMovementIdentifier)
+    scan_path: GazeFeatures.ScanPath = field(default_factory=GazeFeatures.ScanPath)
+    aoi_scan_path: GazeFeatures.AOIScanPath = field(default_factory=GazeFeatures.AOIScanPath)
+    heatmap: AOIFeatures.Heatmap = field(default_factory=AOIFeatures.Heatmap)

     def __post_init__(self):

         # Define scene attribute: it will be setup by parent scene later
         self._scene = None

-        # Init screen image
-        self.draw_background()
-
         # Init gaze data
         self.__gaze_position = GazeFeatures.UnvalidGazePosition()
-        self.__gaze_movement = GazeFeatures.UnvalidGazeMovement()
+
+        if self.heatmap:
+
+            self.heatmap.init()

     @classmethod
-    def from_scene(self, aoi_scene, aoi_name, size, background, gaze_movement_identifier) -> ArScreenType:
+    def from_scene(self, aoi_scene, aoi_name, size, background, gaze_movement_identifier, scan_path: bool = False, aoi_scan_path: bool = False, heatmap: bool = False) -> ArScreenType:

-        return ArScreen(aoi_name, size, background, aoi_scene.orthogonal_projection.reframe(aoi_name, size), gaze_movement_identifier)
+        aoi_scene_projection = aoi_scene.orthogonal_projection.reframe(aoi_name, size)
+
+        return ArScreen(aoi_name, \
+                        size, \
+                        background, \
+                        aoi_scene_projection, \
+                        gaze_movement_identifier, \
+                        GazeFeatures.ScanPath() if scan_path else None, \
+                        GazeFeatures.AOIScanPath(aoi_scene_projection.keys()) if aoi_scan_path else None, \
+                        AOIFeatures.Heatmap(size) if heatmap else None \
+                        )

     @property
-    def image(self):
-        """Get screen image."""
+    def current_gaze_position(self):
+        """Get current gaze position on screen."""

-        return self.__image
+        return self.__gaze_position

-    def look(self, timestamp: int|float, gaze_position: GazeFeatures.GazePosition):
+    @property
+    def current_gaze_movement(self):
+        """Get current gaze movement on screen."""
+
+        # Check current screen fixation
+        current_fixation = self.gaze_movement_identifier.current_fixation
+
+        if current_fixation.valid:
+
+            return current_fixation
+
+        # Check current screen saccade
+        current_saccade = self.gaze_movement_identifier.current_saccade

-        self.__gaze_position = gaze_position * self.size
+        if current_saccade.valid:
+
+            return current_saccade
+
+        return GazeFeatures.UnvalidGazeMovement()
+
+    def look(self, timestamp: int|float, inner_gaze_position: GazeFeatures.GazePosition):
+        """Process timestamped gaze position.
+
+        Raises:
+            GazeFeatures.AOIScanStepError
+        """
+
+        self.__gaze_position = inner_gaze_position
+
+        # Identify gaze movement
         if self.gaze_movement_identifier:

             # Identify gaze movement
-            self.__gaze_movement = self.gaze_movement_identifier.identify(timestamp, self.__gaze_position)
+            gaze_movement = self.gaze_movement_identifier.identify(timestamp, self.__gaze_position)
+
+            # QUESTION: How to notify new gaze movement?

-            if GazeFeatures.is_fixation(self.__gaze_movement):
+            if GazeFeatures.is_fixation(gaze_movement):

-                print(f'Fixation identified in {self.name} screen')
+                # Does the fixation match an AOI?
+                look_at = self.name
+                for name, aoi in self.aoi_2d_scene.items():

-            elif GazeFeatures.is_saccade(self.__gaze_movement):
+                    _, _, circle_ratio = aoi.circle_intersection(gaze_movement.focus, self.gaze_movement_identifier.deviation_max_threshold)

-                print(f'Saccade identified in {self.name} screen')
+                    if circle_ratio > 0.25:

-    def draw_background(self) -> ArScreenType:
-        """Initialize screen image with background image."""
+                        if name != self.name:

-        assert(self.background.shape[0] != self.size[0] or self.background.shape[1] != self.size[1])
+                            look_at = name
+                            break

-        self.__image = self.background.copy()
+                # Append fixation to scan path
+                if self.scan_path:

-    def draw_aoi(self, color=(255, 255, 255)):
-        """Draw aoi into screen image."""
+                    self.scan_path.append_fixation(timestamp, gaze_movement)
+
+                # Append fixation to aoi scan path
+                if self.aoi_scan_path:
+
+                    self.__aoi_scan_step = self.aoi_scan_path.append_fixation(timestamp, gaze_movement, look_at)
+
+                    # QUESTION: How to notify new step?
+
+            elif GazeFeatures.is_saccade(gaze_movement):
+
+                # Append saccade to scan path
+                if self.scan_path:
+
+                    self.__scan_step = self.scan_path.append_saccade(timestamp, gaze_movement)

-        self.aoi_screen.draw(self.__image, color)
+                    # QUESTION: How to notify new step?

-    def draw_gaze_position(self, color=(255, 255, 255)):
-        """Draw current gaze position into screen image."""
+                # Append saccade to aoi scan path
+                if self.aoi_scan_path:

-        self.__gaze_position.draw(self.__image, color)
+                    self.aoi_scan_path.append_saccade(timestamp, gaze_movement)

-    def draw_gaze_movement(self, color=(255, 255, 255)):
-        """Draw current gaze movement into screen image."""
+        # Update heatmap
+        if self.heatmap:

-        self.__gaze_movement.draw_positions(self.__image, color)
+            self.heatmap.update(self.__gaze_position.value, sigma=0.05)
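
A hedged usage sketch of the reworked `ArScreen` API: the construction arguments and the gaze source are assumed to exist in caller code; only `from_scene`, `look` and the `GazeFeatures.AOIScanStepError` hint come from this commit.

    screen = ArScreen.from_scene(aoi_scene,
                                 'main_screen',
                                 (1920, 1080),
                                 background,
                                 gaze_movement_identifier,
                                 scan_path=True,
                                 aoi_scan_path=True,
                                 heatmap=True)

    for timestamp, gaze_position in gaze_positions:

        try:
            # Positions are expected already scaled to screen pixels
            screen.look(timestamp, gaze_position)

        except GazeFeatures.AOIScanStepError:
            # The aoi scan path refused the step; skip and continue
            pass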