aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--src/argaze/ArFeatures.py160
1 file changed, 154 insertions, 6 deletions
diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py
index 3101a45..321bd1b 100644
--- a/src/argaze/ArFeatures.py
+++ b/src/argaze/ArFeatures.py
@@ -11,10 +11,12 @@ from typing import TypeVar, Tuple
from dataclasses import dataclass, field
import json
import os
+import importlib
-from argaze import DataStructures
+from argaze import DataStructures, GazeFeatures
from argaze.ArUcoMarkers import *
from argaze.AreaOfInterest import *
+from argaze.GazeAnalysis import *
import numpy
@@ -48,6 +50,9 @@ class ArEnvironment():
for name, scene in self.scenes.items():
scene._environment = self
+ # Init AOI scene projections
+ self.__aoi_scene_projections = {}
+
@classmethod
def from_json(self, json_filepath: str) -> ArSceneType:
"""
@@ -139,8 +144,23 @@ class ArEnvironment():
new_screen_size = screen_data.pop('size')
+ # Load gaze movement identifier
+ try:
+
+ gaze_movement_identifier_value = screen_data.pop('gaze_movement_identifier')
+
+ gaze_movement_identifier_type = gaze_movement_identifier_value['type']
+ gaze_movement_identifier_parameters = gaze_movement_identifier_value['parameters']
+
+ gaze_movement_identifier_module = importlib.import_module(f'argaze.GazeAnalysis.{gaze_movement_identifier_type}')
+ gaze_movement_identifier = gaze_movement_identifier_module.GazeMovementIdentifier(**gaze_movement_identifier_parameters)
+
+ except:
+
+ gaze_movement_identifier = None
+
# Append new screen
- new_screens[screen_name] = ArScreen.from_scene(new_aoi_scene, screen_name, new_screen_size)
+ new_screens[screen_name] = ArScreen.from_scene(new_aoi_scene, screen_name, new_screen_size, gaze_movement_identifier)
# Append new scene
new_scenes[scene_name] = ArScene(new_aruco_scene, new_aoi_scene, new_screens, **scene_data)
@@ -161,6 +181,100 @@ class ArEnvironment():
return output
+ @property
+ def image(self):
+ """Get environment image and screen images."""
+
+ # Draw detected markers
+ self.aruco_detector.draw_detected_markers(self.__image)
+
+ # Draw each AOI scene
+ for scene_name, aoi_scene_projection in self.__aoi_scene_projections.items():
+
+ # Draw AOI scene projection
+ aoi_scene_projection.draw(self.__image, color=(255, 255, 255))
+
+ return self.__image
+
+ def screens_image(self):
+ """Generate each screen image"""
+
+ # Draw each scene screens
+ for scene_name, scene in self.scenes.items():
+
+ for screen_name, screen in scene.screens.items():
+
+ screen.draw_aoi()
+ screen.draw_gaze_position()
+ screen.draw_gaze_movement()
+
+ yield scene_name, screen_name, screen.image
+
+ def detect_and_project(self, image: numpy.array) -> dict:
+ """Detect environment aruco markers from image and project scenes."""
+
+ self.__image = image
+
+ # Detect aruco markers
+ self.aruco_detector.detect_markers(self.__image)
+
+ # Project each AOI scene
+ self.__aoi_scene_projections = {}
+ for scene_name, scene in self.scenes.items():
+
+ # Filter scene markers
+ scene_markers, _ = scene.aruco_scene.filter_markers(self.aruco_detector.detected_markers)
+
+ # Reset each scene screens
+ for screen_name, screen in scene.screens.items():
+
+ screen.init()
+
+ # Project scene
+ try:
+
+ # Try to build AOI scene from detected ArUco marker corners
+ self.__aoi_scene_projections[scene_name] = scene.build_aruco_aoi_scene(scene_markers)
+
+ except:
+
+ # Estimate scene markers poses
+ self.aruco_detector.estimate_markers_pose(scene.aruco_scene.identifiers)
+
+ # Estimate scene pose from detected scene markers
+ tvec, rmat, _, _ = scene.estimate_pose(scene_markers)
+
+ # Project AOI scene into video image according to the estimated pose
+ self.__aoi_scene_projections[scene_name] = scene.project(tvec, rmat)
+
+ def look(self, timestamp: int|float, gaze_position: GazeFeatures.GazePosition):
+ """Project gaze position into environment at a particular time."""
+
+ # For each aoi scene projection
+ for scene_name, scene in self.scenes.items():
+
+ try:
+
+ aoi_scene_projection = self.__aoi_scene_projections[scene_name]
+
+ # For each scene screens
+ for screen_name, screen in scene.screens.items():
+
+ # TODO: Add option to use gaze precision circle
+ if aoi_scene_projection[screen.name].contains_point(gaze_position.value):
+
+ inner_x, inner_y = self.__aoi_scene_projections[scene_name][screen.name].clockwise().inner_axis(gaze_position.value)
+
+ # QUESTION: How to project gaze precision?
+ inner_gaze_position = GazeFeatures.GazePosition((inner_x, inner_y))
+
+ screen.look(timestamp, inner_gaze_position)
+
+ # Ignore missing aoi scene projection
+ except KeyError:
+
+ pass
+
def to_json(self, json_filepath):
"""Save environment to .json file."""
@@ -419,6 +533,7 @@ class ArScreen():
name: str
size: tuple[int] = field(default=(1, 1))
aoi_screen: AOI2DScene.AOI2DScene = field(default_factory=AOI2DScene.AOI2DScene)
+ gaze_movement_identifier: GazeFeatures.GazeMovementIdentifier = field(default_factory=GazeFeatures.GazeMovementIdentifier)
def __post_init__(self):
@@ -428,10 +543,14 @@ class ArScreen():
# Init screen
self.init()
+ # Init gaze data
+ self.__gaze_position = GazeFeatures.UnvalidGazePosition()
+ self.__gaze_movement = None
+
@classmethod
- def from_scene(self, aoi_scene, aoi_name, size) -> ArScreenType:
+ def from_scene(self, aoi_scene, aoi_name, size, gaze_movement_identifier) -> ArScreenType:
- return ArScreen(aoi_name, size, aoi_scene.orthogonal_projection.reframe(aoi_name, size))
+ return ArScreen(aoi_name, size, aoi_scene.orthogonal_projection.reframe(aoi_name, size), gaze_movement_identifier)
@property
def image(self):
@@ -440,11 +559,40 @@ class ArScreen():
return self.__image
def init(self) -> ArScreenType:
- """Initialize screen image."""
+ """Initialize screen image and gaze position."""
self.__image = numpy.zeros((self.size[1], self.size[0], 3)).astype(numpy.uint8)
- def draw_aoi(self, color=(255, 255, 255)) -> ArScreenType:
+ def look(self, timestamp: int|float, gaze_position: GazeFeatures.GazePosition):
+
+ self.__gaze_position = gaze_position * self.size
+
+ if self.gaze_movement_identifier:
+
+ # Identify gaze movement
+ self.__gaze_movement = self.gaze_movement_identifier.identify(timestamp, self.__gaze_position)
+
+ if GazeFeatures.is_fixation(self.__gaze_movement):
+
+ print(f'Fixation identified in {self.name} screen')
+
+ elif GazeFeatures.is_saccade(self.__gaze_movement):
+
+ print(f'Saccade identified in {self.name} screen')
+
+ def draw_aoi(self, color=(255, 255, 255)):
"""Draw aoi into screen image."""
self.aoi_screen.draw(self.__image, color)
+
+ def draw_gaze_position(self, color=(255, 255, 255)):
+ """Draw current gaze position into screen image."""
+
+ self.__gaze_position.draw(self.__image, color)
+
+ def draw_gaze_movement(self, color=(255, 255, 255)):
+ """Draw current gaze movement into screen image."""
+
+ if self.__gaze_movement:
+
+ self.__gaze_movement.draw(self.__image, color)