aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorThéo de la Hogue2023-07-03 09:51:32 +0200
committerThéo de la Hogue2023-07-03 09:51:32 +0200
commit410e20473f891f12748b6ae8441ba2d3eeaead9a (patch)
tree7e37c735447b146df17e93e7a94385942964339e
parent4f32cdaad425acd1feb76ec1a04a2265e0319a03 (diff)
downloadargaze-410e20473f891f12748b6ae8441ba2d3eeaead9a.zip
argaze-410e20473f891f12748b6ae8441ba2d3eeaead9a.tar.gz
argaze-410e20473f891f12748b6ae8441ba2d3eeaead9a.tar.bz2
argaze-410e20473f891f12748b6ae8441ba2d3eeaead9a.tar.xz
Removing ArEnvironment image attribute. Adding a threading feature to share scene projection between multiple threads.
-rw-r--r--src/argaze/ArFeatures.py106
-rw-r--r--src/argaze/utils/demo_ar_features_run.py8
2 files changed, 80 insertions, 34 deletions
diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py
index 7f1618c..d3ce874 100644
--- a/src/argaze/ArFeatures.py
+++ b/src/argaze/ArFeatures.py
@@ -13,6 +13,7 @@ import json
import os
import importlib
from inspect import getmembers
+import threading
from argaze import DataStructures, GazeFeatures
from argaze.ArUcoMarkers import *
@@ -64,6 +65,9 @@ class ArEnvironment():
# Init AOI scene projections
self.__aoi_2d_scenes = {}
+ # Init a lock to share AOI scene projections between multiple threads
+ self.__aoi_2d_scenes_lock = threading.Lock()
+
@classmethod
def from_json(self, json_filepath: str) -> ArSceneType:
"""
@@ -303,21 +307,6 @@ class ArEnvironment():
return output
@property
- def image(self):
- """Get ArUco detection visualisation and scenes projections."""
-
- # Draw detected markers
- self.aruco_detector.draw_detected_markers(self.__image)
-
- # Draw each AOI scene
- for scene_name, aoi_2d_scene in self.__aoi_2d_scenes.items():
-
- # Draw AOI scene projection
- aoi_2d_scene.draw(self.__image, color=(255, 255, 255))
-
- return self.__image
-
- @property
def screens(self):
"""Iterate over all environment screens"""
@@ -332,34 +321,51 @@ class ArEnvironment():
def detect_and_project(self, image: numpy.array) -> dict:
"""Detect environment aruco markers from image and project scenes."""
- self.__image = image
-
# Detect aruco markers
- self.aruco_detector.detect_markers(self.__image)
+ self.aruco_detector.detect_markers(image)
# Project each AOI scene
- self.__aoi_2d_scenes = {}
+ new_aoi_2d_scenes = {}
for scene_name, scene in self.scenes.items():
# Project scene
try:
# Try to build AOI scene from detected ArUco marker corners
- self.__aoi_2d_scenes[scene_name] = scene.build_aruco_aoi_scene(self.aruco_detector.detected_markers)
+ new_aoi_2d_scenes[scene_name] = scene.build_aruco_aoi_scene(self.aruco_detector.detected_markers)
- except:
+ except SceneProjectionFailed:
# Estimate scene markers poses
self.aruco_detector.estimate_markers_pose(scene.aruco_scene.identifiers)
-
+
# Estimate scene pose from detected scene markers
tvec, rmat, _, _ = scene.estimate_pose(self.aruco_detector.detected_markers)
-
+
# Project AOI scene into video image according estimated pose
- self.__aoi_2d_scenes[scene_name] = scene.project(tvec, rmat)
+ new_aoi_2d_scenes[scene_name] = scene.project(tvec, rmat)
+
+ # Lock scene projections exploitation
+ self.__aoi_2d_scenes_lock.acquire()
+
+ # Copy scene projections
+ self.__aoi_2d_scenes = new_aoi_2d_scenes.copy()
+
+ # Unlock scene projections exploitation
+ self.__aoi_2d_scenes_lock.release()
def look(self, timestamp: int|float, gaze_position: GazeFeatures.GazePosition):
- """Project gaze position into environment at particular time."""
+ """Project timestamped gaze position into current scene projections."""
+
+ # Can't use scene projection when it is locked
+ if self.__aoi_2d_scenes_lock.locked():
+
+ #TODO: Store ignored timestamped gaze positions for further projections
+ print('Ignoring ', timestamp, gaze_position)
+ return
+
+ # Lock scene projections
+ self.__aoi_2d_scenes_lock.acquire()
# For each aoi scene projection
for scene_name, scene in self.scenes.items():
@@ -379,13 +385,19 @@ class ArEnvironment():
# QUESTION: How to project gaze precision?
inner_gaze_position = GazeFeatures.GazePosition((inner_x, inner_y))
- screen.look(timestamp, inner_gaze_position * screen.size)
+ gaze_movement, look_at, scan_step, aoi_scan_step = screen.look(timestamp, inner_gaze_position * screen.size)
+
+ # NOT COMPATIBLE without for loop
+ #yield scene_name, screen_name, screen, gaze_movement, look_at, scan_step, aoi_scan_step
# Ignore missing aoi scene projection
except KeyError:
pass
+ # Unlock scene projections
+ self.__aoi_2d_scenes_lock.release()
+
def to_json(self, json_filepath):
"""Save environment to .json file."""
@@ -393,6 +405,18 @@ class ArEnvironment():
json.dump(self, file, ensure_ascii=False, indent=4, cls=DataStructures.JsonEncoder)
+ def draw(self, image: numpy.array):
+ """Draw ArUco detection visualisation and scenes projections."""
+
+ # Draw detected markers
+ self.aruco_detector.draw_detected_markers(image)
+
+ # Draw each AOI scene
+ for scene_name, aoi_2d_scene in self.__aoi_2d_scenes.items():
+
+ # Draw AOI scene projection
+ aoi_2d_scene.draw(image, color=(255, 255, 255))
+
class PoseEstimationFailed(Exception):
"""
Exception raised by ArScene estimate_pose method when the pose can't be estimated due to unconsistencies.
@@ -479,7 +503,7 @@ class ArScene():
# Pose estimation fails when no marker is detected
if len(detected_markers) == 0:
-
+
raise PoseEstimationFailed('No marker detected')
scene_markers, _ = self.aruco_scene.filter_markers(detected_markers)
@@ -575,6 +599,11 @@ class ArScene():
built AOI 2D scene
"""
+ # Check aruco aoi is defined
+ if len(self.aruco_aoi) == 0:
+
+ raise SceneProjectionFailed('No aruco aoi is defined')
+
# AOI projection fails when no marker is detected
if len(detected_markers) == 0:
@@ -713,10 +742,21 @@ class ArScreen():
"""
GazeFeatures.AOIScanStepError
+
+ Returns:
+ gaze_movement: identified gaze movement (if gaze_movement_identifier is intanciated)
+ scan_step: new scan step (if scan_path is intanciated)
+ aoi_scan_step: new scan step (if aoi_scan_path is intanciated)
"""
self.__gaze_position = inner_gaze_position
+ # Prepare return
+ gaze_movement = None
+ look_at = self.name
+ scan_step = None
+ aoi_scan_step = None
+
# Identify gaze movement
if self.gaze_movement_identifier:
@@ -726,7 +766,6 @@ class ArScreen():
if GazeFeatures.is_fixation(gaze_movement):
# Does the fixation match an AOI?
- look_at = self.name
for name, aoi in self.aoi_2d_scene.items():
_, _, circle_ratio = aoi.circle_intersection(gaze_movement.focus, gaze_movement.deviation_max)
@@ -746,10 +785,10 @@ class ArScreen():
# Append fixation to aoi scan path
if self.aoi_scan_path != None:
- self.__aoi_scan_step = self.aoi_scan_path.append_fixation(timestamp, gaze_movement, look_at)
+ aoi_scan_step = self.aoi_scan_path.append_fixation(timestamp, gaze_movement, look_at)
# Analyze aoi scan path
- if self.__aoi_scan_step and len(self.aoi_scan_path) > 1:
+ if aoi_scan_step and len(self.aoi_scan_path) > 1:
for aoi_scan_path_analyzer_type, aoi_scan_path_analyzer in self.aoi_scan_path_analyzers.items():
@@ -760,10 +799,10 @@ class ArScreen():
# Append saccade to scan path
if self.scan_path != None:
- self.__scan_step = self.scan_path.append_saccade(timestamp, gaze_movement)
+ scan_step = self.scan_path.append_saccade(timestamp, gaze_movement)
# Analyze aoi scan path
- if self.__scan_step and len(self.scan_path) > 1:
+ if scan_step and len(self.scan_path) > 1:
for scan_path_analyzer_type, scan_path_analyzer in self.scan_path_analyzers.items():
@@ -778,3 +817,6 @@ class ArScreen():
if self.heatmap:
self.heatmap.update(self.__gaze_position.value, sigma=0.05)
+
+ # Return
+ return gaze_movement, look_at, scan_step, aoi_scan_step
diff --git a/src/argaze/utils/demo_ar_features_run.py b/src/argaze/utils/demo_ar_features_run.py
index 39c69e7..0f2c2c2 100644
--- a/src/argaze/utils/demo_ar_features_run.py
+++ b/src/argaze/utils/demo_ar_features_run.py
@@ -76,9 +76,12 @@ def main():
cv2.putText(video_image, f'Error: {e}', (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
# Draw environment
- cv2.imshow(ar_environment.name, ar_environment.image)
+ ar_environment.draw(video_image)
- # Draw each screens
+ # Display environment
+ cv2.imshow(ar_environment.name, video_image)
+
+ # Draw and display each screens
for scene_name, screen_name, screen in ar_environment.screens:
image = screen.background.copy()
@@ -95,6 +98,7 @@ def main():
# Draw looked AOI
screen.aoi_2d_scene.draw_circlecast(image, screen.current_gaze_movement.focus, screen.current_gaze_movement.deviation_max, base_color=(0, 0, 0), matching_color=(255, 255, 255))
+ # Display screen
cv2.imshow(f'{scene_name}:{screen_name}', image)
# Stop by pressing 'Esc' key