aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--src/argaze/ArFeatures.py123
1 files changed, 71 insertions, 52 deletions
diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py
index 07341de..5fa5cde 100644
--- a/src/argaze/ArFeatures.py
+++ b/src/argaze/ArFeatures.py
@@ -173,7 +173,8 @@ class ArLayer():
except KeyError:
- new_aoi_scene = AOIFeatures.AOIScene()
+ # Add AOI 2D Scene by default
+ new_aoi_scene = AOI2DScene.AOI2DScene()
# Looked aoi validity threshold
try:
@@ -186,7 +187,7 @@ class ArLayer():
# Edit expected AOI list by removing AOI with name equals to layer name
expected_aois = list(new_aoi_scene.keys())
-
+
if new_layer_name in expected_aois:
expected_aois.remove(new_layer_name)
@@ -1053,9 +1054,9 @@ class ArScene():
# Preprocess orthogonal projection to speed up further processings
self.__orthogonal_projection_cache = {}
- for name, layer in self.layers.items():
+ for layer_name, layer in self.layers.items():
- self.__orthogonal_projection_cache[name] = self.aoi_scene.orthogonal_projection
+ self.__orthogonal_projection_cache[layer_name] = layer.aoi_scene.orthogonal_projection
def __str__(self) -> str:
"""
@@ -1158,18 +1159,20 @@ class ArScene():
# Look for AOI with same frame name
aoi_frame = None
+ aoi_frame_found = False
for layer_name, layer in new_layers.items():
try:
aoi_frame = layer.aoi_scene[frame_name]
+ aoi_frame_found = True
except KeyError:
# AOI name should be unique
break
- if aoi_frame:
+ if aoi_frame_found:
# Project and reframe each layers into corresponding frame layers
for frame_layer_name, frame_layer in new_frame.layers.items():
@@ -1178,7 +1181,10 @@ class ArScene():
layer = new_layers[frame_layer_name]
- frame_layer.aoi_scene = layer.aoi_scene.orthogonal_projection.reframe(aoi_frame, new_frame.size)
+ layer_aoi_scene_projection = layer.aoi_scene.orthogonal_projection
+ aoi_frame_projection = layer_aoi_scene_projection[frame_name]
+
+ frame_layer.aoi_scene = layer_aoi_scene_projection.reframe(aoi_frame_projection, new_frame.size)
if frame_layer.aoi_scan_path != None:
@@ -1195,7 +1201,7 @@ class ArScene():
continue
# Append new frame
- new_frames[name] = new_frame
+ new_frames[frame_name] = new_frame
except KeyError:
@@ -1266,7 +1272,7 @@ class ArScene():
return tvec, rmat, 'estimate_pose_from_markers', consistent_markers
- def project(self, tvec: numpy.array, rvec: numpy.array, visual_hfov: float = 0.) -> dict:
+ def project(self, tvec: numpy.array, rvec: numpy.array, visual_hfov: float = 0.) -> Tuple[str, AOI2DScene.AOI2DScene]:
"""Project layers according estimated pose and optional horizontal field of view clipping angle.
Parameters:
@@ -1275,11 +1281,10 @@ class ArScene():
visual_hfov: horizontal field of view clipping angle
Returns:
- layer_projections: dictionary of AOI2DScene projection
+ layer_name: name of projected layer
+ layer_projection: AOI2DScene projection
"""
- layer_projections = {}
-
for name, layer in self.layers.items():
# Clip AOI out of the visual horizontal field of view (optional)
@@ -1302,9 +1307,7 @@ class ArScene():
aoi_scene_copy = layer.aoi_scene.copy()
# Project layer aoi scene
- layer_projections[name] = aoi_scene_copy.project(tvec, rvec, self.parent.aruco_detector.optic_parameters.K)
-
- return projected_layers
+ yield name, aoi_scene_copy.project(tvec, rvec, self.parent.aruco_detector.optic_parameters.K)
def build_aruco_aoi_scene(self, detected_markers) -> AOI2DScene.AOI2DScene:
"""
@@ -1485,23 +1488,25 @@ class ArEnvironment():
# Setup expected aoi of each camera frame layer aoi scan path with the aoi of corresponding scene layer
if new_camera_frame != None:
- for camera_frame_layer_name, camera_frame_layer in new_camera_frame.layers.items():
+ for camera_frame_layer_name, camera_frame_layer in new_camera_frame.layers.items():
- all_aoi_list = []
+ if camera_frame_layer.aoi_scan_path != None:
- for scene_name, scene in new_scenes.items():
+ all_aoi_list = []
- try:
+ for scene_name, scene in new_scenes.items():
- scene_layer = scene.layers[camera_frame_layer_name]
+ try:
- all_aoi_list.extend(list(scene_layer.aoi_scene.keys()))
+ scene_layer = scene.layers[camera_frame_layer_name]
- except KeyError:
+ all_aoi_list.extend(list(scene_layer.aoi_scene.keys()))
+
+ except KeyError:
- continue
+ continue
- camera_frame_layer.aoi_scan_path.expected_aois = all_aoi_list
+ camera_frame_layer.aoi_scan_path.expected_aois = all_aoi_list
# Create new environment
return ArEnvironment(new_environment_name, new_aruco_detector, new_camera_frame, new_scenes)
@@ -1584,8 +1589,10 @@ class ArEnvironment():
# Fill camera frame background with image
self.camera_frame.background = image
- # Clear former scenes projection into camera frame
- self.camera_frame.aoi_2d_scene = AOI2DScene.AOI2DScene()
+ # Clear former layers projection into camera frame
+ for came_layer_name, camera_layer in self.camera_frame.layers.items():
+
+ camera_layer.aoi_scene = AOI2DScene.AOI2DScene()
# Store exceptions for each scene
exceptions = {}
@@ -1615,7 +1622,15 @@ class ArEnvironment():
tvec, rmat, _, _ = scene.estimate_pose(self.aruco_detector.detected_markers)
# Project scene into camera frame according estimated pose
- self.camera_frame.aoi_2d_scene |= scene.project(tvec, rmat)
+ for layer_name, layer_projection in scene.project(tvec, rmat):
+
+ try:
+
+ self.camera_frame.layers[layer_name].aoi_scene |= layer_projection
+
+ except KeyError:
+
+ pass
# Store exceptions and continue
except Exception as e:
@@ -1649,28 +1664,30 @@ class ArEnvironment():
# Project gaze position into camera frame
yield self.camera_frame, self.camera_frame.look(timestamp, gaze_position)
- # Project gaze position into each frames if possible
+ # Project gaze position into each frame if possible
for frame in self.frames:
- # Is there an AOI with the same frame name projected into camera frame ?
- try:
+ # Is there an AOI inside the camera frame layers projection whose name equals a frame name?
+ for camera_layer_name, camera_layer in self.camera_frame.layers.items():
- aoi_2d = self.camera_frame.aoi_2d_scene[frame.name]
+ try:
- # TODO: Add option to use gaze precision circle
- if aoi_2d.contains_point(gaze_position.value):
+ aoi_2d = camera_layer.aoi_scene[frame.name]
- inner_x, inner_y = aoi_2d.clockwise().inner_axis(gaze_position.value)
+ # TODO: Add option to use gaze precision circle
+ if aoi_2d.contains_point(gaze_position.value):
- # QUESTION: How to project gaze precision?
- inner_gaze_position = GazeFeatures.GazePosition((inner_x, inner_y))
-
- yield frame, frame.look(timestamp, inner_gaze_position * frame.size)
+ inner_x, inner_y = aoi_2d.clockwise().inner_axis(gaze_position.value)
+
+ # QUESTION: How to project gaze precision?
+ inner_gaze_position = GazeFeatures.GazePosition((inner_x, inner_y))
+
+ yield frame, frame.look(timestamp, inner_gaze_position * frame.size)
- # Ignore missing aoi frame projection
- except KeyError:
+ # Ignore missing aoi in camera frame layer projection
+ except KeyError:
- pass
+ pass
# Unlock camera frame exploitation
self.__camera_frame_lock.release()
@@ -1688,24 +1705,26 @@ class ArEnvironment():
# Lock camera frame exploitation
self.__camera_frame_lock.acquire()
- # Project image if possible
+ # Project image into each frame if possible
for frame in self.frames:
- # Is there an AOI with the same frame name projected into camera frame ?
- try:
+ # Is there an AOI inside the camera frame layers projection whose name equals a frame name?
+ for camera_layer_name, camera_layer in self.camera_frame.layers.items():
- aoi_2d = self.camera_frame.aoi_2d_scene[frame.name]
+ try:
- # Apply perspective transform algorithm to fill aoi frame background
- width, height = frame.size
- destination = numpy.float32([[0, height],[width, height],[width, 0],[0, 0]])
- mapping = cv2.getPerspectiveTransform(aoi_2d.astype(numpy.float32), destination)
- frame.background = cv2.warpPerspective(self.camera_frame.background, mapping, (width, height))
+ aoi_2d = camera_layer.aoi_scene[frame.name]
- # Ignore missing frame projection
- except KeyError:
+ # Apply perspective transform algorithm to fill aoi frame background
+ width, height = frame.size
+ destination = numpy.float32([[0, height],[width, height],[width, 0],[0, 0]])
+ mapping = cv2.getPerspectiveTransform(aoi_2d.astype(numpy.float32), destination)
+ frame.background = cv2.warpPerspective(self.camera_frame.background, mapping, (width, height))
- pass
+ # Ignore missing frame projection
+ except KeyError:
+
+ pass
# Unlock camera frame exploitation
self.__camera_frame_lock.release()