author    Théo de la Hogue    2023-11-07 15:54:45 +0100
committer Théo de la Hogue    2023-11-07 15:54:45 +0100
commit    78ce6ffc892ef7d64a8d1da0dbdfcbf34d214bbd (patch)
tree      4509c14aa1800d2666c50c47549a044e5a6c11d0 /src
parent    bc9257268bb54ea68f777cbb853dc6498274dd99 (diff)
parent    f8b1a36c9e486ef19f62159475b9bf19a5b90a03 (diff)
Merge branch 'master' of ssh://git.recherche.enac.fr/interne-ihm-aero/eye-tracking/argaze
Diffstat (limited to 'src')
-rw-r--r--  src/argaze.test/AreaOfInterest/AOI2DScene.py | 6
-rw-r--r--  src/argaze.test/AreaOfInterest/AOI3DScene.py | 6
-rw-r--r--  src/argaze.test/AreaOfInterest/AOIFeatures.py | 8
-rw-r--r--  src/argaze.test/GazeAnalysis/ExploreExploitRatio.py (renamed from src/argaze.test/GazeAnalysis/ExploitExploreRatio.py) | 6
-rw-r--r--  src/argaze.test/GazeFeatures.py | 4
-rw-r--r--  src/argaze/ArFeatures.py | 322
-rw-r--r--  src/argaze/ArUcoMarkers/ArUcoCamera.py | 42
-rw-r--r--  src/argaze/ArUcoMarkers/ArUcoDetector.py | 108
-rw-r--r--  src/argaze/ArUcoMarkers/ArUcoMarker.py | 4
-rw-r--r--  src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py | 412
-rw-r--r--  src/argaze/ArUcoMarkers/ArUcoScene.py | 32
-rw-r--r--  src/argaze/AreaOfInterest/AOI2DScene.py | 119
-rw-r--r--  src/argaze/AreaOfInterest/AOI3DScene.py | 19
-rw-r--r--  src/argaze/AreaOfInterest/AOIFeatures.py | 144
-rw-r--r--  src/argaze/DataStructures.py | 25
-rw-r--r--  src/argaze/GazeAnalysis/Basic.py | 23
-rw-r--r--  src/argaze/GazeAnalysis/DeviationCircleCoverage.py | 35
-rw-r--r--  src/argaze/GazeAnalysis/DispersionThresholdIdentification.py | 13
-rw-r--r--  src/argaze/GazeAnalysis/ExploreExploitRatio.py (renamed from src/argaze/GazeAnalysis/ExploitExploreRatio.py) | 21
-rw-r--r--  src/argaze/GazeAnalysis/FocusPointInside.py | 8
-rw-r--r--  src/argaze/GazeAnalysis/KCoefficient.py | 29
-rw-r--r--  src/argaze/GazeAnalysis/LinearRegression.py | 107
-rw-r--r--  src/argaze/GazeAnalysis/TransitionMatrix.py | 2
-rw-r--r--  src/argaze/GazeAnalysis/VelocityThresholdIdentification.py | 13
-rw-r--r--  src/argaze/GazeAnalysis/__init__.py | 2
-rw-r--r--  src/argaze/GazeFeatures.py | 146
-rw-r--r--  src/argaze/utils/aruco_markers_group_export.py | 160
-rw-r--r--  src/argaze/utils/aruco_markers_scene_export.py | 176
-rw-r--r--  src/argaze/utils/demo_aruco_markers_run.py | 76
-rw-r--r--  src/argaze/utils/demo_data/aoi_2d_scene.json | 18
-rw-r--r--  src/argaze/utils/demo_data/aoi_3d_scene.obj | 50
-rw-r--r--  src/argaze/utils/demo_data/demo_aruco_markers_setup.json | 54
-rw-r--r--  src/argaze/utils/demo_data/demo_gaze_analysis_setup.json | 35
-rw-r--r--  src/argaze/utils/demo_gaze_analysis_run.py | 26
34 files changed, 1290 insertions, 961 deletions
diff --git a/src/argaze.test/AreaOfInterest/AOI2DScene.py b/src/argaze.test/AreaOfInterest/AOI2DScene.py
index 4e96e98..10ff430 100644
--- a/src/argaze.test/AreaOfInterest/AOI2DScene.py
+++ b/src/argaze.test/AreaOfInterest/AOI2DScene.py
@@ -187,14 +187,14 @@ class TestTimeStampedAOIScenesClass(unittest.TestCase):
aoi_2D_B = AOIFeatures.AreaOfInterest([[1, 1], [1, 2], [2, 2], [2, 1]])
aoi_2d_scene = AOI2DScene.AOI2DScene({"A": aoi_2D_A, "B": aoi_2D_B})
- ts_aois_scenes = AOIFeatures.TimeStampedAOIScenes()
+ ts_aoi_scenes = AOIFeatures.TimeStampedAOIScenes()
- ts_aois_scenes[0] = aoi_2d_scene
+ ts_aoi_scenes[0] = aoi_2d_scene
# Check that only AOIScene can be added
with self.assertRaises(AssertionError):
- ts_aois_scenes[1] = "This string is not an AOI2DScene"
+ ts_aoi_scenes[1] = "This string is not an AOI2DScene"
if __name__ == '__main__':
diff --git a/src/argaze.test/AreaOfInterest/AOI3DScene.py b/src/argaze.test/AreaOfInterest/AOI3DScene.py
index b386432..d09f2a8 100644
--- a/src/argaze.test/AreaOfInterest/AOI3DScene.py
+++ b/src/argaze.test/AreaOfInterest/AOI3DScene.py
@@ -107,14 +107,14 @@ class TestTimeStampedAOIScenesClass(unittest.TestCase):
aoi_3D_B = AOIFeatures.AreaOfInterest([[1, 1, 0], [1, 2, 0], [2, 2, 0], [2, 1, 0]])
aoi_3d_scene = AOI3DScene.AOI3DScene({"A": aoi_3D_A, "B": aoi_3D_B})
- ts_aois_scenes = AOIFeatures.TimeStampedAOIScenes()
+ ts_aoi_scenes = AOIFeatures.TimeStampedAOIScenes()
- ts_aois_scenes[0] = aoi_3d_scene
+ ts_aoi_scenes[0] = aoi_3d_scene
# Check that only AOIScene can be added
with self.assertRaises(AssertionError):
- ts_aois_scenes[1] = "This string is not an AOI3DScene"
+ ts_aoi_scenes[1] = "This string is not an AOI3DScene"
if __name__ == '__main__':
diff --git a/src/argaze.test/AreaOfInterest/AOIFeatures.py b/src/argaze.test/AreaOfInterest/AOIFeatures.py
index cc75ed8..cb8fb52 100644
--- a/src/argaze.test/AreaOfInterest/AOIFeatures.py
+++ b/src/argaze.test/AreaOfInterest/AOIFeatures.py
@@ -118,13 +118,17 @@ class TestAreaOfInterestClass(unittest.TestCase):
aoi_2D = AOIFeatures.AreaOfInterest([[0, 0], [0, 2], [2, 2], [2, 0]])
- self.assertEqual(aoi_2D.inner_axis((1, 1)), (0.5, 0.5))
+ self.assertEqual(aoi_2D.inner_axis(1, 1), (0.5, 0.5))
def test_outter_axis(self):
aoi_2D = AOIFeatures.AreaOfInterest([[0, 0], [0, 2], [2, 2], [2, 0]])
- self.assertEqual(aoi_2D.outter_axis((0.5, 0.5)), (1, 1))
+ self.assertEqual(aoi_2D.outter_axis(0.5, 0.5), (1, 1))
+
+ aoi_3D = AOIFeatures.AreaOfInterest([[1, 0, 0], [1, 0, 2], [1, 2, 2], [1, 2, 0]])
+
+ self.assertEqual(aoi_3D.outter_axis(0.5, 0.5), (1, 1, 1))
def test_circle_intersection(self):
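These tests switch inner_axis/outter_axis from a single tuple argument to unpacked coordinates, and show outter_axis mapping normalized coordinates back onto 2D and 3D quads. A minimal sketch reproducing the asserted values, assuming bilinear interpolation over the four AOI corners (the helper below is illustrative, not argaze code):

import numpy

def outter_axis_sketch(corners, x: float, y: float) -> tuple:
    # Bilinearly interpolate a normalized (x, y) position over a 4-corner AOI
    c0, c1, c2, c3 = numpy.asarray(corners, dtype=float)
    start = c0 + (c3 - c0) * x   # interpolate along the c0->c3 edge
    end = c1 + (c2 - c1) * x     # interpolate along the c1->c2 edge
    return tuple(map(float, start + (end - start) * y))

# Matches the assertions above
print(outter_axis_sketch([[0, 0], [0, 2], [2, 2], [2, 0]], 0.5, 0.5))              # (1.0, 1.0)
print(outter_axis_sketch([[1, 0, 0], [1, 0, 2], [1, 2, 2], [1, 2, 0]], 0.5, 0.5))  # (1.0, 1.0, 1.0)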
diff --git a/src/argaze.test/GazeAnalysis/ExploitExploreRatio.py b/src/argaze.test/GazeAnalysis/ExploreExploitRatio.py
index 0e6b74a..7b323d4 100644
--- a/src/argaze.test/GazeAnalysis/ExploitExploreRatio.py
+++ b/src/argaze.test/GazeAnalysis/ExploreExploitRatio.py
@@ -10,7 +10,7 @@ __license__ = "BSD"
import unittest
from argaze import GazeFeatures
-from argaze.GazeAnalysis import ExploitExploreRatio
+from argaze.GazeAnalysis import ExploreExploitRatio
from argaze.utils import UtilsFeatures
GazeFeaturesTest = UtilsFeatures.importFromTestPackage('GazeFeatures')
@@ -21,7 +21,7 @@ class TestScanPathAnalyzer(unittest.TestCase):
def test_analyze(self):
"""Test analyze method."""
- xxr_analyzer = ExploitExploreRatio.ScanPathAnalyzer()
+ xxr_analyzer = ExploreExploitRatio.ScanPathAnalyzer()
scan_path = GazeFeaturesTest.build_scan_path(10)
@@ -31,7 +31,7 @@ class TestScanPathAnalyzer(unittest.TestCase):
xxr_analyzer.analyze(scan_path)
# Check explore exploit ratio: it should be greater than 1 because of build_scan_path
- self.assertGreaterEqual(xxr_analyzer.exploit_explore_ratio, 1.)
+ self.assertGreaterEqual(xxr_analyzer.explore_exploit_ratio, 1.)
if __name__ == '__main__':
diff --git a/src/argaze.test/GazeFeatures.py b/src/argaze.test/GazeFeatures.py
index d609dd2..b41c7c7 100644
--- a/src/argaze.test/GazeFeatures.py
+++ b/src/argaze.test/GazeFeatures.py
@@ -497,10 +497,10 @@ class TestAOIScanStepClass(unittest.TestCase):
aoi_scan_step = GazeFeatures.AOIScanStep(movements, 'Test')
-def build_aoi_scan_path(expected_aois, aoi_path):
+def build_aoi_scan_path(expected_aoi, aoi_path):
"""Build AOI scan path"""
- aoi_scan_path = GazeFeatures.AOIScanPath(expected_aois)
+ aoi_scan_path = GazeFeatures.AOIScanPath(expected_aoi)
# Append a hidden last step to allow last given step creation
aoi_path.append(aoi_path[-2])
diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py
index 96976c2..5ec6b7e 100644
--- a/src/argaze/ArFeatures.py
+++ b/src/argaze/ArFeatures.py
@@ -7,7 +7,7 @@ __credits__ = []
__copyright__ = "Copyright 2023, Ecole Nationale de l'Aviation Civile (ENAC)"
__license__ = "BSD"
-from typing import TypeVar, Tuple
+from typing import TypeVar, Tuple, Any
from dataclasses import dataclass, field
import json
import os
@@ -96,7 +96,7 @@ DEFAULT_ARLAYER_DRAW_PARAMETERS = {
@dataclass
class ArLayer():
"""
- Defines a space where to make matching of gaze movements and AOIs and inside which those matchings need to be analyzed.
+ Defines a space where to make matching of gaze movements and AOI and inside which those matchings need to be analyzed.
Parameters:
name: name of the layer
@@ -180,6 +180,11 @@ class ArLayer():
new_aoi_scene = AOIFeatures.AOIScene.from_json(filepath)
+ # SVG file format for 2D dimension only
+ if file_format == 'svg':
+
+ new_aoi_scene = AOI2DScene.AOI2DScene.from_svg(filepath)
+
# OBJ file format for 3D dimension only
elif file_format == 'obj':
@@ -192,14 +197,16 @@ class ArLayer():
except KeyError:
+ pass
+
# Add AOI 2D Scene by default
new_aoi_scene = AOI2DScene.AOI2DScene()
# Edit expected AOI list by removing AOI with name equals to layer name
- expected_aois = list(new_aoi_scene.keys())
+ expected_aoi = list(new_aoi_scene.keys())
- if new_layer_name in expected_aois:
- expected_aois.remove(new_layer_name)
+ if new_layer_name in expected_aoi:
+ expected_aoi.remove(new_layer_name)
# Load aoi matcher
try:
@@ -223,13 +230,13 @@ class ArLayer():
try:
new_aoi_scan_path_data = layer_data.pop('aoi_scan_path')
- new_aoi_scan_path_data['expected_aois'] = expected_aois
+ new_aoi_scan_path_data['expected_aoi'] = expected_aoi
new_aoi_scan_path = GazeFeatures.AOIScanPath(**new_aoi_scan_path_data)
except KeyError:
new_aoi_scan_path_data = {}
- new_aoi_scan_path_data['expected_aois'] = expected_aois
+ new_aoi_scan_path_data['expected_aoi'] = expected_aoi
new_aoi_scan_path = None
# Load AOI scan path analyzers
@@ -367,6 +374,9 @@ class ArLayer():
# Lock layer exploitation
self.__look_lock.acquire()
+ # Store look execution start date
+ look_start = time.perf_counter()
+
# Update current gaze movement
self.__gaze_movement = gaze_movement
@@ -445,23 +455,13 @@ class ArLayer():
looked_aoi = None
aoi_scan_path_analysis = {}
exception = e
+
+ # Assess total execution time in ms
+ execution_times['total'] = (time.perf_counter() - look_start) * 1e3
# Unlock layer exploitation
self.__look_lock.release()
- # Sum all execution times
- total_execution_time = 0
-
- if execution_times['aoi_matcher']:
-
- total_execution_time += execution_times['aoi_matcher']
-
- for _, aoi_scan_path_analysis_time in execution_times['aoi_scan_step_analyzers'].items():
-
- total_execution_time += aoi_scan_path_analysis_time
-
- execution_times['total'] = total_execution_time
-
# Return look data
return looked_aoi, aoi_scan_path_analysis, execution_times, exception
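This refactor replaces summing per-stage timings with a single wall-clock measurement around the whole look call, using time.perf_counter. The timing pattern in isolation, as a standalone sketch:

import time

def timed_ms(func, *args, **kwargs):
    # Run func and return its result with the elapsed time in milliseconds
    start = time.perf_counter()
    result = func(*args, **kwargs)
    return result, (time.perf_counter() - start) * 1e3

result, total_ms = timed_ms(sum, range(100000))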
@@ -471,7 +471,7 @@ class ArLayer():
Parameters:
draw_aoi_scene: AreaOfInterest.AOI2DScene.draw parameters (if None, no aoi scene is drawn)
- draw_aoi_matching: AOIMatcher.draw parameters (which depends of the loaded aoi matcher module, if None, no aoi matching is drawn)
+ draw_aoi_matching: AOIMatcher.draw parameters (which depends on the loaded aoi matcher module; if None, no aoi matching is drawn)
"""
# Use draw_parameters attribute if no parameters
@@ -484,7 +484,7 @@ class ArLayer():
# Draw aoi if required
if draw_aoi_scene is not None:
-
+
self.aoi_scene.draw(image, **draw_aoi_scene)
# Draw aoi matching if required
@@ -523,7 +523,8 @@ class ArFrame():
Parameters:
name: name of the frame
- size: defines the dimension of the rectangular area where gaze positions are projected.
+ size: defines the dimension of the rectangular area where gaze positions are projected
+ gaze_position_calibrator: gaze position calibration algorithm
gaze_movement_identifier: gaze movement identification algorithm
filter_in_progress_identification: ignore in progress gaze movement identification
scan_path: scan path object
@@ -537,6 +538,7 @@ class ArFrame():
name: str
size: tuple[int] = field(default=(1, 1))
+ gaze_position_calibrator: GazeFeatures.GazePositionCalibrator = field(default_factory=GazeFeatures.GazePositionCalibrator)
gaze_movement_identifier: GazeFeatures.GazeMovementIdentifier = field(default_factory=GazeFeatures.GazeMovementIdentifier)
filter_in_progress_identification: bool = field(default=True)
scan_path: GazeFeatures.ScanPath = field(default_factory=GazeFeatures.ScanPath)
@@ -600,6 +602,31 @@ class ArFrame():
new_frame_size = (0, 0)
+ # Load gaze position calibrator
+ try:
+
+ gaze_position_calibrator_value = frame_data.pop('gaze_position_calibrator')
+
+ # str: relative path to file
+ if type(gaze_position_calibrator_value) == str:
+
+ filepath = os.path.join(working_directory, gaze_position_calibrator_value)
+ file_format = filepath.split('.')[-1]
+
+ # JSON file format
+ if file_format == 'json':
+
+ new_gaze_position_calibrator = GazeFeatures.GazePositionCalibrator.from_json(filepath)
+
+ # dict:
+ else:
+
+ new_gaze_position_calibrator = GazeFeatures.GazePositionCalibrator.from_dict(gaze_position_calibrator_value)
+
+ except KeyError:
+
+ new_gaze_position_calibrator = None
+
# Load gaze movement identifier
try:
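The calibrator entry accepts either a relative path to a JSON file or an inline dictionary. A minimal sketch of that dispatch pattern, with a hypothetical load_config_section helper standing in for the logic above (from_json and from_dict are the GazeFeatures.GazePositionCalibrator classmethods used in the hunk):

import os

def load_config_section(value, working_directory, from_json, from_dict):
    # Accept a relative file path (str) or an inline dictionary
    if isinstance(value, str):
        filepath = os.path.join(working_directory, value)
        if filepath.split('.')[-1] == 'json':
            return from_json(filepath)
        raise ValueError(f'unexpected file format: {filepath}')
    return from_dict(value)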
@@ -728,11 +755,6 @@ class ArFrame():
# Create layer
new_layer = ArLayer.from_dict(layer_data, working_directory)
- # Project 3D aoi scene layer to get only 2D aoi scene
- if new_layer.aoi_scene.dimension == 3:
-
- new_layer.aoi_scene = new_layer.aoi_scene.orthogonal_projection * new_frame_size
-
# Append new layer
new_layers[layer_name] = new_layer
@@ -761,6 +783,7 @@ class ArFrame():
# Create frame
return ArFrame(new_frame_name, \
new_frame_size, \
+ new_gaze_position_calibrator, \
new_gaze_movement_identifier, \
filter_in_progress_identification, \
new_scan_path, \
@@ -808,7 +831,7 @@ class ArFrame():
return self.__ts_logs
- def look(self, timestamp: int|float, gaze_position: GazeFeatures.GazePosition = GazeFeatures.UnvalidGazePosition()) -> Tuple[GazeFeatures.GazeMovement, dict, dict, dict, Exception]:
+ def look(self, timestamp: int|float, gaze_position: GazeFeatures.GazePosition = GazeFeatures.UnvalidGazePosition()) -> Tuple[GazeFeatures.GazePosition, GazeFeatures.GazeMovement, dict, dict, dict, Exception]:
"""
Project gaze position into frame.
@@ -820,6 +843,7 @@ class ArFrame():
gaze_position: gaze position to project
Returns:
+ current_gaze_position: calibrated gaze position if gaze_position_calibrator is instantiated, else the given gaze position.
identified_gaze_movement: identified gaze movement from incoming consecutive timestamped gaze positions if gaze_movement_identifier is instantiated. Current gaze movement if filter_in_progress_identification is False.
scan_path_analysis: scan path analysis at each new scan step if scan_path is instantiated.
layers_analysis: aoi scan path analysis at each new aoi scan step for each instantiated layer's aoi scan path.
@@ -830,8 +854,8 @@ class ArFrame():
# Lock frame exploitation
self.__look_lock.acquire()
- # Update current gaze position
- self.__gaze_position = gaze_position
+ # Store look execution start date
+ look_start = time.perf_counter()
# No gaze movement identified by default
identified_gaze_movement = GazeFeatures.UnvalidGazeMovement()
@@ -855,6 +879,16 @@ class ArFrame():
try:
+ # Apply gaze position calibration
+ if self.gaze_position_calibrator is not None:
+
+ self.__gaze_position = self.gaze_position_calibrator.apply(gaze_position)
+
+ # Or update gaze position at least
+ else:
+
+ self.__gaze_position = gaze_position
+
# Identify gaze movement
if self.gaze_movement_identifier is not None:
@@ -944,45 +978,29 @@ class ArFrame():
print('Warning: the following error occurs in ArFrame.look method:', e)
+ self.__gaze_position = GazeFeatures.UnvalidGazePosition()
identified_gaze_movement = GazeFeatures.UnvalidGazeMovement()
scan_step_analysis = {}
layer_analysis = {}
exception = e
-
- # Unlock frame exploitation
- self.__look_lock.release()
-
- # Sum all execution times
- total_execution_time = 0
-
- if execution_times['gaze_movement_identifier']:
-
- total_execution_time += execution_times['gaze_movement_identifier']
-
- for _, scan_step_analysis_time in execution_times['scan_step_analyzers'].items():
-
- total_execution_time += scan_step_analysis_time
-
- if execution_times['heatmap']:
- total_execution_time += execution_times['heatmap']
+ # Assess total execution time in ms
+ execution_times['total'] = (time.perf_counter() - look_start) * 1e3
- for _, layer_execution_times in execution_times['layers'].items():
-
- total_execution_time += layer_execution_times['total']
-
- execution_times['total'] = total_execution_time
+ # Unlock frame exploitation
+ self.__look_lock.release()
# Return look data
- return identified_gaze_movement, scan_step_analysis, layer_analysis, execution_times, exception
+ return self.__gaze_position, identified_gaze_movement, scan_step_analysis, layer_analysis, execution_times, exception
- def __image(self, background_weight: float = None, heatmap_weight: float = None, draw_scan_path: dict = None, draw_layers: dict = None, draw_gaze_positions: dict = None, draw_fixations: dict = None, draw_saccades: dict = None) -> numpy.array:
+ def __image(self, background_weight: float = None, heatmap_weight: float = None, draw_gaze_position_calibrator: dict = None, draw_scan_path: dict = None, draw_layers: dict = None, draw_gaze_positions: dict = None, draw_fixations: dict = None, draw_saccades: dict = None) -> numpy.array:
"""
Get background image with overlaid visualisations.
Parameters:
background_weight: weight of background overlay
heatmap_weight: weight of heatmap overlay
+ draw_gaze_position_calibrator: [GazeFeatures.GazePositionCalibrator.draw](argaze.md/#argaze.GazeFeatures.GazePositionCalibrator.draw) parameters (if None, nothing is drawn)
draw_scan_path: [GazeFeatures.ScanPath.draw](argaze.md/#argaze.GazeFeatures.ScanPath.draw) parameters (if None, no scan path is drawn)
draw_layers: dictionary of [ArLayer.draw](argaze.md/#argaze.ArFeatures.ArLayer.draw) parameters per layer (if None, no layer is drawn)
draw_gaze_positions: [GazeFeatures.GazePosition.draw](argaze.md/#argaze.GazeFeatures.GazePosition.draw) parameters (if None, no gaze position is drawn)
@@ -1015,18 +1033,16 @@ class ArFrame():
image = numpy.full((self.size[1], self.size[0], 3), 0).astype(numpy.uint8)
+ # Draw gaze position calibrator
+ if draw_gaze_position_calibrator is not None:
+
+ self.gaze_position_calibrator.draw(image, size=self.size, **draw_gaze_position_calibrator)
+
# Draw scan path if required
if draw_scan_path is not None and self.scan_path is not None:
self.scan_path.draw(image, **draw_scan_path)
- # Draw layers if required
- if draw_layers is not None:
-
- for layer_name, draw_layer in draw_layers.items():
-
- self.layers[layer_name].draw(image, **draw_layer)
-
# Draw current fixation if required
if draw_fixations is not None and self.gaze_movement_identifier is not None:
@@ -1037,6 +1053,13 @@ class ArFrame():
self.gaze_movement_identifier.current_saccade.draw(image, **draw_saccades)
+ # Draw layers if required
+ if draw_layers is not None:
+
+ for layer_name, draw_layer in draw_layers.items():
+
+ self.layers[layer_name].draw(image, **draw_layer)
+
# Draw current gaze position if required
if draw_gaze_positions is not None:
@@ -1047,7 +1070,7 @@ class ArFrame():
return image
- def image(self, **kwargs) -> numpy.array:
+ def image(self, **kwargs: dict) -> numpy.array:
"""
Get frame image.
@@ -1067,15 +1090,10 @@ class ArScene():
Define abstract Augmented Reality scene with ArLayers and ArFrames inside.
Parameters:
-
name: name of the scene
-
layers: dictionary of ArLayers to project once the pose is estimated: see [project][argaze.ArFeatures.ArScene.project] function below.
-
frames: dictionary to ArFrames to project once the pose is estimated: see [project][argaze.ArFeatures.ArScene.project] function below.
-
angle_tolerance: Optional angle error tolerance to validate marker pose in degree used into [estimate_pose][argaze.ArFeatures.ArScene.estimate_pose] function.
-
distance_tolerance: Optional distance error tolerance to validate marker pose in centimeter used into [estimate_pose][argaze.ArFeatures.ArScene.estimate_pose] function.
"""
name: str
@@ -1099,13 +1117,6 @@ class ArScene():
frame.parent = self
- # Preprocess orthogonal projection to speed up further processings
- self.__orthogonal_projection_cache = {}
-
- for layer_name, layer in self.layers.items():
-
- self.__orthogonal_projection_cache[layer_name] = layer.aoi_scene.orthogonal_projection
-
def __str__(self) -> str:
"""
Returns:
@@ -1184,54 +1195,70 @@ class ArScene():
for frame_name, frame_data in scene_data.pop('frames').items():
- # Append name
- frame_data['name'] = frame_name
+ # str: relative path to file
+ if type(frame_data) == str:
+
+ filepath = os.path.join(working_directory, frame_data)
+ file_format = filepath.split('.')[-1]
+
+ # JSON file format for 2D or 3D dimension
+ if file_format == 'json':
- # Create frame
- new_frame = ArFrame.from_dict(frame_data, working_directory)
+ new_frame = ArFrame.from_json(filepath)
- # Look for AOI with same frame name
- aoi_frame = None
- aoi_frame_found = False
- for layer_name, layer in new_layers.items():
+ # dict:
+ else:
+
+ # Append name
+ frame_data['name'] = frame_name
+
+ new_frame = ArFrame.from_dict(frame_data, working_directory)
+
+ # Look for a scene layer with an AOI named like the frame
+ for scene_layer_name, scene_layer in new_layers.items():
try:
- aoi_frame = layer.aoi_scene[frame_name]
- aoi_frame_found = True
+ frame_3d = scene_layer.aoi_scene[frame_name]
- except KeyError:
+ # Check that the frame has a layer named like this scene layer
+ aoi_2d_scene = new_frame.layers[scene_layer_name].aoi_scene
- # AOI name should be unique
- break
+ # Transform 2D frame layer AOI into 3D scene layer AOI
+ # Then, add them to scene layer
+ scene_layer.aoi_scene |= aoi_2d_scene.dimensionalize(frame_3d, new_frame.size)
- if aoi_frame_found:
+ '''DEPRECATED: but maybe still useful?
+ # Project and reframe each layers into corresponding frame layers
+ for frame_layer_name, frame_layer in new_frame.layers.items():
- # Project and reframe each layers into corresponding frame layers
- for frame_layer_name, frame_layer in new_frame.layers.items():
+ try:
- try:
+ layer = new_layers[frame_layer_name]
+
+ layer_aoi_scene_projection = layer.aoi_scene.orthogonal_projection
+ aoi_frame_projection = layer_aoi_scene_projection[frame_name]
- layer = new_layers[frame_layer_name]
-
- layer_aoi_scene_projection = layer.aoi_scene.orthogonal_projection
- aoi_frame_projection = layer_aoi_scene_projection[frame_name]
+ frame_layer.aoi_scene = layer_aoi_scene_projection.reframe(aoi_frame_projection, new_frame.size)
- frame_layer.aoi_scene = layer_aoi_scene_projection.reframe(aoi_frame_projection, new_frame.size)
+ if frame_layer.aoi_scan_path is not None:
- if frame_layer.aoi_scan_path is not None:
+ # Edit expected AOI list by removing AOI with name equals to frame layer name
+ expected_aoi = list(layer.aoi_scene.keys())
- # Edit expected AOI list by removing AOI with name equals to frame layer name
- expected_aois = list(layer.aoi_scene.keys())
+ if frame_layer_name in expected_aoi:
+ expected_aoi.remove(frame_layer_name)
- if frame_layer_name in expected_aois:
- expected_aois.remove(frame_layer_name)
+ frame_layer.aoi_scan_path.expected_aoi = expected_aoi
- frame_layer.aoi_scan_path.expected_aois = expected_aois
+ except KeyError:
- except KeyError:
+ continue
+ '''
- continue
+ except KeyError as e:
+
+ print(e)
# Append new frame
new_frames[frame_name] = new_frame
@@ -1242,7 +1269,7 @@ class ArScene():
return ArScene(new_scene_name, new_layers, new_frames, **scene_data)
- def estimate_pose(self, detected_features) -> Tuple[numpy.array, numpy.array]:
+ def estimate_pose(self, detected_features: Any) -> Tuple[numpy.array, numpy.array]:
"""Define abstract estimate scene pose method.
Parameters:
@@ -1255,13 +1282,14 @@ class ArScene():
raise NotImplementedError('estimate_pose() method not implemented')
- def project(self, tvec: numpy.array, rvec: numpy.array, visual_hfov: float = 0.) -> Tuple[str, AOI2DScene.AOI2DScene]:
- """Project layers according estimated pose and optional horizontal field of view clipping angle.
+ def project(self, tvec: numpy.array, rvec: numpy.array, visual_hfov: float = 0., visual_vfov: float = 0.) -> Tuple[str, AOI2DScene.AOI2DScene]:
+ """Project layers according estimated pose and optional field of view clipping angles.
Parameters:
tvec: translation vector
rvec: rotation vector
visual_hfov: horizontal field of view clipping angle
+ visual_vfov: vertical field of view clipping angle
Returns:
layer_name: name of projected layer
@@ -1271,6 +1299,7 @@ class ArScene():
for name, layer in self.layers.items():
# Clip AOI out of the visual horizontal field of view (optional)
+ # TODO: use HFOV and VFOV and don't use vision_cone method
if visual_hfov > 0:
# Transform layer aoi scene into camera referential
@@ -1292,7 +1321,7 @@ class ArScene():
# Project layer aoi scene
yield name, aoi_scene_copy.project(tvec, rvec, self.parent.aruco_detector.optic_parameters.K)
- def draw(self, image: numpy.array, **kwargs):
+ def draw(self, image: numpy.array, **kwargs: dict):
"""
Draw scene into image.
@@ -1309,9 +1338,13 @@ class ArCamera(ArFrame):
Parameters:
scenes: all scenes to project into camera frame
+ visual_hfov: Optional angle in degrees to clip scenes projection according to the visual horizontal field of view (HFOV).
+ visual_vfov: Optional angle in degrees to clip scenes projection according to the visual vertical field of view (VFOV).
"""
scenes: dict = field(default_factory=dict)
+ visual_hfov: float = field(default=0.)
+ visual_vfov: float = field(default=0.)
def __post_init__(self):
@@ -1324,31 +1357,45 @@ class ArCamera(ArFrame):
scene.parent = self
# Setup expected aoi of each layer aoi scan path with the aoi of corresponding scene layer
+ # Edit aoi matcher exclude attribute to ignore frame aoi
for layer_name, layer in self.layers.items():
if layer.aoi_scan_path is not None:
- all_aoi_list = []
+ expected_aoi_list = []
+ exclude_aoi_list = []
for scene_name, scene in self.scenes.items():
+ # Append scene layer aoi to corresponding expected camera layer aoi
try:
scene_layer = scene.layers[layer_name]
- all_aoi_list.extend(list(scene_layer.aoi_scene.keys()))
+ expected_aoi_list.extend(list(scene_layer.aoi_scene.keys()))
except KeyError:
continue
- layer.aoi_scan_path.expected_aois = all_aoi_list
+ # Remove scene frame from expected camera layer aoi
+ # Exclude scene frame from camera layer aoi matching
+ for frame_name, frame in scene.frames.items():
+
+ try:
+
+ expected_aoi_list.remove(frame_name)
+ exclude_aoi_list.append(frame_name)
+
+ except ValueError:
+
+ continue
+
+ layer.aoi_scan_path.expected_aoi = expected_aoi_list
+ layer.aoi_matcher.exclude = exclude_aoi_list
# Init a lock to share scene projections into camera frame between multiple threads
self._frame_lock = threading.Lock()
-
- # Define public timestamp buffer to store ignored gaze positions
- self.ignored_gaze_positions = GazeFeatures.TimeStampedGazePositions()
def __str__(self) -> str:
"""
@@ -1399,24 +1446,44 @@ class ArCamera(ArFrame):
yield scene_frame
def watch(self, image: numpy.array) -> Tuple[float, dict]:
- """Detect AR features from image and project scenes into camera frame."""
+ """Detect AR features from image and project scenes into camera frame.
+
+ Returns:
+ detection time: AR features detection time in ms.
+ exception: dictionary with exception raised per scene.
+ """
raise NotImplementedError('watch() method not implemented')
def look(self, timestamp: int|float, gaze_position: GazeFeatures.GazePosition):
"""Project timestamped gaze position into each scene frames.
- !!! warning watch method needs to be called first.
+ Parameters:
+ timestamp: gaze position time stamp (unit doesn't matter)
+ gaze_position: GazePosition object
+
+ !!! warning
+ watch method needs to be called first.
"""
- # Can't use camera frame when it is locked
- if self._frame_lock.locked():
+ # Can't use camera frame while it is locked
+ wait_start = time.perf_counter()
+ waiting_time = 0
+
+ while self._frame_lock.locked():
- # TODO: Store ignored timestamped gaze positions for further projections
- # PB: This would imply to also store frame projections !!!
- self.ignored_gaze_positions[timestamp] = gaze_position
+ time.sleep(1e-6)
+ waiting_time = (time.perf_counter() - wait_start) * 1e3
- return None, None
+ # TODO? return waiting time?
+
+ # TODO? add timeout parameter?
+ #if waiting_time > timeout:
+ # return None, None
+
+ # DEBUG
+ #if waiting_time > 0:
+ # print(f'ArCamera: waiting {waiting_time:.3f} ms before to process gaze position at {timestamp} time.')
# Lock camera frame exploitation
self._frame_lock.acquire()
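Instead of dropping gaze positions while the camera frame is locked, look now polls the lock with microsecond sleeps; the commented TODO hints at a timeout. A bounded variant of that pattern, as a sketch under the assumption that a timeout would be wanted:

import threading
import time

def wait_for_lock(lock: threading.Lock, timeout: float = 100.) -> float:
    # Poll the lock until it is free or timeout (in ms) elapses; return waited time in ms
    start = time.perf_counter()
    while lock.locked():
        time.sleep(1e-6)
        if (time.perf_counter() - start) * 1e3 > timeout:
            break
    return (time.perf_counter() - start) * 1e3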
@@ -1437,7 +1504,7 @@ class ArCamera(ArFrame):
# TODO?: Should we prefer to use camera frame AOIMatcher object?
if aoi_2d.contains_point(gaze_position.value):
- inner_x, inner_y = aoi_2d.clockwise().inner_axis(gaze_position.value)
+ inner_x, inner_y = aoi_2d.clockwise().inner_axis(*gaze_position.value)
# QUESTION: How to project gaze precision?
inner_gaze_position = GazeFeatures.GazePosition((inner_x, inner_y))
@@ -1455,7 +1522,8 @@ class ArCamera(ArFrame):
def map(self):
"""Project camera frame background into scene frames background.
- .. warning:: watch method needs to be called first.
+ !!! warning
+ watch method needs to be called first.
"""
# Can't use camera frame when it is locked
@@ -1477,7 +1545,7 @@ class ArCamera(ArFrame):
# Apply perspective transform algorithm to fill aoi frame background
width, height = frame.size
- destination = numpy.float32([[0, height],[width, height],[width, 0],[0, 0]])
+ destination = numpy.float32([[0, 0], [width, 0], [width, height], [0, height]])
mapping = cv2.getPerspectiveTransform(aoi_2d.astype(numpy.float32), destination)
frame.background = cv2.warpPerspective(self.background, mapping, (width, height))
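The destination corners are reordered to start at the top-left and run clockwise, so they pair correctly with the clockwise source AOI corners. The OpenCV mapping as a self-contained sketch (hypothetical unwarp_aoi helper, real OpenCV API):

import numpy
import cv2

def unwarp_aoi(camera_image, aoi_corners, size):
    # Rectify a quadrilateral AOI from a camera image into a (width, height) frame
    width, height = size
    # Destination corners in the same clockwise, top-left-first order as the source
    destination = numpy.float32([[0, 0], [width, 0], [width, height], [0, height]])
    mapping = cv2.getPerspectiveTransform(aoi_corners.astype(numpy.float32), destination)
    return cv2.warpPerspective(camera_image, mapping, (width, height))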
@@ -1489,7 +1557,7 @@ class ArCamera(ArFrame):
# Unlock camera frame exploitation
self._frame_lock.release()
- def image(self, **kwargs) -> numpy.array:
+ def image(self, **kwargs: dict) -> numpy.array:
"""
Get frame image.
diff --git a/src/argaze/ArUcoMarkers/ArUcoCamera.py b/src/argaze/ArUcoMarkers/ArUcoCamera.py
index 4f555fb..ed6c619 100644
--- a/src/argaze/ArUcoMarkers/ArUcoCamera.py
+++ b/src/argaze/ArUcoMarkers/ArUcoCamera.py
@@ -11,6 +11,7 @@ from typing import TypeVar, Tuple
from dataclasses import dataclass, field
import json
import os
+import time
from argaze import ArFeatures, DataStructures
from argaze.ArUcoMarkers import ArUcoMarkersDictionary, ArUcoDetector, ArUcoOpticCalibrator, ArUcoScene
@@ -37,6 +38,7 @@ class ArUcoCamera(ArFeatures.ArCamera):
"""
Define an ArCamera based on ArUco marker detection.
+ Parameters:
aruco_detector: ArUco marker detector
"""
@@ -73,7 +75,7 @@ class ArUcoCamera(ArFeatures.ArCamera):
return output
@classmethod
- def from_dict(self, aruco_camera_data, working_directory: str = None) -> ArUcoCameraType:
+ def from_dict(self, aruco_camera_data: dict, working_directory: str = None) -> ArUcoCameraType:
"""
Load ArUcoCamera from dictionary.
@@ -140,13 +142,14 @@ class ArUcoCamera(ArFeatures.ArCamera):
return ArUcoCamera.from_dict(aruco_camera_data, working_directory)
- def watch(self, image: numpy.array) -> Tuple[float, dict]:
+ def watch(self, image: numpy.array) -> Tuple[float, float, dict]:
"""Detect environment aruco markers from image and project scenes into camera frame.
Returns:
- - detection_time: aruco marker detection time in ms
- - exceptions: dictionary with exception raised per scene
- """
+ detection time: aruco marker detection time in ms.
+ projection time: scenes projection time in ms.
+ exception: dictionary with exception raised per scene.
+ """
# Detect aruco markers
detection_time = self.aruco_detector.detect_markers(image)
@@ -154,6 +157,9 @@ class ArUcoCamera(ArFeatures.ArCamera):
# Lock camera frame exploitation
self._frame_lock.acquire()
+ # Store projection execution start date
+ projection_start = time.perf_counter()
+
# Fill camera frame background with image
self.background = image
@@ -183,14 +189,11 @@ class ArUcoCamera(ArFeatures.ArCamera):
try:
- # Estimate scene markers poses
- self.aruco_detector.estimate_markers_pose(scene.aruco_markers_group.identifiers)
-
# Estimate scene pose from detected scene markers
- tvec, rmat, _, _ = scene.estimate_pose(self.aruco_detector.detected_markers)
+ tvec, rmat, _ = scene.estimate_pose(self.aruco_detector.detected_markers)
# Project scene into camera frame according estimated pose
- for layer_name, layer_projection in scene.project(tvec, rmat):
+ for layer_name, layer_projection in scene.project(tvec, rmat, self.visual_hfov, self.visual_vfov):
try:
@@ -205,20 +208,23 @@ class ArUcoCamera(ArFeatures.ArCamera):
exceptions[scene_name] = e
+ # Assess projection time in ms
+ projection_time = (time.perf_counter() - projection_start) * 1e3
+
# Unlock camera frame exploitation
self._frame_lock.release()
- # Return dection time and exceptions
- return detection_time, exceptions
+ # Return detection time, projection time and exceptions
+ return detection_time, projection_time, exceptions
- def __image(self, draw_detected_markers: dict = None, draw_scenes: dict = None, draw_optic_parameters_grid: dict = None, **kwargs) -> numpy.array:
+ def __image(self, draw_detected_markers: dict = None, draw_scenes: dict = None, draw_optic_parameters_grid: dict = None, **kwargs: dict) -> numpy.array:
"""Get frame image with ArUco detection visualisation.
Parameters:
- draw_detected_markers: ArucoMarker.draw parameters (if None, no marker drawn)
- draw_scenes: ArUcoScene.draw parameters (if None, no scene drawn)
- draw_optic_parameters_grid: OpticParameter.draw parameters (if None, no grid drawn)
- kwargs: ArCamera.image parameters
+ draw_detected_markers: ArucoMarker.draw parameters (if None, no marker drawn)
+ draw_scenes: ArUcoScene.draw parameters (if None, no scene drawn)
+ draw_optic_parameters_grid: OpticParameter.draw parameters (if None, no grid drawn)
+ kwargs: ArCamera.image parameters
"""
# Can't use camera frame when it is locked
@@ -253,7 +259,7 @@ class ArUcoCamera(ArFeatures.ArCamera):
return image
- def image(self, **kwargs) -> numpy.array:
+ def image(self, **kwargs: dict) -> numpy.array:
"""
Get frame image.
diff --git a/src/argaze/ArUcoMarkers/ArUcoDetector.py b/src/argaze/ArUcoMarkers/ArUcoDetector.py
index 82c9394..e62a42e 100644
--- a/src/argaze/ArUcoMarkers/ArUcoDetector.py
+++ b/src/argaze/ArUcoMarkers/ArUcoDetector.py
@@ -38,7 +38,8 @@ ArUcoDetectorType = TypeVar('ArUcoDetector', bound="ArUcoDetector")
class DetectorParameters():
"""Wrapper class around ArUco marker detector parameters.
- .. note:: More details on [opencv page](https://docs.opencv.org/4.x/d1/dcd/structcv_1_1aruco_1_1DetectorParameters.html)
+ !!! note
+ More details on [opencv page](https://docs.opencv.org/4.x/d1/dcd/structcv_1_1aruco_1_1DetectorParameters.html)
"""
__parameters = aruco.DetectorParameters()
@@ -71,7 +72,8 @@ class DetectorParameters():
'minOtsuStdDev',
'perspectiveRemoveIgnoredMarginPerCell',
'perspectiveRemovePixelPerCell',
- 'polygonalApproxAccuracyRate'
+ 'polygonalApproxAccuracyRate',
+ 'useAruco3Detection'
]
def __init__(self, **kwargs):
@@ -98,8 +100,17 @@ class DetectorParameters():
return DetectorParameters(**json.load(configuration_file))
- def __str__(self, print_all=False) -> str:
- """Detector paremeters string representation."""
+ def __str__(self) -> str:
+ """Detector parameters string representation."""
+
+ return f'{self}'
+
+ def __format__(self, spec: str) -> str:
+ """Formated detector parameters string representation.
+
+ Parameters:
+ spec: 'modified' to get only modified parameters.
+ """
output = ''
@@ -109,7 +120,7 @@ class DetectorParameters():
output += f'\t*{parameter}: {getattr(self.__parameters, parameter)}\n'
- elif print_all:
+ elif spec == "":
output += f'\t{parameter}: {getattr(self.__parameters, parameter)}\n'
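Routing the old print_all switch through __format__ lets callers pick verbosity with a format spec, while __str__ simply delegates via f'{self}'. The idiom in isolation (illustrative class, not argaze code):

class Parameters:

    def __init__(self, **modified):
        self.defaults = {'a': 1, 'b': 2}
        self.modified = modified

    def __str__(self) -> str:
        return f'{self}'  # calls __format__ with an empty spec

    def __format__(self, spec: str) -> str:
        # 'modified' spec restricts output to explicitly set parameters
        items = self.modified if spec == 'modified' else {**self.defaults, **self.modified}
        return ', '.join(f'{k}: {v}' for k, v in items.items())

print(f'{Parameters(b=3):modified}')  # b: 3
print(Parameters(b=3))                # a: 1, b: 3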
@@ -121,26 +132,24 @@ class DetectorParameters():
@dataclass
class ArUcoDetector():
- """ArUco markers detector."""
+ """ArUco markers detector.
- dictionary: ArUcoMarkersDictionary.ArUcoMarkersDictionary = field(default_factory=ArUcoMarkersDictionary.ArUcoMarkersDictionary)
- """ArUco markers dictionary to detect."""
+ Parameters:
+ dictionary: ArUco markers dictionary to detect.
+ marker_size: Size of ArUco markers to detect in centimeters.
+ optic_parameters: Optic parameters to use for ArUco detection into image.
+ parameters: ArUco detector parameters.
+ """
+ dictionary: ArUcoMarkersDictionary.ArUcoMarkersDictionary = field(default_factory=ArUcoMarkersDictionary.ArUcoMarkersDictionary)
marker_size: float = field(default=0.)
- """Size of ArUco markers to detect in centimeter."""
-
optic_parameters: ArUcoOpticCalibrator.OpticParameters = field(default_factory=ArUcoOpticCalibrator.OpticParameters)
- """Optic parameters to use for ArUco detection into image."""
-
parameters: DetectorParameters = field(default_factory=DetectorParameters)
- """ArUco detector parameters."""
def __post_init__(self):
# Init detected markers data
self.__detected_markers = {}
- self.__detected_markers_corners = []
- self.__detected_markers_ids = []
# Init detected board data
self.__board = None
@@ -249,39 +258,41 @@ class ArUcoDetector():
def detect_markers(self, image: numpy.array) -> float:
"""Detect all ArUco markers into an image.
- .. danger:: DON'T MIRROR IMAGE
- It makes the markers detection to fail.
+ !!! danger "DON'T MIRROR IMAGE"
+ It makes the markers detection fail.
+
+ !!! danger "DON'T UNDISTORED IMAGE"
+ Camera intrisic parameters and distorsion coefficients are used later during pose estimation.
Returns:
- - detection time: marker detection time in ms
+ detection time: marker detection time in ms.
"""
# Reset detected markers data
- self.__detected_markers, self.__detected_markers_corners, self.__detected_markers_ids = {}, [], []
+ self.__detected_markers, detected_markers_corners, detected_markers_ids = {}, [], []
# Store marker detection start date
detection_start = time.perf_counter()
# Detect markers into gray picture
- self.__detected_markers_corners, self.__detected_markers_ids, _ = aruco.detectMarkers(cv.cvtColor(image, cv.COLOR_BGR2GRAY), self.dictionary.markers, parameters = self.parameters.internal)
+ detected_markers_corners, detected_markers_ids, _ = aruco.detectMarkers(cv.cvtColor(image, cv.COLOR_BGR2GRAY), self.dictionary.markers, parameters = self.parameters.internal)
# Assess marker detection time in ms
detection_time = (time.perf_counter() - detection_start) * 1e3
# Is there detected markers ?
- if len(self.__detected_markers_corners) > 0:
+ if len(detected_markers_corners) > 0:
# Transform markers ids array into list
- self.__detected_markers_ids = self.__detected_markers_ids.T[0]
+ detected_markers_ids = detected_markers_ids.T[0]
# Gather detected markers data and update metrics
self.__detection_count += 1
- for i, marker_id in enumerate(self.__detected_markers_ids):
+ for i, marker_id in enumerate(detected_markers_ids):
marker = ArUcoMarker.ArUcoMarker(self.dictionary, marker_id, self.marker_size)
-
- marker.corners = self.__detected_markers_corners[i]
+ marker.corners = detected_markers_corners[i][0]
# No pose estimation: call estimate_markers_pose to get one
marker.translation = numpy.empty([0])
@@ -290,6 +301,7 @@ class ArUcoDetector():
self.__detected_markers[marker_id] = marker
+ # Update metrics
self.__detected_ids.append(marker_id)
return detection_time
@@ -298,31 +310,28 @@ class ArUcoDetector():
"""Estimate pose of current detected markers or of given markers id list."""
# Is there detected markers ?
- if len(self.__detected_markers_corners) > 0:
+ if len(self.__detected_markers) > 0:
- # Is there a marker selection ?
- if len(markers_ids) > 0:
+ # Select all markers by default
+ if len(markers_ids) == 0:
- selected_markers_corners = tuple()
- selected_markers_ids = []
+ markers_ids = self.__detected_markers.keys()
- for i, marker_id in enumerate(self.__detected_markers_ids):
+ # Prepare data for aruco.estimatePoseSingleMarkers function
+ selected_markers_corners = tuple()
+ selected_markers_ids = []
- if marker_id in markers_ids:
+ for marker_id, marker in self.__detected_markers.items():
- selected_markers_corners += (self.__detected_markers_corners[i],)
- selected_markers_ids.append(marker_id)
+ if marker_id in markers_ids:
- # Otherwise, estimate pose of all markers
- else:
-
- selected_markers_corners = self.__detected_markers_corners
- selected_markers_ids = self.__detected_markers_ids
+ selected_markers_corners += (marker.corners,)
+ selected_markers_ids.append(marker_id)
# Estimate pose of selected markers
if len(selected_markers_corners) > 0:
- markers_rvecs, markers_tvecs, markers_points = aruco.estimatePoseSingleMarkers(selected_markers_corners, self.marker_size, numpy.array(self.optic_parameters.K), numpy.array(self.optic_parameters.D))
+ markers_rvecs, markers_tvecs, markers_points = aruco.estimatePoseSingleMarkers(selected_markers_corners, self.marker_size, numpy.array(self.optic_parameters.K), numpy.array(self.optic_parameters.D))
for i, marker_id in enumerate(selected_markers_ids):
@@ -330,7 +339,8 @@ class ArUcoDetector():
marker.translation = markers_tvecs[i][0]
marker.rotation, _ = cv.Rodrigues(markers_rvecs[i][0])
- marker.points = markers_points.reshape(4, 3)
+
+ marker.points = markers_points.reshape(4, 3).dot(marker.rotation) + marker.translation
@property
def detected_markers(self) -> dict[ArUcoMarkerType]:
@@ -361,19 +371,19 @@ class ArUcoDetector():
def detect_board(self, image: numpy.array, board, expected_markers_number):
"""Detect ArUco markers board in image setting up the number of detected markers needed to agree detection.
- .. danger:: DON'T MIRROR IMAGE
- It makes the markers detection to fail.
+ !!! danger "DON'T MIRROR IMAGE"
+ It makes the markers detection fail.
"""
# detect markers from gray picture
gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
- self.__detected_markers_corners, self.__detected_markers_ids, _ = aruco.detectMarkers(gray, self.dictionary.markers, parameters = self.parameters.internal)
+ detected_markers_corners, detected_markers_ids, _ = aruco.detectMarkers(gray, self.dictionary.markers, parameters = self.parameters.internal)
# if all board markers are detected
- if len(self.__detected_markers_corners) == expected_markers_number:
+ if len(detected_markers_corners) == expected_markers_number:
self.__board = board
- self.__board_corners_number, self.__board_corners, self.__board_corners_ids = aruco.interpolateCornersCharuco(self.__detected_markers_corners, self.__detected_markers_ids, gray, self.__board.model)
+ self.__board_corners_number, self.__board_corners, self.__board_corners_ids = aruco.interpolateCornersCharuco(detected_markers_corners, detected_markers_ids, gray, self.__board.model)
else:
@@ -398,9 +408,11 @@ class ArUcoDetector():
@property
def detection_metrics(self) -> Tuple[int, dict]:
"""Get marker detection metrics.
+
Returns:
- number of detect function call
- dict with number of detection for each marker identifier"""
+ number of detect function call
+ dict with number of detection for each marker identifier
+ """
return self.__detection_count, Counter(self.__detected_ids)
diff --git a/src/argaze/ArUcoMarkers/ArUcoMarker.py b/src/argaze/ArUcoMarkers/ArUcoMarker.py
index 57bd8bd..0f368f6 100644
--- a/src/argaze/ArUcoMarkers/ArUcoMarker.py
+++ b/src/argaze/ArUcoMarkers/ArUcoMarker.py
@@ -29,7 +29,7 @@ class ArUcoMarker():
"""Size of marker in centimeters."""
corners: numpy.array = field(init=False, repr=False)
- """Estimated 2D corner positions in camera image referential."""
+ """Estimated 2D corners position in camera image referential."""
translation: numpy.array = field(init=False, repr=False)
"""Estimated 3D center position in camera world referential."""
@@ -68,7 +68,7 @@ class ArUcoMarker():
# Draw marker if required
if color is not None:
- aruco.drawDetectedMarkers(image, [self.corners], numpy.array([self.identifier]), color)
+ aruco.drawDetectedMarkers(image, [numpy.array([list(self.corners)])], numpy.array([self.identifier]), color)
# Draw marker axes if pose has been estimated and if required
if self.translation.size == 3 and self.rotation.size == 9 and draw_axes is not None:
diff --git a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py
index 5b6c69d..37bceec 100644
--- a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py
+++ b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py
@@ -17,8 +17,7 @@ import re
from argaze.ArUcoMarkers import ArUcoMarkersDictionary, ArUcoMarker, ArUcoOpticCalibrator
import numpy
-import cv2 as cv
-import cv2.aruco as aruco
+import cv2
T0 = numpy.array([0., 0., 0.])
"""Define no translation vector."""
@@ -58,50 +57,31 @@ def is_rotation_matrix(R):
return n < 1e-3
-def make_euler_rotation_vector(R):
-
- assert(is_rotation_matrix(R))
-
- sy = math.sqrt(R[0,0] * R[0,0] + R[1,0] * R[1,0])
-
- singular = sy < 1e-6
-
- if not singular :
- x = math.atan2(R[2,1] , R[2,2])
- y = math.atan2(-R[2,0], sy)
- z = math.atan2(R[1,0], R[0,0])
- else :
- x = math.atan2(-R[1,2], R[1,1])
- y = math.atan2(-R[2,0], sy)
- z = 0
-
- return numpy.array([numpy.rad2deg(x), numpy.rad2deg(y), numpy.rad2deg(z)])
-
@dataclass(frozen=True)
class Place():
- """Define a place as a pose and a marker."""
-
- translation: numpy.array
- """Position in group referential."""
+ """Define a place as list of corners position and a marker.
- rotation: numpy.array
- """Rotation in group referential."""
+ Parameters:
+ corners: 3D corner positions in group referential.
+ marker: ArUco marker linked to the place.
+ """
+ corners: numpy.array
marker: dict
- """ArUco marker linked to the place."""
@dataclass
class ArUcoMarkersGroup():
- """Handle group of ArUco markers as one unique spatial entity and estimate its pose."""
+ """Handle group of ArUco markers as one unique spatial entity and estimate its pose.
- marker_size: float = field(default=0.)
- """Expected size of all markers in the group."""
+ Parameters:
+ marker_size: expected size of all markers in the group.
+ dictionary: expected dictionary of all markers in the group.
+ places: expected markers place.
+ """
+ marker_size: float = field(default=0.)
dictionary: ArUcoMarkersDictionary.ArUcoMarkersDictionary = field(default_factory=ArUcoMarkersDictionary.ArUcoMarkersDictionary)
- """Expected dictionary of all markers in the group."""
-
places: dict = field(default_factory=dict)
- """Expected markers place"""
def __post_init__(self):
"""Init group pose and places pose."""
@@ -144,12 +124,16 @@ class ArUcoMarkersGroup():
new_marker = ArUcoMarker.ArUcoMarker(self.dictionary, identifier, self.marker_size)
- new_places[identifier] = Place(tvec, rmat, new_marker)
+ # Build marker corners thanks to translation vector and rotation matrix
+ place_corners = numpy.array([[-self.marker_size/2, self.marker_size/2, 0], [self.marker_size/2, self.marker_size/2, 0], [self.marker_size/2, -self.marker_size/2, 0], [-self.marker_size/2, -self.marker_size/2, 0]])
+ place_corners = place_corners.dot(rmat) + tvec
+
+ new_places[identifier] = Place(place_corners, new_marker)
- # else places are configured using detected markers
+ # else places are configured using detected markers estimated points
elif isinstance(data, ArUcoMarker.ArUcoMarker):
- new_places[identifier] = Place(data.translation, data.rotation, data)
+ new_places[identifier] = Place(data.points, data)
# else places are already at expected format
elif (type(identifier) == int) and isinstance(data, Place):
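Places now store explicit 3D corner positions; legacy (tvec, rmat) entries are converted by rotating, then translating, the corners of a marker-sized square, mirroring the hunk above as a standalone check:

import numpy

def place_corners(marker_size: float, tvec, rmat):
    # Corners of a marker_size square rotated by rmat then moved to tvec
    half = marker_size / 2
    square = numpy.array([[-half, half, 0], [half, half, 0],
                          [half, -half, 0], [-half, -half, 0]])
    return square.dot(rmat) + tvec

print(place_corners(5., numpy.array([0., 0., 10.]), numpy.identity(3)))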
@@ -158,21 +142,15 @@ class ArUcoMarkersGroup():
self.places = new_places
- # Init place consistency
- self.init_places_consistency()
-
@classmethod
def from_obj(self, obj_filepath: str) -> ArUcoMarkersGroupType:
"""Load ArUco markers group from .obj file.
!!! note
- Expected object (o) name format: <DICTIONARY>#<IDENTIFIER>_Marker
+ Expected object (o) name format: <DICTIONARY>#<IDENTIFIER>_Marker
!!! note
- All markers have to belong to the same dictionary.
-
- !!! note
- Marker normal vectors (vn) expected.
+ All markers have to belong to the same dictionary.
"""
@@ -184,8 +162,7 @@ class ArUcoMarkersGroup():
OBJ_RX_DICT = {
'object': re.compile(r'o (.*)#([0-9]+)_(.*)\n'),
'vertice': re.compile(r'v ([+-]?[0-9]*[.]?[0-9]+) ([+-]?[0-9]*[.]?[0-9]+) ([+-]?[0-9]*[.]?[0-9]+)\n'),
- 'normal': re.compile(r'vn ([+-]?[0-9]*[.]?[0-9]+) ([+-]?[0-9]*[.]?[0-9]+) ([+-]?[0-9]*[.]?[0-9]+)\n'),
- 'face': re.compile(r'f ([0-9]+)//([0-9]+) ([0-9]+)//([0-9]+) ([0-9]+)//([0-9]+) ([0-9]+)//([0-9]+)\n'),
+ 'face': re.compile(r'f ([0-9]+) ([0-9]+) ([0-9]+) ([0-9]+)\n'),
'comment': re.compile(r'#(.*)\n') # keep comment regex after object regex because the # is used in object string too
}
@@ -205,7 +182,6 @@ class ArUcoMarkersGroup():
identifier = None
vertices = []
- normals = {}
faces = {}
# Open the file and read through it line by line
@@ -244,15 +220,10 @@ class ArUcoMarkersGroup():
vertices.append(tuple([float(match.group(1)), float(match.group(2)), float(match.group(3))]))
- # Extract normal to calculate rotation matrix
- elif key == 'normal':
-
- normals[identifier] = tuple([float(match.group(1)), float(match.group(2)), float(match.group(3))])
-
# Extract vertice ids
elif key == 'face':
- faces[identifier] = [int(match.group(1)), int(match.group(3)), int(match.group(5)), int(match.group(7))]
+ faces[identifier] = [int(match.group(1)), int(match.group(2)), int(match.group(3)), int(match.group(4))]
# Go to next line
line = file.readline()
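The face regex now expects plain quad faces (f v1 v2 v3 v4) instead of v//vn vertex/normal pairs, since normals are no longer needed to build places. Checking both forms against the new pattern:

import re

FACE_RX = re.compile(r'f ([0-9]+) ([0-9]+) ([0-9]+) ([0-9]+)\n')

match = FACE_RX.match('f 1 2 3 4\n')
print([int(match.group(i)) for i in range(1, 5)])  # [1, 2, 3, 4]
print(FACE_RX.match('f 1//1 2//2 3//3 4//4\n'))    # None: v//vn faces no longer parsed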
@@ -262,32 +233,20 @@ class ArUcoMarkersGroup():
# Retrieve marker vertices thanks to face vertex ids
for identifier, face in faces.items():
- # Gather place corners from counter clockwise ordered face vertices
- corners = numpy.array([ vertices[i-1] for i in face ])
-
- # Edit translation (Tp) allowing to move world axis (W) at place axis (P)
- Tp = corners.mean(axis=0)
+ # Gather place corners in clockwise order
+ cw_corners = numpy.array([ vertices[i-1] for i in reversed(face) ])
# Edit place axis from corners positions
- place_x_axis = corners[1:3].mean(axis=0) - Tp
+ place_x_axis = cw_corners[2] - cw_corners[3]
place_x_axis_norm = numpy.linalg.norm(place_x_axis)
- place_x_axis = place_x_axis / place_x_axis_norm
-
- place_y_axis = corners[2:4].mean(axis=0) - Tp
+
+ place_y_axis = cw_corners[0] - cw_corners[3]
place_y_axis_norm = numpy.linalg.norm(place_y_axis)
- place_y_axis = place_y_axis / place_y_axis_norm
- place_z_axis = normals[identifier]
-
- # Edit rotation (Rp) allowing to transform world axis (W) into place axis (P)
- W = numpy.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
- P = numpy.array([place_x_axis, place_y_axis, place_z_axis])
- Rp = W.dot(P.T)
-
# Check axis size: they should be almost equal
if math.isclose(place_x_axis_norm, place_y_axis_norm, rel_tol=1e-3):
- current_marker_size = place_x_axis_norm*2
+ current_marker_size = place_x_axis_norm
# Check that all markers size are almost equal
if new_marker_size > 0:
@@ -300,7 +259,7 @@ class ArUcoMarkersGroup():
# Create a new place related to a new marker
new_marker = ArUcoMarker.ArUcoMarker(new_dictionary, identifier, new_marker_size)
- new_places[identifier] = Place(Tp, Rp, new_marker)
+ new_places[identifier] = Place(cw_corners, new_marker)
except IOError:
raise IOError(f'File not found: {obj_filepath}')
@@ -335,18 +294,7 @@ class ArUcoMarkersGroup():
output += '\n\n\tPlaces:'
for identifier, place in self.places.items():
output += f'\n\t\t- {identifier}:'
- output += f'\n{place.translation}'
- output += f'\n{place.rotation}'
-
- output += '\n\n\tAngle cache:'
- for A_identifier, A_angle_cache in self.__rotation_cache.items():
- for B_identifier, angle in A_angle_cache.items():
- output += f'\n\t\t- {A_identifier}/{B_identifier}: [{angle[0]:3f} {angle[1]:3f} {angle[2]:3f}]'
-
- output += '\n\n\tDistance cache:'
- for A_identifier, A_distance_cache in self.__translation_cache.items():
- for B_identifier, distance in A_distance_cache.items():
- output += f'\n\t\t- {A_identifier}/{B_identifier}: {distance:3f}'
+ output += f'\n{place.corners}'
return output
@@ -360,8 +308,8 @@ class ArUcoMarkersGroup():
"""Sort markers belonging to the group from given detected markers dict (cf ArUcoDetector.detect_markers()).
Returns:
- dict of markers belonging to this group
- dict of remaining markers not belonging to this group
+ dict of markers belonging to this group
+ dict of remaining markers not belonging to this group
"""
group_markers = {}
@@ -379,148 +327,22 @@ class ArUcoMarkersGroup():
return group_markers, remaining_markers
- def init_places_consistency(self):
- """Initialize places consistency to speed up further markers consistency checking."""
-
- # Process expected rotation between places combinations to speed up further calculations
- self.__rotation_cache = {}
- for (A_identifier, A_place), (B_identifier, B_place) in itertools.combinations(self.places.items(), 2):
-
- A = self.places[A_identifier].rotation
- B = self.places[B_identifier].rotation
-
- if numpy.array_equal(A, B):
-
- AB_rvec = [0., 0., 0.]
- BA_rvec = [0., 0., 0.]
-
- else:
-
- # Calculate euler angle representation of AB and BA rotation matrix
- AB_rvec = make_euler_rotation_vector(B.dot(A.T))
- BA_rvec = make_euler_rotation_vector(A.dot(B.T))
-
- try:
- self.__rotation_cache[A_identifier][B_identifier] = AB_rvec
- except:
- self.__rotation_cache[A_identifier] = {B_identifier: AB_rvec}
-
- try:
- self.__rotation_cache[B_identifier][A_identifier] = BA_rvec
- except:
- self.__rotation_cache[B_identifier] = {A_identifier: BA_rvec}
-
- # Process translation between each places combinations to speed up further calculations
- self.__translation_cache = {}
- for (A_identifier, A_place), (B_identifier, B_place) in itertools.combinations(self.places.items(), 2):
-
- A = self.places[A_identifier].translation
- B = self.places[B_identifier].translation
-
- # Calculate translation between A and B position
- AB_tvec = numpy.linalg.norm(B - A)
-
- try:
- self.__translation_cache[A_identifier][B_identifier] = AB_tvec
- except:
- self.__translation_cache[A_identifier] = {B_identifier: AB_tvec}
+ def estimate_pose_from_markers_corners(self, markers: dict, K: numpy.array, D: numpy.array) -> Tuple[bool, numpy.array, numpy.array]:
+ """Estimate pose from markers corners and places corners.
- try:
- self.__translation_cache[B_identifier][A_identifier] = AB_tvec
- except:
- self.__translation_cache[B_identifier] = {A_identifier: AB_tvec}
-
- def check_markers_consistency(self, group_markers: dict, angle_tolerance: float, distance_tolerance: float) -> Tuple[dict, dict, dict]:
- """Evaluate if given markers configuration match related places configuration.
+ Parameters:
+ markers: detected markers to use for pose estimation.
+ K: intrinsic camera parameters
+ D: camera distortion matrix
Returns:
- dict of consistent markers
- dict of unconsistent markers
- dict of identified distance or angle unconsistencies and out-of-bounds values
+ success: True if the pose estimation succeeded
+ tvec: scene translation vector
+ rvec: scene rotation vector
"""
- consistent_markers = {}
- unconsistencies = {'rotation': {}, 'translation': {}}
-
- for (A_identifier, A_marker), (B_identifier, B_marker) in itertools.combinations(group_markers.items(), 2):
-
- try:
-
- # Rotation matrix from A marker to B marker
- AB = B_marker.rotation.dot(A_marker.rotation.T)
-
- # Calculate euler angle representation of AB rotation matrix
- AB_rvec = make_euler_rotation_vector(AB)
- expected_rvec= self.__rotation_cache[A_identifier][B_identifier]
-
- # Calculate distance between A marker center and B marker center
- AB_tvec = numpy.linalg.norm(A_marker.translation - B_marker.translation)
- expected_tvec = self.__translation_cache[A_identifier][B_identifier]
-
- # Check angle and distance according given tolerance then normalise marker pose
- consistent_rotation = numpy.allclose(AB_rvec, expected_rvec, atol=angle_tolerance)
- consistent_translation = math.isclose(AB_tvec, expected_tvec, abs_tol=distance_tolerance)
-
- if consistent_rotation and consistent_translation:
-
- if A_identifier not in consistent_markers.keys():
-
- # Remember this marker is already validated
- consistent_markers[A_identifier] = A_marker
-
- if B_identifier not in consistent_markers.keys():
-
- # Remember this marker is already validated
- consistent_markers[B_identifier] = B_marker
-
- else:
-
- if not consistent_rotation:
- unconsistencies['rotation'][f'{A_identifier}/{B_identifier}'] = {'current': AB_rvec, 'expected': expected_rvec}
-
- if not consistent_translation:
- unconsistencies['translation'][f'{A_identifier}/{B_identifier}'] = {'current': AB_tvec, 'expected': expected_tvec}
-
- except KeyError:
-
- raise ValueError(f'Marker {A_identifier} or {B_identifier} don\'t belong to the group.')
-
- # Gather unconsistent markers
- unconsistent_markers = {}
-
- for identifier, marker in group_markers.items():
-
- if identifier not in consistent_markers.keys():
-
- unconsistent_markers[identifier] = marker
-
- return consistent_markers, unconsistent_markers, unconsistencies
-
- def estimate_pose_from_single_marker(self, marker: ArUcoMarker.ArUcoMarker) -> Tuple[numpy.array, numpy.array]:
- """Calculate rotation and translation that move a marker to its place."""
-
- # Get the place related to the given marker
- try:
-
- place = self.places[marker.identifier]
-
- # Rotation matrix that transform marker to related place
- self._rotation = marker.rotation.dot(place.rotation.T)
-
- # Translation vector that transform marker to related place
- self._translation = marker.translation - place.translation.dot(place.rotation).dot(marker.rotation.T)
-
- return self._translation, self._rotation
-
- except KeyError:
-
- raise ValueError(f'Marker {marker.identifier} doesn\'t belong to the group.')
-
- def estimate_pose_from_markers(self, markers: dict) -> Tuple[numpy.array, numpy.array]:
- """Calculate average rotation and translation that move markers to their related places."""
-
- rotations = []
- translations = []
+ markers_corners_2d = []
+ places_corners_3d = []
for identifier, marker in markers.items():
@@ -528,72 +350,30 @@ class ArUcoMarkersGroup():
place = self.places[identifier]
- # Rotation matrix that transform marker to related place
- R = marker.rotation.dot(place.rotation.T)
+ for marker_corner in marker.corners:
+ markers_corners_2d.append(list(marker_corner))
- # Translation vector that transform marker to related place
- T = marker.translation - place.translation.dot(place.rotation).dot(marker.rotation.T)
-
- rotations.append(R)
- translations.append(T)
+ for place_corner in place.corners:
+ places_corners_3d.append(list(place_corner))
except KeyError:
raise ValueError(f'Marker {marker.identifier} doesn\'t belong to the group.')
- # Consider ArUcoMarkersGroup rotation as the mean of all marker rotations
- # !!! WARNING !!! This is a bad hack : processing rotations average is a very complex problem that needs to well define the distance calculation method before.
- self._rotation = numpy.mean(numpy.array(rotations), axis=0)
-
- # Consider ArUcoMarkersGroup translation as the mean of all marker translations
- self._translation = numpy.mean(numpy.array(translations), axis=0)
-
- return self._translation, self._rotation
-
- def estimate_pose_from_axis_markers(self, origin_marker: ArUcoMarker.ArUcoMarker, horizontal_axis_marker: ArUcoMarker.ArUcoMarker, vertical_axis_marker: ArUcoMarker.ArUcoMarker) -> Tuple[numpy.array, numpy.array]:
- """Calculate rotation and translation from 3 markers defining an orthogonal axis."""
-
- O_marker = origin_marker
- A_marker = horizontal_axis_marker
- B_marker = vertical_axis_marker
-
- O_place = self.places[O_marker.identifier]
- A_place = self.places[A_marker.identifier]
- B_place = self.places[B_marker.identifier]
+ # SolvePnP using the cv2.SOLVEPNP_SQPNP flag
+ # TODO: it also works with the cv2.SOLVEPNP_EPNP flag, so we need to test which one is faster.
+ # About SolvPnP flags: https://docs.opencv.org/4.x/d5/d1f/calib3d_solvePnP.html
+ success, rvec, tvec = cv2.solvePnP(numpy.array(places_corners_3d), numpy.array(markers_corners_2d), numpy.array(K), numpy.array(D), flags=cv2.SOLVEPNP_SQPNP)
- # Place axis
- OA = A_place.translation - O_place.translation
- OA = OA / numpy.linalg.norm(OA)
+ # Refine pose estimation using a virtual visual servoing (VVS) scheme
+ if success:
- OB = B_place.translation - O_place.translation
- OB = OB / numpy.linalg.norm(OB)
+ rvec, tvec = cv2.solvePnPRefineVVS(numpy.array(places_corners_3d), numpy.array(markers_corners_2d), numpy.array(K), numpy.array(D), rvec, tvec)
- # Detect and correct bad place axis orientation
- X_sign = numpy.sign(OA)[0]
- Y_sign = numpy.sign(OB)[1]
+ self._translation = tvec.T
+ self._rotation = rvec.T
- P = numpy.array([OA*X_sign, OB*Y_sign, numpy.cross(OA*X_sign, OB*Y_sign)])
-
- # Marker axis
- OA = A_marker.translation - O_marker.translation
- OA = OA / numpy.linalg.norm(OA)
-
- OB = B_marker.translation - O_marker.translation
- OB = OB / numpy.linalg.norm(OB)
-
- # Detect and correct bad place axis orientation
- X_sign = numpy.sign(OA)[0]
- Y_sign = -numpy.sign(OB)[1]
-
- M = numpy.array([OA*X_sign, OB*Y_sign, numpy.cross(OA*X_sign, OB*Y_sign)])
-
- # Then estimate ArUcoMarkersGroup rotation
- self._rotation = P.dot(M.T)
-
- # Consider ArUcoMarkersGroup translation as the translation of the marker at axis origin
- self._translation = O_marker.translation - O_place.translation.dot(O_place.rotation).dot(M.T)
-
- return self._translation, self._rotation
+ return success, self._translation, self._rotation
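A minimal usage sketch of the new solvePnP-based estimation; `group`, `detected_markers` and the sample intrinsics below are illustrative assumptions, not values from the repository:

    import numpy

    # Hypothetical pinhole intrinsics and zero distortion
    K = numpy.array([[800., 0., 320.], [0., 800., 240.], [0., 0., 1.]])
    D = numpy.zeros(5)

    # Returns a success flag plus the group translation and rotation vectors
    success, tvec, rvec = group.estimate_pose_from_markers_corners(detected_markers, K, D)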
@property
def translation(self) -> numpy.array:
@@ -622,15 +402,15 @@ class ArUcoMarkersGroup():
try:
axisPoints = numpy.float32([[length, 0, 0], [0, length, 0], [0, 0, length], [0, 0, 0]]).reshape(-1, 3)
- axisPoints, _ = cv.projectPoints(axisPoints, self._rotation, self._translation, numpy.array(K), numpy.array(D))
+ axisPoints, _ = cv2.projectPoints(axisPoints, self._rotation, self._translation, numpy.array(K), numpy.array(D))
axisPoints = axisPoints.astype(int)
- cv.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[0].ravel()), (0, 0, 255), thickness) # X (red)
- cv.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[1].ravel()), (0, 255, 0), thickness) # Y (green)
- cv.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[2].ravel()), (255, 0, 0), thickness) # Z (blue)
+ cv2.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[0].ravel()), (0, 0, 255), thickness) # X (red)
+ cv2.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[1].ravel()), (0, 255, 0), thickness) # Y (green)
+ cv2.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[2].ravel()), (255, 0, 0), thickness) # Z (blue)
# Ignore errors due to out of field axis: their coordinate are larger than int32 limitations.
- except cv.error:
+ except cv2.error:
pass
def draw_places(self, image: numpy.array, K, D, color: tuple = None, border_size: int = 0):
@@ -642,52 +422,24 @@ class ArUcoMarkersGroup():
try:
- T = self.places[identifier].translation
- R = self.places[identifier].rotation
-
- placePoints = (T + numpy.float32([R.dot([-l, -l, 0]), R.dot([l, -l, 0]), R.dot([l, l, 0]), R.dot([-l, l, 0])])).reshape(-1, 3)
- placePoints, _ = cv.projectPoints(placePoints, self._rotation, self._translation, numpy.array(K), numpy.array(D))
+ placePoints, _ = cv2.projectPoints(place.corners, self._rotation, self._translation, numpy.array(K), numpy.array(D))
placePoints = placePoints.astype(int)
- cv.line(image, tuple(placePoints[0].ravel()), tuple(placePoints[1].ravel()), color, border_size)
- cv.line(image, tuple(placePoints[1].ravel()), tuple(placePoints[2].ravel()), color, border_size)
- cv.line(image, tuple(placePoints[2].ravel()), tuple(placePoints[3].ravel()), color, border_size)
- cv.line(image, tuple(placePoints[3].ravel()), tuple(placePoints[0].ravel()), color, border_size)
+ cv2.line(image, tuple(placePoints[0].ravel()), tuple(placePoints[1].ravel()), color, border_size)
+ cv2.line(image, tuple(placePoints[1].ravel()), tuple(placePoints[2].ravel()), color, border_size)
+ cv2.line(image, tuple(placePoints[2].ravel()), tuple(placePoints[3].ravel()), color, border_size)
+ cv2.line(image, tuple(placePoints[3].ravel()), tuple(placePoints[0].ravel()), color, border_size)
# Ignore errors due to out of field places: their coordinate are larger than int32 limitations.
- except cv.error:
+ except cv2.error:
pass
- def draw_places_axes(self, image: numpy.array, K, D, thickness: int = 0, length: float = 0):
- """Draw group place axes."""
-
- for identifier, place in self.places.items():
-
- try:
-
- T = self.places[identifier].translation
- R = self.places[identifier].rotation
-
- axisPoints = (T + numpy.float32([R.dot([length, 0, 0]), R.dot([0, length, 0]), R.dot([0, 0, length]), R.dot([0, 0, 0])])).reshape(-1, 3)
- axisPoints, _ = cv.projectPoints(axisPoints, self._rotation, self._translation, numpy.array(K), numpy.array(D))
- axisPoints = axisPoints.astype(int)
-
- cv.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[0].ravel()), (0, 0, 255), thickness) # X (red)
- cv.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[1].ravel()), (0, 255, 0), thickness) # Y (green)
- cv.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[2].ravel()), (255, 0, 0), thickness) # Z (blue)
-
- # Ignore errors due to out of field places: their coordinate are larger than int32 limitations.
- except cv.error:
- pass
-
- def draw(self, image: numpy.array, K, D, draw_axes: dict = None, draw_places: dict = None, draw_places_axes: dict = None):
+ def draw(self, image: numpy.array, K, D, draw_axes: dict = None, draw_places: dict = None):
"""Draw group axes and places.
Parameters:
-
draw_axes: draw_axes parameters (if None, no axes drawn)
draw_places: draw_places parameters (if None, no places drawn)
- draw_places_axes: draw_places_axes parameters (if None, no places axes drawn)
"""
# Draw axes if required
@@ -700,11 +452,6 @@ class ArUcoMarkersGroup():
self.draw_places(image, K, D, **draw_places)
- # Draw places axes if required
- if draw_places_axes is not None:
-
- self.draw_places_axes(image, K, D, **draw_places_axes)
-
def to_obj(self, obj_filepath):
"""Save group to .obj file."""
@@ -715,26 +462,19 @@ class ArUcoMarkersGroup():
v_count = 0
- for identifier, place in self.places.items():
+ for p, (identifier, place) in enumerate(self.places.items()):
file.write(f'o {self.dictionary.name}#{identifier}_Marker\n')
vertices = ''
- T = place.translation
- R = place.rotation
-
- points = (T + numpy.float32([R.dot(place.marker.points[0]), R.dot(place.marker.points[1]), R.dot(place.marker.points[2]), R.dot(place.marker.points[3])])).reshape(-1, 3)
-
- print(points)
-
# Write vertices in reverse order
- for i in [3, 2, 1, 0]:
+ for v in [3, 2, 1, 0]:
- file.write(f'v {" ".join(map(str, points[i]))}\n')
+ file.write(f'v {" ".join(map(str, place.corners[v]))}\n')
v_count += 1
vertices += f' {v_count}'
- file.write('s off\n')
+ #file.write('s off\n')
file.write(f'f{vertices}\n')
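For reference, a hypothetical fragment of the .obj written by to_obj() for one place (vertex values are made up; corners are emitted in reversed order as above):

    o DICT_4X4_50#0_Marker
    v 0.025 -0.025 0.0
    v 0.025 0.025 0.0
    v -0.025 0.025 0.0
    v -0.025 -0.025 0.0
    f 1 2 3 4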
diff --git a/src/argaze/ArUcoMarkers/ArUcoScene.py b/src/argaze/ArUcoMarkers/ArUcoScene.py
index f6b303a..51dd88c 100644
--- a/src/argaze/ArUcoMarkers/ArUcoScene.py
+++ b/src/argaze/ArUcoMarkers/ArUcoScene.py
@@ -96,14 +96,13 @@ class ArUcoScene(ArFeatures.ArScene):
# Create new aruco scene using temporary ar scene values
return ArUcoScene(aruco_markers_group=new_aruco_markers_group, **temp_ar_scene_values)
- def estimate_pose(self, detected_markers) -> Tuple[numpy.array, numpy.array, str, dict]:
+ def estimate_pose(self, detected_markers) -> Tuple[numpy.array, numpy.array, dict]:
"""Estimate scene pose from detected ArUco markers.
Returns:
- scene translation vector
- scene rotation matrix
- pose estimation strategy
- dict of markers used to estimate the pose
+ scene translation vector
+ scene rotation matrix
+ dict of markers used to estimate the pose
"""
# Pose estimation fails when no marker is detected
@@ -118,26 +117,19 @@ class ArUcoScene(ArFeatures.ArScene):
raise ArFeatures.PoseEstimationFailed('No marker belongs to the scene')
- # Estimate scene pose from unique marker transformations
- elif len(scene_markers) == 1:
+ # Pose estimation fails if only one marker belongs to the scene
+ if len(scene_markers) == 1:
- marker_id, marker = scene_markers.popitem()
- tvec, rmat = self.aruco_markers_group.estimate_pose_from_single_marker(marker)
-
- return tvec, rmat, 'estimate_pose_from_single_marker', {marker_id: marker}
+ raise ArFeatures.PoseEstimationFailed('Only one marker belongs to the scene')
- # Otherwise, check markers consistency
- consistent_markers, unconsistent_markers, unconsistencies = self.aruco_markers_group.check_markers_consistency(scene_markers, self.angle_tolerance, self.distance_tolerance)
+ # Estimate pose from markers corners
+ success, tvec, rmat = self.aruco_markers_group.estimate_pose_from_markers_corners(scene_markers, self.parent.aruco_detector.optic_parameters.K, self.parent.aruco_detector.optic_parameters.D)
- # Pose estimation fails when no marker passes consistency checking
- if len(consistent_markers) == 0:
+ if not success:
- raise ArFeatures.PoseEstimationFailed('Unconsistent marker poses', unconsistencies)
+ raise ArFeatures.PoseEstimationFailed('Can\'t estimate pose from marker corner positions')
- # Otherwise, estimate scene pose from all consistent markers pose
- tvec, rmat = self.aruco_markers_group.estimate_pose_from_markers(consistent_markers)
-
- return tvec, rmat, 'estimate_pose_from_markers', consistent_markers
+ return tvec, rmat, scene_markers
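A hedged sketch of driving the stricter estimate_pose() contract, which now raises instead of falling back to single-marker strategies (`aruco_scene` and `detected_markers` are assumed to exist):

    from argaze import ArFeatures

    try:
        tvec, rmat, used_markers = aruco_scene.estimate_pose(detected_markers)

    except ArFeatures.PoseEstimationFailed as error:
        # Raised when zero or one scene marker is detected, or when solvePnP fails
        print('pose estimation failed:', error)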
def draw(self, image: numpy.array, draw_aruco_markers_group: dict = None):
"""
diff --git a/src/argaze/AreaOfInterest/AOI2DScene.py b/src/argaze/AreaOfInterest/AOI2DScene.py
index 73c977f..062044f 100644
--- a/src/argaze/AreaOfInterest/AOI2DScene.py
+++ b/src/argaze/AreaOfInterest/AOI2DScene.py
@@ -10,21 +10,103 @@ __license__ = "BSD"
from typing import TypeVar, Tuple
from argaze import DataStructures
-from argaze.AreaOfInterest import AOIFeatures
+from argaze.AreaOfInterest import AOIFeatures, AOI3DScene
from argaze import GazeFeatures
import cv2
import numpy
+from xml.dom import minidom
AOI2DSceneType = TypeVar('AOI2DScene', bound="AOI2DScene")
# Type definition for type annotation convenience
+AOI3DSceneType = TypeVar('AOI3DScene', bound="AOI3DScene")
+# Type definition for type annotation convenience
+
class AOI2DScene(AOIFeatures.AOIScene):
"""Define AOI 2D scene."""
- def __init__(self, aois_2d: dict = None):
+ def __init__(self, aoi_2d: dict = None):
+
+ super().__init__(2, aoi_2d)
+
+ @classmethod
+ def from_svg(self, svg_filepath: str) -> AOI2DSceneType:
+ """
+ Load areas from .svg file.
+
+ Parameters:
+ svg_filepath: path to svg file
+
+ !!! note
+ Available SVG elements are: path, rect, circle and ellipse.
+
+ !!! warning
+ Available SVG path d-string commands are: MoveTo (M), LineTo (L) and ClosePath (Z).
+ """
+
+ with minidom.parse(svg_filepath) as description_file:
+
+ new_areas = {}
+
+ # Load SVG path
+ for path in description_file.getElementsByTagName('path'):
+
+ # Convert d-string into array
+ d_string = path.getAttribute('d')
+
+ assert(d_string[0] == 'M')
+ assert(d_string[-1] == 'Z')
+
+ points = [(float(x), float(y)) for x, y in [p.split(',') for p in d_string[1:-1].split('L')]]
+
+ new_areas[path.getAttribute('id')] = AOIFeatures.AreaOfInterest(points)
+
+ # Load SVG rect
+ for rect in description_file.getElementsByTagName('rect'):
+
+ # Convert rect element into dict
+ rect_dict = {
+ "Rectangle": {
+ 'x': float(rect.getAttribute('x')),
+ 'y': float(rect.getAttribute('y')),
+ 'width': float(rect.getAttribute('width')),
+ 'height': float(rect.getAttribute('height'))
+ }
+ }
+
+ new_areas[rect.getAttribute('id')] = AOIFeatures.AreaOfInterest.from_dict(rect_dict)
- super().__init__(2, aois_2d)
+ # Load SVG circle
+ for circle in description_file.getElementsByTagName('circle'):
+
+ # Convert circle element into dict
+ circle_dict = {
+ "Circle": {
+ 'cx': float(circle.getAttribute('cx')),
+ 'cy': float(circle.getAttribute('cy')),
+ 'radius': float(circle.getAttribute('r'))
+ }
+ }
+
+ new_areas[circle.getAttribute('id')] = AOIFeatures.AreaOfInterest.from_dict(circle_dict)
+
+ # Load SVG ellipse
+ for ellipse in description_file.getElementsByTagName('ellipse'):
+
+ # Convert ellipse element into dict
+ ellipse_dict = {
+ "Ellipse": {
+ 'cx': float(ellipse.getAttribute('cx')),
+ 'cy': float(ellipse.getAttribute('cy')),
+ 'rx': float(ellipse.getAttribute('rx')),
+ 'ry': float(ellipse.getAttribute('ry'))
+ }
+ }
+
+ new_areas[ellipse.getAttribute('id')] = AOIFeatures.AreaOfInterest.from_dict(ellipse_dict)
+
+ return AOI2DScene(new_areas)
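An illustrative, minimal SVG input for from_svg() (file content and path are hypothetical; element ids become AOI names):

    <svg>
        <path id="Screen" d="M10,10L200,10L200,100L10,100Z"/>
        <rect id="Panel" x="220" y="10" width="80" height="90"/>
        <circle id="Knob" cx="350" cy="50" r="20"/>
    </svg>

    from argaze.AreaOfInterest import AOI2DScene

    aoi_2d_scene = AOI2DScene.AOI2DScene.from_svg('aoi.svg')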
def draw(self, image: numpy.array, draw_aoi: dict = None, exclude=[]):
"""Draw AOI polygons on image.
@@ -56,7 +138,7 @@ class AOI2DScene(AOIFeatures.AOIScene):
yield name, aoi, matching
def draw_raycast(self, image: numpy.array, pointer:tuple, exclude=[], base_color=(0, 0, 255), matching_color=(0, 255, 0)):
- """Draw AOIs with their matching status."""
+ """Draw AOI with their matching status."""
for name, aoi, matching in self.raycast(pointer):
@@ -89,6 +171,7 @@ class AOI2DScene(AOIFeatures.AOIScene):
yield name, aoi, matched_region, aoi_ratio, circle_ratio
+ '''DEPRECATED: but maybe still useful?
def reframe(self, aoi: AOIFeatures.AreaOfInterest, size: tuple) -> AOI2DSceneType:
"""
Reframe whole scene to a scene bounded by a 4 vertices 2D AOI.
@@ -120,3 +203,31 @@ class AOI2DScene(AOIFeatures.AOIScene):
aoi2D_scene[name] = numpy.matmul(aoi2D - Src_origin, M.T)
return aoi2D_scene
+ '''
+ def dimensionalize(self, rectangle_3d: AOIFeatures.AreaOfInterest, size: tuple) -> AOI3DSceneType:
+ """
+ Convert to a 3D scene, considering it lies inside a 3D rectangular frame.
+
+ Parameters:
+ rectangle_3d: rectangle 3D AOI to use as referential plane
+ size: size of the frame in pixels
+
+ Returns:
+ AOI 3D scene
+ """
+
+ assert(rectangle_3d.dimension == 3)
+ assert(rectangle_3d.points_number == 4)
+
+ # Vectorize outter_axis function
+ vfunc = numpy.vectorize(rectangle_3d.outter_axis)
+
+ # Prepare new AOI 3D scene
+ aoi3D_scene = AOI3DScene.AOI3DScene()
+
+ for name, aoi2D in self.items():
+
+ X, Y = (aoi2D / size).T
+ aoi3D_scene[name] = numpy.array(vfunc(X, Y)).T.view(AOIFeatures.AreaOfInterest)
+
+ return aoi3D_scene
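A hedged usage sketch of dimensionalize(), assuming a 480x360 pixel frame mapped onto a hypothetical 48x36 unit rectangle at z=0 (4 corners in clockwise order, as required):

    rectangle_3d = AOIFeatures.AreaOfInterest([[0., 0., 0.], [48., 0., 0.], [48., 36., 0.], [0., 36., 0.]])

    aoi_3d_scene = aoi_2d_scene.dimensionalize(rectangle_3d, size=(480, 360))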
diff --git a/src/argaze/AreaOfInterest/AOI3DScene.py b/src/argaze/AreaOfInterest/AOI3DScene.py
index 8ea6048..33a815c 100644
--- a/src/argaze/AreaOfInterest/AOI3DScene.py
+++ b/src/argaze/AreaOfInterest/AOI3DScene.py
@@ -38,15 +38,15 @@ AOI2DSceneType = TypeVar('AOI2DScene', bound="AOI2DScene")
class AOI3DScene(AOIFeatures.AOIScene):
"""Define AOI 3D scene."""
- def __init__(self, aois_3d: dict = None):
+ def __init__(self, aoi_3d: dict = None):
- super().__init__(3, aois_3d)
+ super().__init__(3, aoi_3d)
@classmethod
def from_obj(self, obj_filepath: str) -> AOI3DSceneType:
"""Load AOI3D scene from .obj file."""
- aois_3d = {}
+ aoi_3d = {}
# regex rules for .obj file parsing
OBJ_RX_DICT = {
@@ -108,15 +108,15 @@ class AOI3DScene(AOIFeatures.AOIScene):
file.close()
- # retreive all aoi3D vertices
+ # retrieve all aoi3D vertices and sort them in clockwise order
for name, face in faces.items():
- aoi3D = AOIFeatures.AreaOfInterest([ vertices[i-1] for i in face ])
- aois_3d[name] = aoi3D
+ aoi3D = AOIFeatures.AreaOfInterest([ vertices[i-1] for i in reversed(face) ])
+ aoi_3d[name] = aoi3D
except IOError:
raise IOError(f'File not found: {obj_filepath}')
- return AOI3DScene(aois_3d)
+ return AOI3DScene(aoi_3d)
def to_obj(self, obj_filepath: str):
"""Save AOI3D scene into .obj file."""
@@ -149,8 +149,9 @@ class AOI3DScene(AOIFeatures.AOIScene):
file.write('s off\n')
file.write(vertices_ids + '\n')
+ '''DEPRECATED: but maybe still useful?
@property
- def orthogonal_projection(self) -> AOI2DScene.AOI2DScene:
+ def orthogonal_projection(self) -> AOI2DSceneType:
"""
Orthogonal projection of whole scene.
@@ -169,7 +170,7 @@ class AOI3DScene(AOIFeatures.AOIScene):
K = numpy.array([[scene_size[1]/scene_size[0], 0.0, 0.5], [0.0, 1., 0.5], [0.0, 0.0, 1.0]])
return self.project(tvec, rvec, K)
-
+ '''
def vision_cone(self, cone_radius, cone_height, cone_tip=[0., 0., 0.], cone_direction=[0., 0., 1.]) -> Tuple[AOI3DSceneType, AOI3DSceneType]:
"""Get AOI which are inside and out a given cone field.
diff --git a/src/argaze/AreaOfInterest/AOIFeatures.py b/src/argaze/AreaOfInterest/AOIFeatures.py
index 8987beb..5637baa 100644
--- a/src/argaze/AreaOfInterest/AOIFeatures.py
+++ b/src/argaze/AreaOfInterest/AOIFeatures.py
@@ -11,6 +11,7 @@ from typing import TypeVar, Tuple
from dataclasses import dataclass, field
import json
import os
+import math
from argaze import DataStructures
@@ -41,6 +42,53 @@ class AreaOfInterest(numpy.ndarray):
return repr(self.tolist())
+ @classmethod
+ def from_dict(self, aoi_data: dict, working_directory: str = None) -> AreaOfInterestType:
+ """Load attributes from dictionary.
+
+ Parameters:
+ aoi_data: dictionary with attributes to load
+ working_directory: folder path from which files are loaded when a dictionary value is a relative filepath.
+ """
+
+ # Get first and unique shape
+ # TODO: allow multiple shapes to describe more complex AOI
+ shape, shape_data = aoi_data.popitem()
+
+ if shape == 'Rectangle':
+
+ x = shape_data.pop('x')
+ y = shape_data.pop('y')
+ width = shape_data.pop('width')
+ height = shape_data.pop('height')
+
+ points = [[x, y], [x+width, y], [x+width, y+height], [x, y+height]]
+
+ return AreaOfInterest(points)
+
+ elif shape == 'Circle':
+
+ cx = shape_data.pop('cx')
+ cy = shape_data.pop('cy')
+ radius = shape_data.pop('radius')
+
+ # TODO: Use pygeos
+ N = 32
+ points = [(math.cos(2*math.pi / N*x) * radius + cx, math.sin(2*math.pi / N*x) * radius + cy) for x in range(0, N+1)]
+
+ return AreaOfInterest(points)
+
+ elif shape == 'Ellipse':
+
+ cx = shape_data.pop('cx')
+ cy = shape_data.pop('cy')
+ rx = shape_data.pop('rx')
+ ry = shape_data.pop('ry')
+
+ # TODO: Use pygeos
+ N = 32
+ points = [(math.cos(2*math.pi / N*x) * rx + cx, math.sin(2*math.pi / N*x) * ry + cy) for x in range(0, N+1)]
+
+ return AreaOfInterest(points)
+
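Illustrative shape dictionaries accepted by the new from_dict() (all values are made up):

    from argaze.AreaOfInterest import AOIFeatures

    screen = AOIFeatures.AreaOfInterest.from_dict({"Rectangle": {"x": 0, "y": 0, "width": 1920, "height": 1080}})
    gauge = AOIFeatures.AreaOfInterest.from_dict({"Circle": {"cx": 960, "cy": 540, "radius": 100}})
    dial = AOIFeatures.AreaOfInterest.from_dict({"Ellipse": {"cx": 960, "cy": 540, "rx": 150, "ry": 100}})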
@property
def dimension(self) -> int:
"""Number of axis coding area points positions."""
@@ -127,8 +175,8 @@ class AreaOfInterest(numpy.ndarray):
return mpath.Path(self).contains_points([point])[0]
- def inner_axis(self, point: tuple) -> tuple:
- """Transform the coordinates from the global axis to the AOI's axis.
+ def inner_axis(self, x: float, y: float) -> tuple:
+ """Transform a point coordinates from global axis to AOI axis.
!!! warning
Available for 2D AOI only.
!!! danger
@@ -143,35 +191,30 @@ class AreaOfInterest(numpy.ndarray):
Dst = numpy.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]]).astype(numpy.float32)
P = cv2.getPerspectiveTransform(Src, Dst)
- X = numpy.append(numpy.array(numpy.array(point) - Src_origin), [1.0]).astype(numpy.float32)
+ X = numpy.append(numpy.array(numpy.array([x, y]) - Src_origin), [1.0]).astype(numpy.float32)
Y = numpy.dot(P, X)
La = (Y/Y[2])[:-1]
return tuple(numpy.around(La, 4))
- def outter_axis(self, point: tuple) -> tuple:
- """Transform the coordinates from the AOI's axis to the global axis.
- !!! warning
- Available for 2D AOI only.
+ def outter_axis(self, x: float, y: float) -> tuple:
+ """Transform a point coordinates from AOI axis to global axis.
!!! danger
- The AOI points must be sorted in clockwise order."""
-
- assert(self.dimension == 2)
-
- Src = numpy.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]]).astype(numpy.float32)
+ The AOI points must be sorted in clockwise order.
+ !!! danger
+ The AOI must be a rectangle."""
- Dst = self.astype(numpy.float32)
- Dst_origin = Dst[0]
- Dst = (Dst - Dst_origin).reshape((len(Dst)), 2)
+ # Origin point
+ O = self[0]
- P = cv2.getPerspectiveTransform(Src, Dst)
- X = numpy.array([point[0], point[1], 1.0]).astype(numpy.float32)
- Y = numpy.dot(P, X)
+ # Horizontal axis vector
+ H = self[1] - self[0]
- Lp = Dst_origin + (Y/Y[2])[:-1]
+ # Vertical axis vector
+ V = self[3] - self[0]
- return tuple(numpy.rint(Lp).astype(int))
+ return tuple(O + x * H + y * V)
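A worked example of the simplified outter_axis(): for a rectangle with origin O = (10, 10), horizontal axis vector H = (100, 0) and vertical axis vector V = (0, 50), the normalized point (0.5, 0.5) maps to O + 0.5*H + 0.5*V = (60, 35), the rectangle centre.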
def circle_intersection(self, center: tuple, radius: float) -> Tuple[numpy.array, float, float]:
"""Get intersection shape with a circle, intersection area / AOI area ratio and intersection area / circle area ratio.
@@ -254,8 +297,15 @@ class AOIScene():
# Load areas
areas = {}
- for name, area in aoi_scene_data.items():
- areas[name] = AreaOfInterest(area)
+ for area_name, area_data in aoi_scene_data.items():
+
+ if type(area_data) == list:
+
+ areas[area_name] = AreaOfInterest(area_data)
+
+ elif type(area_data) == dict:
+
+ areas[area_name] = AreaOfInterest.from_dict(area_data)
# Default dimension is 0
dimension = 0
@@ -281,7 +331,7 @@ class AOIScene():
aoi_scene_data = json.load(configuration_file)
working_directory = os.path.dirname(json_filepath)
- return AOIScene.from_dict(aoi_scene_data, working_directory)
+ return AOIScene.from_dict(aoi_scene_data, working_directory)
def __getitem__(self, name) -> AreaOfInterest:
"""Get an AOI from the scene."""
@@ -353,6 +403,42 @@ class AOIScene():
return output
+ def __add__(self, add_vector) -> AOISceneType:
+ """Add vector to scene."""
+
+ assert(len(add_vector) == self.__dimension)
+
+ for name, area in self.__areas.items():
+
+ self.__areas[name] = self.__areas[name] + add_vector
+
+ return self
+
+ # Allow n + scene operation
+ __radd__ = __add__
+
+ def __sub__(self, sub_vector) -> AOISceneType:
+ """Sub vector to scene."""
+
+ assert(len(sub_vector) == self.__dimension)
+
+ for name, area in self.__areas.items():
+
+ self.__areas[name] = self.__areas[name] - sub_vector
+
+ return self
+
+ def __rsub__(self, rsub_vector) -> AOISceneType:
+ """RSub vector to scene."""
+
+ assert(len(rsub_vector) == self.__dimension)
+
+ for name, area in self.__areas.items():
+
+ self.__areas[name] = rsub_vector - self.__areas[name]
+
+ return self
+
def __mul__(self, scale_vector) -> AOISceneType:
"""Scale scene by a vector."""
@@ -367,6 +453,16 @@ class AOIScene():
# Allow n * scene operation
__rmul__ = __mul__
+ def __truediv__(self, div_vector) -> AOISceneType:
+
+ assert(len(div_vector) == self.__dimension)
+
+ for name, area in self.__areas.items():
+
+ self.__areas[name] = self.__areas[name] / div_vector
+
+ return self
+
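A hedged sketch of the new arithmetic operators; note they mutate and return the same scene object rather than building a copy, so `scene + (10, 10)` translates `scene` itself:

    from argaze.AreaOfInterest import AOI2DScene, AOIFeatures

    scene = AOI2DScene.AOI2DScene({"A": AOIFeatures.AreaOfInterest([[0, 0], [1, 0], [1, 1], [0, 1]])})

    scene + (10, 10)   # translate every area by (10, 10)
    scene * (2, 2)     # scale every area by 2 on both axes
    scene / (2, 2)     # scale back down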
def items(self) -> Tuple[str, AreaOfInterest]:
"""Iterate over areas."""
@@ -379,7 +475,7 @@ class AOIScene():
@property
def dimension(self) -> int:
- """Dimension of the AOIs in scene."""
+ """Dimension of the AOI in scene."""
return self.__dimension
diff --git a/src/argaze/DataStructures.py b/src/argaze/DataStructures.py
index 08a7d2c..9e35dea 100644
--- a/src/argaze/DataStructures.py
+++ b/src/argaze/DataStructures.py
@@ -45,6 +45,15 @@ def as_dict(dataclass_object) -> dict:
# Copy fields values
return {name: vars(dataclass_object)[name] for name in fields_names}
+def module_path(obj) -> str:
+ """
+ Get object module path.
+
+ Returns:
+ module path
+ """
+ return obj.__class__.__module__
+
class JsonEncoder(json.JSONEncoder):
"""Specific ArGaze JSON Encoder."""
@@ -55,10 +64,10 @@ class JsonEncoder(json.JSONEncoder):
if isinstance(obj, numpy.integer):
return int(obj)
- if isinstance(obj, numpy.floating):
+ elif isinstance(obj, numpy.floating):
return float(obj)
- if isinstance(obj, numpy.ndarray):
+ elif isinstance(obj, numpy.ndarray):
return obj.tolist()
# default case
@@ -73,7 +82,19 @@ class JsonEncoder(json.JSONEncoder):
public_dict = {}
for k, v in vars(obj).items():
+
if not k.startswith('_'):
+
+ # numpy cases
+ if isinstance(v, numpy.integer):
+ v = int(v)
+
+ elif isinstance(v, numpy.floating):
+ v = float(v)
+
+ elif isinstance(v, numpy.ndarray):
+ v = v.tolist()
+
public_dict[k] = v
return public_dict
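A hedged sketch of the extended encoder: numpy values held by public attributes now serialize, while underscore-prefixed attributes stay ignored:

    import json
    import numpy

    from argaze import DataStructures

    class Sample():

        def __init__(self):

            self.value = numpy.float64(1.5)    # encoded as float
            self.points = numpy.zeros((2, 2))  # encoded as nested lists
            self._hidden = 'skipped'           # not encoded

    print(json.dumps(Sample(), cls=DataStructures.JsonEncoder))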
diff --git a/src/argaze/GazeAnalysis/Basic.py b/src/argaze/GazeAnalysis/Basic.py
index 7b41731..dc7b4fd 100644
--- a/src/argaze/GazeAnalysis/Basic.py
+++ b/src/argaze/GazeAnalysis/Basic.py
@@ -79,12 +79,27 @@ class AOIScanPathAnalyzer(GazeFeatures.AOIScanPathAnalyzer):
self.__steps_number = len(aoi_scan_path)
sum_fixation_durations = 0
+ self.__sum_aoi_fixation_durations = {}
for aoi_scan_step in aoi_scan_path:
sum_fixation_durations += aoi_scan_step.fixation_duration
+ try:
+
+ self.__sum_aoi_fixation_durations[aoi_scan_step.aoi] = self.__sum_aoi_fixation_durations[aoi_scan_step.aoi] + aoi_scan_step.fixation_duration
+
+ except KeyError:
+
+ self.__sum_aoi_fixation_durations[aoi_scan_step.aoi] = aoi_scan_step.fixation_duration
+
self.__step_fixation_durations_average = sum_fixation_durations / self.__steps_number
+
+ self.__aoi_fixation_distribution = {}
+
+ for aoi_name, sum_aoi_fixation_duration in self.__sum_aoi_fixation_durations.items():
+
+ self.__aoi_fixation_distribution[aoi_name] = sum_aoi_fixation_duration / sum_fixation_durations
@property
def path_duration(self) -> float:
@@ -102,4 +117,10 @@ class AOIScanPathAnalyzer(GazeFeatures.AOIScanPathAnalyzer):
def step_fixation_durations_average(self) -> float:
"""AOI scan path step fixation durations average."""
- return self.__step_fixation_durations_average \ No newline at end of file
+ return self.__step_fixation_durations_average
+
+ @property
+ def aoi_fixation_distribution(self) -> dict:
+ """percentage of time spent on each AOI."""
+
+ return self.__aoi_fixation_distribution \ No newline at end of file
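A small worked example of the new property: with fixation durations of 300ms and 100ms on AOI 'A' and 600ms on AOI 'B', the total is 1000ms, so aoi_fixation_distribution would be {'A': 0.4, 'B': 0.6}.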
diff --git a/src/argaze/GazeAnalysis/DeviationCircleCoverage.py b/src/argaze/GazeAnalysis/DeviationCircleCoverage.py
index f0decfc..acc0665 100644
--- a/src/argaze/GazeAnalysis/DeviationCircleCoverage.py
+++ b/src/argaze/GazeAnalysis/DeviationCircleCoverage.py
@@ -33,8 +33,8 @@ class AOIMatcher(GazeFeatures.AOIMatcher):
self.__look_count = 0
self.__looked_aoi_data = (None, None)
+ self.__looked_probabilities = {}
self.__circle_ratio_sum = {}
- self.__aois_coverages = {}
self.__matched_gaze_movement = None
self.__matched_region = None
@@ -54,7 +54,7 @@ class AOIMatcher(GazeFeatures.AOIMatcher):
# BAD: we use deviation_max attribute which is an attribute of DispersionThresholdIdentification.Fixation class
region, _, circle_ratio = aoi.circle_intersection(gaze_movement.focus, gaze_movement.deviation_max)
- if name not in self.exclude and circle_ratio > 0:
+ if name not in self.exclude and circle_ratio > self.coverage_threshold:
# Sum circle ratio to update aoi coverage
try:
@@ -78,15 +78,15 @@ class AOIMatcher(GazeFeatures.AOIMatcher):
# Update looked aoi data
self.__looked_aoi_data = most_likely_looked_aoi_data
- # Calculate looked aoi circle ratio means
- self.__aois_coverages = {}
+ # Calculate circle ratio means as looked probabilities
+ self.__looked_probabilities = {}
for aoi_name, circle_ratio_sum in self.__circle_ratio_sum.items():
circle_ratio_mean = circle_ratio_sum / self.__look_count
- # filter circle ration mean greater than 1
- self.__aois_coverages[aoi_name] = circle_ratio_mean if circle_ratio_mean < 1 else 1
+ # Avoid probability greater than 1
+ self.__looked_probabilities[aoi_name] = circle_ratio_mean if circle_ratio_mean < 1 else 1
# Update matched gaze movement
self.__matched_gaze_movement = gaze_movement
@@ -95,9 +95,7 @@ class AOIMatcher(GazeFeatures.AOIMatcher):
self.__matched_region = matched_region
# Return
- if self.__aois_coverages[most_likely_looked_aoi_data[0]] > self.coverage_threshold:
-
- return self.__looked_aoi_data
+ return self.__looked_aoi_data
elif GazeFeatures.is_saccade(gaze_movement):
@@ -109,14 +107,13 @@ class AOIMatcher(GazeFeatures.AOIMatcher):
return (None, None)
- def draw(self, image: numpy.array, aoi_scene: AOIFeatures.AOIScene, draw_matched_fixation: dict = None, draw_matched_fixation_positions: dict = None, draw_matched_region: dict = None, draw_looked_aoi: dict = None, update_looked_aoi: bool = False, looked_aoi_name_color: tuple = None, looked_aoi_name_offset: tuple = (0, 0)):
+ def draw(self, image: numpy.array, aoi_scene: AOIFeatures.AOIScene, draw_matched_fixation: dict = None, draw_matched_region: dict = None, draw_looked_aoi: dict = None, update_looked_aoi: bool = False, looked_aoi_name_color: tuple = None, looked_aoi_name_offset: tuple = (0, 0)):
"""Draw matching into image.
Parameters:
image: where to draw
aoi_scene: to refresh looked aoi if required
draw_matched_fixation: Fixation.draw parameters (which depends of the loaded gaze movement identifier module, if None, no fixation is drawn)
- draw_matched_fixation_positions: GazeMovement.draw_positions parameters (if None, no fixation is drawn)
draw_matched_region: AOIFeatures.AOI.draw parameters (if None, no matched region is drawn)
draw_looked_aoi: AOIFeatures.AOI.draw parameters (if None, no looked aoi is drawn)
looked_aoi_name_color: color of text (if None, no looked aoi name is drawn)
@@ -132,11 +129,6 @@ class AOIMatcher(GazeFeatures.AOIMatcher):
self.__matched_gaze_movement.draw(image, **draw_matched_fixation)
- # Draw matched fixation positions if required
- if draw_matched_fixation_positions is not None:
-
- self.__matched_gaze_movement.draw_positions(image, **draw_matched_fixation_positions)
-
# Draw matched aoi
if self.looked_aoi.all() is not None:
@@ -179,8 +171,11 @@ class AOIMatcher(GazeFeatures.AOIMatcher):
return self.__looked_aoi_data[0]
@property
- def aois_coverages(self) -> dict:
- """Get all aois coverage means for current fixation.
- It represents the ratio of fixation deviation circle surface that used to cover the aoi."""
+ def looked_probabilities(self) -> dict:
+ """Get probabilities to be looked by current fixation for each aoi.
+
+ !!! note
+ AOI where the fixation deviation circle never passed the coverage threshold are missing.
+ """
- return self.__aois_coverages \ No newline at end of file
+ return self.__looked_probabilities \ No newline at end of file
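A small worked example of the renamed property: assuming 4 matched fixation positions where AOI 'B' intersected the deviation circle with ratios 0.5, 1.0, 1.0 and 1.0, the sum is 3.5 and looked_probabilities['B'] would be 3.5 / 4 = 0.875.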
diff --git a/src/argaze/GazeAnalysis/DispersionThresholdIdentification.py b/src/argaze/GazeAnalysis/DispersionThresholdIdentification.py
index 15fddf4..a7b9900 100644
--- a/src/argaze/GazeAnalysis/DispersionThresholdIdentification.py
+++ b/src/argaze/GazeAnalysis/DispersionThresholdIdentification.py
@@ -73,7 +73,7 @@ class Fixation(GazeFeatures.Fixation):
return self
- def draw(self, image: numpy.array, deviation_circle_color: tuple = None, duration_border_color: tuple = None, duration_factor: float = 1.):
+ def draw(self, image: numpy.array, deviation_circle_color: tuple = None, duration_border_color: tuple = None, duration_factor: float = 1., draw_positions: dict = None):
"""Draw fixation into image.
Parameters:
@@ -82,15 +82,20 @@ class Fixation(GazeFeatures.Fixation):
duration_factor: how many pixels per duration unit
"""
+ # Draw duration border if required
+ if duration_border_color is not None:
+
+ cv2.circle(image, (int(self.focus[0]), int(self.focus[1])), int(self.deviation_max), duration_border_color, int(self.duration * duration_factor))
+
# Draw deviation circle if required
if deviation_circle_color is not None:
cv2.circle(image, (int(self.focus[0]), int(self.focus[1])), int(self.deviation_max), deviation_circle_color, -1)
- # Draw duration border if required
- if duration_border_color is not None:
+ # Draw positions if required
+ if draw_positions is not None:
- cv2.circle(image, (int(self.focus[0]), int(self.focus[1])), int(self.deviation_max), duration_border_color, int(self.duration * duration_factor))
+ self.draw_positions(image, **draw_positions)
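A hedged call sketch for the reordered draw(), which now paints the duration border first and can overlay the raw gaze positions last (colors and factor are illustrative; `fixation` and `image` are assumed to exist):

    fixation.draw(image,
                  deviation_circle_color=(255, 255, 255),
                  duration_border_color=(0, 255, 255),
                  duration_factor=0.01,
                  draw_positions={'position_color': (0, 255, 255), 'line_color': (0, 0, 0)})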
@dataclass(frozen=True)
class Saccade(GazeFeatures.Saccade):
diff --git a/src/argaze/GazeAnalysis/ExploitExploreRatio.py b/src/argaze/GazeAnalysis/ExploreExploitRatio.py
index f35561f..b4550e7 100644
--- a/src/argaze/GazeAnalysis/ExploitExploreRatio.py
+++ b/src/argaze/GazeAnalysis/ExploreExploitRatio.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python
-"""Exploit/Explore ratio module.
+"""Explore/Explore ratio module.
"""
__author__ = "Théo de la Hogue"
@@ -16,13 +16,8 @@ import numpy
@dataclass
class ScanPathAnalyzer(GazeFeatures.ScanPathAnalyzer):
- """Implementation of exploit vs explore ratio algorithm as described in:
+ """Implementation of explore vs exploit ratio algorithm as described in:
- **Goldberg J. H., Kotval X. P. (1999).**
- *Computer interface evaluation using eye movements: methods and constructs.*
- International Journal of Industrial Ergonomics (631–645).
- [https://doi.org/10.1016/S0169-8141(98)00068-7](https://doi.org/10.1016/S0169-8141\\(98\\)00068-7)
-
**Dehais F., Peysakhovich V., Scannella S., Fongue J., Gateau T. (2015).**
*Automation surprise in aviation: Real-time solutions.*
Proceedings of the 33rd annual ACM conference on Human Factors in Computing Systems (2525–2534).
@@ -36,7 +31,7 @@ class ScanPathAnalyzer(GazeFeatures.ScanPathAnalyzer):
super().__init__()
- self.__exploit_explore_ratio = 0.
+ self.__explore_exploit_ratio = 0.
def analyze(self, scan_path: GazeFeatures.ScanPathType):
"""Analyze scan path."""
@@ -63,13 +58,13 @@ class ScanPathAnalyzer(GazeFeatures.ScanPathAnalyzer):
long_fixations_duration = numpy.array(long_fixations_durations).sum()
saccades_duration = numpy.array(saccades_durations).sum()
- assert(saccades_duration + short_fixations_duration > 0)
+ assert(long_fixations_duration > 0)
- self.__exploit_explore_ratio = long_fixations_duration / (saccades_duration + short_fixations_duration)
+ self.__explore_exploit_ratio = (saccades_duration + short_fixations_duration) / long_fixations_duration
@property
- def exploit_explore_ratio(self) -> float:
- """Exploit/Explore ratio."""
+ def explore_exploit_ratio(self) -> float:
+ """Explore/Exploit ratio."""
- return self.__exploit_explore_ratio
+ return self.__explore_exploit_ratio
\ No newline at end of file
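A small worked example of the inverted metric: with 2s of saccades, 1s of short fixations and 6s of long fixations, the former exploit/explore ratio was 6 / (2 + 1) = 2.0, while the new explore/exploit ratio is (2 + 1) / 6 = 0.5, so higher values now mean more exploration.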
diff --git a/src/argaze/GazeAnalysis/FocusPointInside.py b/src/argaze/GazeAnalysis/FocusPointInside.py
index b3651e4..81a9d20 100644
--- a/src/argaze/GazeAnalysis/FocusPointInside.py
+++ b/src/argaze/GazeAnalysis/FocusPointInside.py
@@ -54,14 +54,13 @@ class AOIMatcher(GazeFeatures.AOIMatcher):
return (None, None)
- def draw(self, image: numpy.array, aoi_scene: AOIFeatures.AOIScene, draw_matched_fixation: dict = None, draw_matched_fixation_positions: dict = None, draw_looked_aoi: dict = None, looked_aoi_name_color: tuple = None, looked_aoi_name_offset: tuple = (0, 0)):
+ def draw(self, image: numpy.array, aoi_scene: AOIFeatures.AOIScene, draw_matched_fixation: dict = None, draw_looked_aoi: dict = None, looked_aoi_name_color: tuple = None, looked_aoi_name_offset: tuple = (0, 0)):
"""Draw matching into image.
Parameters:
image: where to draw
aoi_scene: to refresh looked aoi if required
draw_matched_fixation: Fixation.draw parameters (which depends of the loaded gaze movement identifier module, if None, no fixation is drawn)
- draw_matched_fixation_positions: GazeMovement.draw_positions parameters (if None, no fixation is drawn)
draw_looked_aoi: AOIFeatures.AOI.draw parameters (if None, no looked aoi is drawn)
looked_aoi_name_color: color of text (if None, no looked aoi name is drawn)
looked_aoi_name_offset: ofset of text from the upper left aoi bounding box corner
@@ -76,11 +75,6 @@ class AOIMatcher(GazeFeatures.AOIMatcher):
self.__matched_gaze_movement.draw(image, **draw_matched_fixation)
- # Draw matched fixation positions if required
- if draw_matched_fixation_positions is not None:
-
- self.__matched_gaze_movement.draw_positions(image, **draw_matched_fixation_positions)
-
# Draw matched aoi
if self.looked_aoi.all() is not None:
diff --git a/src/argaze/GazeAnalysis/KCoefficient.py b/src/argaze/GazeAnalysis/KCoefficient.py
index 80fe1fd..c50bc3a 100644
--- a/src/argaze/GazeAnalysis/KCoefficient.py
+++ b/src/argaze/GazeAnalysis/KCoefficient.py
@@ -52,19 +52,24 @@ class ScanPathAnalyzer(GazeFeatures.ScanPathAnalyzer):
duration_std = numpy.std(durations)
amplitude_std = numpy.std(amplitudes)
- Ks = []
- for scan_step in scan_path:
+ if duration_std > 0. and amplitude_std > 0.:
+
+ Ks = []
+ for scan_step in scan_path:
+
+ Ks.append((abs(scan_step.duration - duration_mean) / duration_std) - (abs(scan_step.last_saccade.amplitude - amplitude_mean) / amplitude_std))
+
+ self.__K = numpy.array(Ks).mean()
- Ks.append(((scan_step.duration - duration_mean) / duration_std) - ((scan_step.last_saccade.amplitude - amplitude_mean) / amplitude_std))
+ else:
- self.__K = numpy.array(Ks).mean()
+ self.__K = 0.
@property
def K(self) -> float:
"""K coefficient."""
return self.__K
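A hedged restatement of the fixed computation: each scan step now contributes K_i = |d_i - mean(d)| / std(d) - |a_i - mean(a)| / std(a), where d are step durations and a are last saccade amplitudes, and K falls back to 0 when either standard deviation is zero, a case that previously raised a division by zero on uniform paths.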
-
@dataclass
class AOIScanPathAnalyzer(GazeFeatures.AOIScanPathAnalyzer):
@@ -104,12 +109,18 @@ class AOIScanPathAnalyzer(GazeFeatures.AOIScanPathAnalyzer):
duration_std = numpy.std(durations)
amplitude_std = numpy.std(amplitudes)
- Ks = []
- for aoi_scan_step in aoi_scan_path:
+ if duration_std > 0. and amplitude_std > 0.:
+
+ Ks = []
+ for aoi_scan_step in aoi_scan_path:
+
+ Ks.append((abs(aoi_scan_step.duration - duration_mean) / duration_std) - (abs(aoi_scan_step.last_saccade.amplitude - amplitude_mean) / amplitude_std))
+
+ self.__K = numpy.array(Ks).mean()
- Ks.append(((aoi_scan_step.duration - duration_mean) / duration_std) - ((aoi_scan_step.last_saccade.amplitude - amplitude_mean) / amplitude_std))
+ else:
- self.__K = numpy.array(Ks).mean()
+ self.__K = 0.
@property
def K(self) -> float:
diff --git a/src/argaze/GazeAnalysis/LinearRegression.py b/src/argaze/GazeAnalysis/LinearRegression.py
new file mode 100644
index 0000000..0e10b87
--- /dev/null
+++ b/src/argaze/GazeAnalysis/LinearRegression.py
@@ -0,0 +1,107 @@
+#!/usr/bin/env python
+
+"""Module for gaze position calibration based on linear regression.
+"""
+
+__author__ = "Théo de la Hogue"
+__credits__ = []
+__copyright__ = "Copyright 2023, Ecole Nationale de l'Aviation Civile (ENAC)"
+__license__ = "BSD"
+
+from typing import TypeVar, Tuple
+from dataclasses import dataclass, field
+
+from argaze import GazeFeatures
+
+from sklearn.linear_model import LinearRegression
+import numpy
+import cv2
+
+GazePositionType = TypeVar('GazePositionType', bound="GazePositionType")
+# Type definition for type annotation convenience
+
+@dataclass
+class GazePositionCalibrator(GazeFeatures.GazePositionCalibrator):
+ """Calibration algorithm based on linear regression."""
+
+ coefficients: numpy.array = field(default_factory=lambda : numpy.array([[1., 0.], [0., 1.]]))
+ """Linear regression coefficients"""
+
+ intercept: numpy.array = field(default_factory=lambda : numpy.array([0., 0.]))
+ """Linear regression intercept value"""
+
+ def __post_init__(self):
+ """Init calibration."""
+
+ self.__linear_regression = LinearRegression()
+ self.__linear_regression.coef_ = numpy.array(self.coefficients)
+ self.__linear_regression.intercept_ = numpy.array(self.intercept)
+
+ def store(self, timestamp: int|float, observed_gaze_position: GazeFeatures.GazePosition, expected_gaze_position: GazeFeatures.GazePosition):
+ """Store observed and expected gaze positions."""
+
+ self.__observed_positions.append(observed_gaze_position.value)
+ self.__expected_positions.append(expected_gaze_position.value)
+
+ def reset(self):
+ """Reset observed and expected gaze positions."""
+
+ self.__observed_positions = []
+ self.__expected_positions = []
+ self.__linear_regression = None
+
+ def calibrate(self) -> float:
+ """Process calibration from observed and expected gaze positions.
+
+ Returns:
+ score: R² score of the fitted linear regression
+ """
+
+ self.__linear_regression = LinearRegression().fit(self.__observed_positions, self.__expected_positions)
+
+ # Update frozen coefficients attribute
+ object.__setattr__(self, 'coefficients', self.__linear_regression.coef_)
+
+ # Update frozen intercept attribute
+ object.__setattr__(self, 'intercept', self.__linear_regression.intercept_)
+
+ # Return regression score
+ return self.__linear_regression.score(self.__observed_positions, self.__expected_positions)
+
+ def apply(self, gaze_position: GazeFeatures.GazePosition) -> GazePositionType:
+ """Apply calibration onto observed gaze position."""
+
+ if not self.calibrating:
+
+ return GazeFeatures.GazePosition(self.__linear_regression.predict(numpy.array([gaze_position.value]))[0], precision=gaze_position.precision)
+
+ else:
+
+ return gaze_position
+
+ def draw(self, image: numpy.array, size: tuple, resolution: tuple, line_color: tuple = (0, 0, 0), thickness: int = 1):
+ """Draw calibration field."""
+
+ width, height = size
+
+ if width * height > 0:
+
+ rx, ry = resolution
+ lx = numpy.linspace(0, width, rx)
+ ly = numpy.linspace(0, height, ry)
+ xv, yv = numpy.meshgrid(lx, ly, indexing='ij')
+
+ for i in range(rx):
+
+ for j in range(ry):
+
+ start = (xv[i][j], yv[i][j])
+ end = self.apply(GazeFeatures.GazePosition(start)).value
+
+ cv2.line(image, (int(start[0]), int(start[1])), (int(end[0]), int(end[1])), line_color, thickness)
+
+ @property
+ def calibrating(self) -> bool:
+ """Is the calibration running?"""
+
+ return self.__linear_regression is None \ No newline at end of file
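A hedged end-to-end sketch of the new calibrator (positions are made up); note reset() must run first to enter the calibrating state before store() can collect pairs:

    from argaze import GazeFeatures
    from argaze.GazeAnalysis import LinearRegression

    calibrator = LinearRegression.GazePositionCalibrator()
    calibrator.reset()

    # Hypothetical observed/expected pairs from a calibration routine
    calibrator.store(0, GazeFeatures.GazePosition((12, 8)), GazeFeatures.GazePosition((10, 10)))
    calibrator.store(40, GazeFeatures.GazePosition((102, 95)), GazeFeatures.GazePosition((100, 100)))

    score = calibrator.calibrate()  # R² score of the fit
    corrected = calibrator.apply(GazeFeatures.GazePosition((50, 50)))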
diff --git a/src/argaze/GazeAnalysis/TransitionMatrix.py b/src/argaze/GazeAnalysis/TransitionMatrix.py
index 6f408e4..b346b5a 100644
--- a/src/argaze/GazeAnalysis/TransitionMatrix.py
+++ b/src/argaze/GazeAnalysis/TransitionMatrix.py
@@ -42,7 +42,7 @@ class AOIScanPathAnalyzer(GazeFeatures.AOIScanPathAnalyzer):
row_sum = aoi_scan_path.transition_matrix.apply(lambda row: row.sum(), axis=1)
# Editing transition matrix probabilities
- # Note: when no transiton starts from an aoi, destination probabilites is equal to 1/S where S is the number of aois
+ # Note: when no transition starts from an AOI, destination probabilities are equal to 1/S where S is the number of AOI
self.__transition_matrix_probabilities = aoi_scan_path.transition_matrix.apply(lambda row: row.apply(lambda p: p / row_sum[row.name] if row_sum[row.name] > 0 else 1 / row_sum.size), axis=1)
# Calculate matrix density
diff --git a/src/argaze/GazeAnalysis/VelocityThresholdIdentification.py b/src/argaze/GazeAnalysis/VelocityThresholdIdentification.py
index 64931f5..d10f666 100644
--- a/src/argaze/GazeAnalysis/VelocityThresholdIdentification.py
+++ b/src/argaze/GazeAnalysis/VelocityThresholdIdentification.py
@@ -72,7 +72,7 @@ class Fixation(GazeFeatures.Fixation):
return self
- def draw(self, image: numpy.array, deviation_circle_color: tuple = None, duration_border_color: tuple = None, duration_factor: float = 1.):
+ def draw(self, image: numpy.array, deviation_circle_color: tuple = None, duration_border_color: tuple = None, duration_factor: float = 1., draw_positions: dict = None):
"""Draw fixation into image.
Parameters:
@@ -81,15 +81,20 @@ class Fixation(GazeFeatures.Fixation):
duration_factor: how many pixels per duration unit
"""
+ # Draw duration border if required
+ if duration_border_color is not None:
+
+ cv2.circle(image, (int(self.focus[0]), int(self.focus[1])), int(self.deviation_max), duration_border_color, int(self.duration * duration_factor))
+
# Draw deviation circle if required
if deviation_circle_color is not None:
cv2.circle(image, (int(self.focus[0]), int(self.focus[1])), int(self.deviation_max), deviation_circle_color, -1)
- # Draw duration border if required
- if duration_border_color is not None:
+ # Draw positions if required
+ if draw_positions is not None:
- cv2.circle(image, (int(self.focus[0]), int(self.focus[1])), int(self.deviation_max), duration_border_color, int(self.duration * duration_factor))
+ self.draw_positions(image, **draw_positions)
@dataclass(frozen=True)
class Saccade(GazeFeatures.Saccade):
diff --git a/src/argaze/GazeAnalysis/__init__.py b/src/argaze/GazeAnalysis/__init__.py
index 164de74..c110eb1 100644
--- a/src/argaze/GazeAnalysis/__init__.py
+++ b/src/argaze/GazeAnalysis/__init__.py
@@ -1,4 +1,4 @@
"""
Various gaze movement identification, AOI matching and scan path analysis algorithms.
"""
-__all__ = ['Basic', 'DispersionThresholdIdentification', 'VelocityThresholdIdentification', 'TransitionMatrix', 'KCoefficient', 'LempelZivComplexity', 'NGram', 'Entropy', 'NearestNeighborIndex', 'ExploitExploreRatio'] \ No newline at end of file
+__all__ = ['Basic', 'DispersionThresholdIdentification', 'VelocityThresholdIdentification', 'TransitionMatrix', 'KCoefficient', 'LempelZivComplexity', 'NGram', 'Entropy', 'NearestNeighborIndex', 'ExploreExploitRatio', 'LinearRegression'] \ No newline at end of file
diff --git a/src/argaze/GazeFeatures.py b/src/argaze/GazeFeatures.py
index 2dd1cab..46e9f17 100644
--- a/src/argaze/GazeFeatures.py
+++ b/src/argaze/GazeFeatures.py
@@ -12,6 +12,7 @@ from dataclasses import dataclass, field
import math
import ast
import json
+import importlib
from inspect import getmembers
from argaze import DataStructures
@@ -201,6 +202,113 @@ class TimeStampedGazePositions(DataStructures.TimeStampedBuffer):
return TimeStampedGazePositions(df.to_dict('index'))
+class GazePositionCalibrationFailed(Exception):
+ """Exception raised by GazePositionCalibrator."""
+
+ def __init__(self, message):
+
+ super().__init__(message)
+
+GazePositionCalibratorType = TypeVar('GazePositionCalibrator', bound="GazePositionCalibrator")
+# Type definition for type annotation convenience
+
+@dataclass
+class GazePositionCalibrator():
+ """Abstract class to define what should provide a gaze position calibrator algorithm."""
+
+ @classmethod
+ def from_dict(self, calibrator_data: dict) -> GazePositionCalibratorType:
+ """Load gaze position calibrator from dictionary.
+
+ Parameters:
+ calibrator_data: dictionary with class name and attributes to load
+ """
+ gaze_position_calibrator_module_path, gaze_position_calibrator_parameters = calibrator_data.popitem()
+
+ # Prepend argaze.GazeAnalysis path when a single name is provided
+ if len(gaze_position_calibrator_module_path.split('.')) == 1:
+ gaze_position_calibrator_module_path = f'argaze.GazeAnalysis.{gaze_position_calibrator_module_path}'
+
+ gaze_position_calibrator_module = importlib.import_module(gaze_position_calibrator_module_path)
+ return gaze_position_calibrator_module.GazePositionCalibrator(**gaze_position_calibrator_parameters)
+
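A hedged example of a dictionary the loader above would accept; the short module name expands to argaze.GazeAnalysis.LinearRegression and the parameters match that class's fields:

    calibrator = GazeFeatures.GazePositionCalibrator.from_dict({
        "LinearRegression": {
            "coefficients": [[1., 0.], [0., 1.]],
            "intercept": [0., 0.]
        }
    })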
+ @classmethod
+ def from_json(self, json_filepath: str) -> GazePositionCalibratorType:
+ """Load calibrator from .json file."""
+
+ # Remember file path to ease rewriting
+ self.__json_filepath = json_filepath
+
+ # Open file
+ with open(self.__json_filepath) as calibration_file:
+
+ return GazePositionCalibrator.from_dict(json.load(calibration_file))
+
+ def to_json(self, json_filepath: str = None):
+ """Save calibrator into .json file."""
+
+ # Remember file path to ease rewriting
+ if json_filepath is not None:
+
+ self.__json_filepath = json_filepath
+
+ # Open file
+ with open(self.__json_filepath, 'w', encoding='utf-8') as calibration_file:
+
+ json.dump({DataStructures.module_path(self):DataStructures.JsonEncoder().default(self)}, calibration_file, ensure_ascii=False, indent=4)
+
+ def store(self, timestamp: int|float, observed_gaze_position: GazePosition, expected_gaze_position: GazePosition):
+ """Store observed and expected gaze positions.
+
+ Parameters:
+ timestamp: time of observed gaze position
+ observed_gaze_position: where gaze position actually is
+ expected_gaze_position: where gaze position should be
+ """
+
+ raise NotImplementedError('store() method not implemented')
+
+ def reset(self):
+ """Reset observed and expected gaze positions."""
+
+ raise NotImplementedError('reset() method not implemented')
+
+ def calibrate(self) -> Any:
+ """Process calibration from observed and expected gaze positions.
+
+ Returns:
+ calibration outputs: any data returned to assess calibration
+ """
+
+ raise NotImplementedError('calibrate() method not implemented')
+
+ def apply(self, observed_gaze_position: GazePosition) -> GazePositionType:
+ """Apply calibration onto observed gaze position.
+
+ Parameters:
+ observed_gaze_position: where gaze position actually is
+
+ Returns:
+ expected_gaze_position: where gaze position should be if the calibrator is ready; otherwise, the observed gaze position
+ """
+
+ raise NotImplementedError('apply() method not implemented')
+
+ def draw(self, image: numpy.array):
+ """Draw calibration into image.
+
+ Parameters:
+ image: where to draw
+ """
+
+ raise NotImplementedError('draw() method not implemented')
+
+ @property
+ def calibrating(self) -> bool:
+ """Is the calibration running?"""
+
+ raise NotImplementedError('calibrating getter not implemented')
+
GazeMovementType = TypeVar('GazeMovement', bound="GazeMovement")
# Type definition for type annotation convenience
@@ -292,16 +400,16 @@ class GazeMovement():
ts_start, start_gaze_position = gaze_positions.pop_first()
ts_next, next_gaze_position = gaze_positions.first
- # Draw position if required
- if position_color is not None:
-
- start_gaze_position.draw(image, position_color, draw_precision=False)
-
# Draw line between positions if required
if line_color is not None:
cv2.line(image, (int(start_gaze_position[0]), int(start_gaze_position[1])), (int(next_gaze_position[0]), int(next_gaze_position[1])), line_color, 1)
+ # Draw position if required
+ if position_color is not None:
+
+ start_gaze_position.draw(image, position_color, draw_precision=False)
+
def draw(self, image: numpy.array, **kwargs):
"""Draw gaze movement into image."""
@@ -545,7 +653,7 @@ ScanStepType = TypeVar('ScanStep', bound="ScanStep")
# Type definition for type annotation convenience
class ScanStepError(Exception):
- """Exception raised at ScanStepError creation if a aoi scan step doesn't start by a fixation or doesn't end by a saccade."""
+ """Exception raised at ScanStep creation if a aoi scan step doesn't start by a fixation or doesn't end by a saccade."""
def __init__(self, message):
@@ -755,7 +863,7 @@ AOIScanStepType = TypeVar('AOIScanStep', bound="AOIScanStep")
# Type definition for type annotation convenience
class AOIScanStepError(Exception):
- """Exception raised at AOIScanStepError creation if a aoi scan step doesn't start by a fixation or doesn't end by a saccade."""
+ """Exception raised at AOIScanStep creation if a aoi scan step doesn't start by a fixation or doesn't end by a saccade."""
def __init__(self, message, aoi=''):
@@ -842,13 +950,13 @@ AOIScanPathType = TypeVar('AOIScanPathType', bound="AOIScanPathType")
class AOIScanPath(list):
"""List of aoi scan steps over successive aoi."""
- def __init__(self, expected_aois: list[str] = [], duration_max: int|float = 0):
+ def __init__(self, expected_aoi: list[str] = [], duration_max: int|float = 0):
super().__init__()
self.duration_max = duration_max
-
- self.expected_aois = expected_aois
+ self.expected_aoi = expected_aoi
+
self.__duration = 0
@property
@@ -903,13 +1011,13 @@ class AOIScanPath(list):
return sequence
@property
- def expected_aois(self):
+ def expected_aoi(self):
"""List of all expected aoi."""
- return self.__expected_aois
+ return self.__expected_aoi
- @expected_aois.setter
- def expected_aois(self, expected_aois: list[str] = []):
+ @expected_aoi.setter
+ def expected_aoi(self, expected_aoi: list[str] = []):
"""Edit list of all expected aoi.
!!! warning
@@ -917,15 +1025,15 @@ class AOIScanPath(list):
"""
self.clear()
- self.__expected_aois = expected_aois
+ self.__expected_aoi = expected_aoi
self.__movements = TimeStampedGazeMovements()
self.__current_aoi = ''
self.__index = ord('A')
self.__aoi_letter = {}
self.__letter_aoi = {}
- size = len(self.__expected_aois)
- self.__transition_matrix = pandas.DataFrame(numpy.zeros((size, size)), index=self.__expected_aois, columns=self.__expected_aois)
+ size = len(self.__expected_aoi)
+ self.__transition_matrix = pandas.DataFrame(numpy.zeros((size, size)), index=self.__expected_aoi, columns=self.__expected_aoi)
@property
def current_aoi(self):
@@ -953,7 +1061,7 @@ class AOIScanPath(list):
!!! warning
It could raise AOIScanStepError"""
- if looked_aoi not in self.__expected_aois:
+ if looked_aoi not in self.__expected_aoi:
raise AOIScanStepError('AOI not expected', looked_aoi)
@@ -1013,7 +1121,7 @@ class AOIScanPath(list):
"""Get how many fixations are there in the scan path and how many fixation are there in each aoi."""
scan_fixations_count = 0
- aoi_fixations_count = {aoi: 0 for aoi in self.__expected_aois}
+ aoi_fixations_count = {aoi: 0 for aoi in self.__expected_aoi}
for aoi_scan_step in self:
diff --git a/src/argaze/utils/aruco_markers_group_export.py b/src/argaze/utils/aruco_markers_group_export.py
new file mode 100644
index 0000000..d948105
--- /dev/null
+++ b/src/argaze/utils/aruco_markers_group_export.py
@@ -0,0 +1,160 @@
+#!/usr/bin/env python
+
+""" """
+
+__author__ = "Théo de la Hogue"
+__credits__ = []
+__copyright__ = "Copyright 2023, Ecole Nationale de l'Aviation Civile (ENAC)"
+__license__ = "BSD"
+
+import argparse
+import time
+import itertools
+
+from argaze.ArUcoMarkers import ArUcoCamera, ArUcoMarkersGroup
+from argaze.utils import UtilsFeatures
+
+import cv2
+import numpy
+
+def main():
+ """
+ Load a MOVIE and an ArUcoCamera CONFIGURATION to detect ArUco markers inside a selected movie frame, then export the detected ArUco markers group as an .obj file into an OUTPUT folder.
+ """
+
+ # Manage arguments
+ parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0])
+ parser.add_argument('movie', metavar='MOVIE', type=str, default=None, help='movie path')
+ parser.add_argument('configuration', metavar='CONFIGURATION', type=str, default=None, help='ArUco camera configuration')
+
+ parser.add_argument('-s','--start', metavar='START', type=float, default=0., help='start time in second')
+ parser.add_argument('-o', '--output', metavar='OUTPUT', type=str, default='.', help='export folder path')
+ args = parser.parse_args()
+
+ # Load movie
+ video_capture = cv2.VideoCapture(args.movie)
+
+ video_fps = video_capture.get(cv2.CAP_PROP_FPS)
+ image_width = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
+ image_height = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+ # Load ArUco camera
+ aruco_camera = ArUcoCamera.ArUcoCamera.from_json(args.configuration)
+
+ # Create empty ArUco markers group
+ aruco_markers_group = None
+
+ # Create a window
+ cv2.namedWindow(aruco_camera.name, cv2.WINDOW_AUTOSIZE)
+
+ # Enable exit signal handler
+ exit = UtilsFeatures.ExitSignalHandler()
+
+ # Init image selection
+ current_image_index = -1
+ _, current_image = video_capture.read()
+ next_image_index = int(args.start * video_fps)
+ refresh = False
+
+ while not exit.status():
+
+ # Select a new image and detect markers once
+ if next_image_index != current_image_index or refresh:
+
+ video_capture.set(cv2.CAP_PROP_POS_FRAMES, next_image_index)
+
+ success, video_image = video_capture.read()
+
+ if success:
+
+ # Refresh once
+ refresh = False
+
+ current_image_index = video_capture.get(cv2.CAP_PROP_POS_FRAMES) - 1
+ current_image_time = video_capture.get(cv2.CAP_PROP_POS_MSEC)
+
+ # Detect markers
+ detection_time, projection_time, exceptions = aruco_camera.watch(video_image)
+
+                # Estimate each marker's pose
+ aruco_camera.aruco_detector.estimate_markers_pose(aruco_camera.aruco_detector.detected_markers)
+
+ # Build aruco scene from detected markers
+ aruco_markers_group = ArUcoMarkersGroup.ArUcoMarkersGroup(aruco_camera.aruco_detector.marker_size, aruco_camera.aruco_detector.dictionary, aruco_camera.aruco_detector.detected_markers)
+
+ # Get camera image
+ camera_image = aruco_camera.image()
+
+ # Write detected markers
+ cv2.putText(camera_image, f'Detecting markers {list(aruco_camera.aruco_detector.detected_markers.keys())}', (20, aruco_camera.size[1]-40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+
+ # Write timing
+ cv2.putText(camera_image, f'Frame at {int(current_image_time)}ms', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(camera_image, f'Detection {int(detection_time)}ms', (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(camera_image, f'Projection {int(projection_time)}ms', (20, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+
+ # Write documentation
+                cv2.putText(camera_image, '<- previous image', (aruco_camera.size[0]-500, aruco_camera.size[1]-120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+                cv2.putText(camera_image, '-> next image', (aruco_camera.size[0]-500, aruco_camera.size[1]-80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+                cv2.putText(camera_image, 'Ctrl+s: export ArUco markers', (aruco_camera.size[0]-500, aruco_camera.size[1]-40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+
+ # Copy image
+ current_image = camera_image.copy()
+
+ # Keep last image
+ else:
+
+ video_image = current_image.copy()
+
+ key_pressed = cv2.waitKey(10)
+
+        # Uncomment to print raw key codes and adapt the platform-specific bindings below
+        #if key_pressed != -1:
+        #    print(key_pressed)
+
+        # Select previous image with left arrow (key code 2 on macOS; see the portability note below)
+ if key_pressed == 2:
+ next_image_index -= 1
+
+        # Select next image with right arrow (key code 3 on macOS)
+ if key_pressed == 3:
+ next_image_index += 1
+
+ # Clip image index
+ if next_image_index < 0:
+ next_image_index = 0
+
+ # r: reload configuration
+ if key_pressed == 114:
+
+ aruco_camera = ArUcoCamera.ArUcoCamera.from_json(args.configuration)
+ refresh = True
+ print('Configuration reloaded')
+
+ # Save selected marker edition using 'Ctrl + s'
+ if key_pressed == 19:
+
+ if aruco_markers_group:
+
+ aruco_markers_group.to_obj(f'{args.output}/{int(current_image_time)}-aruco_markers_group.obj')
+ print(f'ArUco markers saved into {args.output}')
+
+ else:
+
+                print('No ArUco markers to export')
+
+ # Close window using 'Esc' key
+ if key_pressed == 27:
+ break
+
+ # Display video
+ cv2.imshow(aruco_camera.name, video_image)
+
+ # Close movie capture
+ video_capture.release()
+
+ # Stop image display
+ cv2.destroyAllWindows()
+
+if __name__ == '__main__':
+
+ main() \ No newline at end of file
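The arrow-key codes 2 and 3 tested above are what cv2.waitKey() returns on macOS; other platforms report different values. A hedged, platform-tolerant variant using cv2.waitKeyEx() (the Linux and Windows codes below are indicative and should be verified on the target machine):

import cv2

LEFT_ARROW_KEYS = {2, 65361, 2424832}    # macOS, Linux (X11), Windows
RIGHT_ARROW_KEYS = {3, 65363, 2555904}

next_image_index = 0

key_pressed = cv2.waitKeyEx(10)

# Select previous/next image whatever the platform key code
if key_pressed in LEFT_ARROW_KEYS:
    next_image_index -= 1

if key_pressed in RIGHT_ARROW_KEYS:
    next_image_index += 1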
diff --git a/src/argaze/utils/aruco_markers_scene_export.py b/src/argaze/utils/aruco_markers_scene_export.py
deleted file mode 100644
index f618342..0000000
--- a/src/argaze/utils/aruco_markers_scene_export.py
+++ /dev/null
@@ -1,176 +0,0 @@
-#!/usr/bin/env python
-
-""" """
-
-__author__ = "Théo de la Hogue"
-__credits__ = []
-__copyright__ = "Copyright 2023, Ecole Nationale de l'Aviation Civile (ENAC)"
-__license__ = "BSD"
-
-import argparse
-import time
-import itertools
-
-from argaze.ArUcoMarkers import ArUcoMarkersDictionary, ArUcoOpticCalibrator, ArUcoDetector, ArUcoMarkersGroup
-from argaze.utils import UtilsFeatures
-
-import cv2
-import numpy
-
-def main():
- """
- Load a movie with ArUco markers inside and select image into it, detect ArUco markers belonging to a given dictionary and size into the selected image thanks to given optic parameters and detector parameters then, export detected ArUco scene as .obj file.
- """
-
- # Manage arguments
- parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0])
- parser.add_argument('movie', metavar='MOVIE', type=str, default=None, help='movie path')
- parser.add_argument('dictionary', metavar='DICTIONARY', type=str, default=None, help='ArUco dictionary to detect')
- parser.add_argument('marker_size', metavar='MARKER_SIZE', type=int, default=3, help='marker size in cm')
- parser.add_argument('optic_parameters', metavar='OPTIC_PARAMETERS', type=str, default=None, help='Optic parameters from camera calibration process')
- parser.add_argument('detector_parameters', metavar='DETECTOR_PARAMETERS', type=str, default=None, help='ArUco detector parameters')
-
- parser.add_argument('-s','--start', metavar='START', type=float, default=0., help='start time in second')
- parser.add_argument('-o', '--output', metavar='OUT', type=str, default='.', help='export scene folder path')
- args = parser.parse_args()
-
- # Load movie
- video_capture = cv2.VideoCapture(args.movie)
-
- video_fps = video_capture.get(cv2.CAP_PROP_FPS)
- image_width = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
- image_height = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
-
- # Load ArUco dictionary
- aruco_dictionary = ArUcoMarkersDictionary.ArUcoMarkersDictionary(args.dictionary)
-
- # Load optic parameters
- optic_parameters = ArUcoOpticCalibrator.OpticParameters.from_json(args.optic_parameters)
-
- # Load detector parameters
- detector_parameters = ArUcoDetector.DetectorParameters.from_json(args.detector_parameters)
-
- # Create ArUco detector
- aruco_detector = ArUcoDetector.ArUcoDetector(dictionary=aruco_dictionary, marker_size=args.marker_size, optic_parameters=optic_parameters, parameters=detector_parameters)
-
- # Create empty ArUco scene
- aruco_markers_group = None
-
- # Create a window to display AR environment
- window_name = "Export ArUco scene"
- cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)
-
- # Enable exit signal handler
- exit = UtilsFeatures.ExitSignalHandler()
-
- # Init image selection
- current_image_index = -1
- _, current_image = video_capture.read()
- next_image_index = int(args.start * video_fps)
- refresh = False
-
- # Hide help
- draw_help = False
-
- while not exit.status():
-
- # Select a new image and detect markers once
- if next_image_index != current_image_index or refresh:
-
- video_capture.set(cv2.CAP_PROP_POS_FRAMES, next_image_index)
-
- success, video_image = video_capture.read()
-
- if success:
-
- # Refresh once
- refresh = False
-
- current_image_index = video_capture.get(cv2.CAP_PROP_POS_FRAMES) - 1
- current_image_time = video_capture.get(cv2.CAP_PROP_POS_MSEC)
-
- # Detect markers
- aruco_detector.detect_markers(video_image)
-
- # Estimate markers pose
- aruco_detector.estimate_markers_pose()
-
- # Build aruco scene from detected markers
- aruco_markers_group = ArUcoMarkersGroup.ArUcoMarkersGroup(args.marker_size, aruco_dictionary, aruco_detector.detected_markers)
-
- # Write scene detected markers
- cv2.putText(video_image, f'{list(aruco_detector.detected_markers.keys())}', (20, image_height-80), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
-
- # Write timing
- cv2.putText(video_image, f'Time: {int(current_image_time)} ms', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
-
- # Copy image
- current_image = video_image.copy()
-
- # Keep last image
- else:
-
- video_image = current_image.copy()
-
- # Draw detected markers
- aruco_detector.draw_detected_markers(video_image, {"color": [0, 255, 0], "draw_axes": {"thickness": 4}})
-
- # Write documentation
- cv2.putText(video_image, f'Press \'h\' for help', (950, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
-
- if draw_help:
-
- cv2.rectangle(video_image, (0, 50), (500, 300), (127, 127, 127), -1)
- cv2.putText(video_image, f'> Left arrow: previous image', (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
- cv2.putText(video_image, f'> Right arrow: next image', (20, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
- cv2.putText(video_image, f'> Ctrl+s: export ArUco scene', (20, 160), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
-
- key_pressed = cv2.waitKey(10)
-
- #if key_pressed != -1:
- # print(key_pressed)
-
- # Select previous image with left arrow
- if key_pressed == 2:
- next_image_index -= 1
-
- # Select next image with right arrow
- if key_pressed == 3:
- next_image_index += 1
-
- # Clip image index
- if next_image_index < 0:
- next_image_index = 0
-
- # Switch help mode with h key
- if key_pressed == 104:
- draw_help = not draw_help
-
- # Save selected marker edition using 'Ctrl + s'
- if key_pressed == 19:
-
- if aruco_markers_group:
-
- aruco_markers_group.to_obj(f'{args.output}/{int(current_image_time)}-aruco_markers_group.obj')
- print(f'ArUco scene saved into {args.output}')
-
- else:
-
- print(f'No ArUco scene to export')
-
- # Close window using 'Esc' key
- if key_pressed == 27:
- break
-
- # Display video
- cv2.imshow(window_name, video_image)
-
- # Close movie capture
- video_capture.release()
-
- # Stop image display
- cv2.destroyAllWindows()
-
-if __name__ == '__main__':
-
- main() \ No newline at end of file
diff --git a/src/argaze/utils/demo_aruco_markers_run.py b/src/argaze/utils/demo_aruco_markers_run.py
index 6dc081d..5e1ac2e 100644
--- a/src/argaze/utils/demo_aruco_markers_run.py
+++ b/src/argaze/utils/demo_aruco_markers_run.py
@@ -14,6 +14,7 @@ import time
from argaze import ArFeatures, GazeFeatures
from argaze.ArUcoMarkers import ArUcoCamera
+from argaze.utils import UtilsFeatures
import cv2
import numpy
@@ -40,9 +41,29 @@ def main():
# Init timestamp
start_time = time.time()
+ # Prepare gaze analysis assessment
+ call_chrono = UtilsFeatures.TimeProbe()
+ call_chrono.start()
+
+ gaze_positions_frequency = 0
+ gaze_analysis_time = 0
+
# Fake gaze position with mouse pointer
def on_mouse_event(event, x, y, flags, param):
+ nonlocal gaze_positions_frequency
+ nonlocal gaze_analysis_time
+
+ # Assess gaze analysis
+ lap_time, nb_laps, elapsed_time = call_chrono.lap()
+
+ if elapsed_time > 1e3:
+
+ gaze_positions_frequency = nb_laps
+ call_chrono.restart()
+
+ gaze_analysis_time = 0
+
# Compute millisecond timestamp
timestamp = int((time.time() - start_time) * 1e3)
@@ -54,12 +75,20 @@ def main():
gaze_movement, scan_step_analysis, layer_analysis, execution_times, exception = look_data
- # Do something with look data
- # ...
+ # Assess gaze analysis
+ gaze_analysis_time += execution_times['total']
# Attach mouse callback to window
cv2.setMouseCallback(aruco_camera.name, on_mouse_event)
+ # Prepare video fps assessment
+ video_fps = 0
+ video_chrono = UtilsFeatures.TimeProbe()
+ video_chrono.start()
+
+ # Prepare visualisation time assessment
+ visualisation_time = 0
+
# Enable camera video capture into separate thread
video_capture = cv2.VideoCapture(int(args.source) if args.source.isdecimal() else args.source)
@@ -69,30 +98,48 @@ def main():
# Capture images
while video_capture.isOpened():
+ # Assess capture time
+ capture_start = time.time()
+
# Read video image
success, video_image = video_capture.read()
+ # Assess capture time
+ capture_time = int((time.time() - capture_start) * 1e3)
+
if success:
+ # Assess video fps
+ lap_time, nb_laps, elapsed_time = video_chrono.lap()
+
+ if elapsed_time > 1e3:
+
+ video_fps = nb_laps
+ video_chrono.restart()
+
# Detect and project AR features
- detection_time, exceptions = aruco_camera.watch(video_image)
+ detection_time, projection_time, exceptions = aruco_camera.watch(video_image)
+
+ # Assess visualisation time
+ visualisation_start = time.time()
# Get ArUcoCamera frame image
aruco_camera_image = aruco_camera.image()
- # Write detection fps
- cv2.rectangle(aruco_camera_image, (0, 0), (420, 50), (63, 63, 63), -1)
- cv2.putText(aruco_camera_image, f'Detection fps: {1e3/detection_time:.1f}', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+ # Write time info
+ cv2.rectangle(aruco_camera_image, (0, 0), (aruco_camera.size[0], 100), (63, 63, 63), -1)
+ cv2.putText(aruco_camera_image, f'{video_fps} FPS | Capture {capture_time}ms | Detection {int(detection_time)}ms | Projection {int(projection_time)}ms | Visualisation {visualisation_time}ms', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(aruco_camera_image, f'{gaze_positions_frequency} gaze positions/s | Gaze analysis {gaze_analysis_time:.2f}ms', (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
# Handle exceptions
for i, (scene_name, e) in enumerate(exceptions.items()):
# Write errors
- cv2.rectangle(aruco_camera_image, (0, (i+1)*50), (720, (i+2)*50), (127, 127, 127), -1)
- cv2.putText(aruco_camera_image, f'{scene_name} error: {e}', (20, (i+1)*90), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.rectangle(aruco_camera_image, (0, (i+1)*100), (aruco_camera.size[0], (i+2)*80), (127, 127, 127), -1)
+ cv2.putText(aruco_camera_image, f'{scene_name} error: {e}', (20, (i+1)*140), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
# Write hint
- cv2.putText(aruco_camera_image, 'Mouve mouse pointer over gray rectangle area', (450, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+            cv2.putText(aruco_camera_image, 'Move mouse pointer over gray rectangle area', (20, aruco_camera.size[1]-40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
# Display ArUcoCamera frame image
cv2.imshow(aruco_camera.name, aruco_camera_image)
@@ -103,12 +150,21 @@ def main():
# Display scene frame
cv2.imshow(f'{scene_frame.parent.name}:{scene_frame.name}', scene_frame.image())
+ else:
+
+ # Assess visualisation time
+ visualisation_start = time.time()
+
# Stop by pressing 'Esc' key
- if cv2.waitKey(10) == 27:
+        # NOTE: on macOS, cv2.waitKey(1) waits ~40ms
+ if cv2.waitKey(1) == 27:
# Close camera video capture
video_capture.release()
+ # Assess visualisation time
+ visualisation_time = int((time.time() - visualisation_start) * 1e3)
+
# Stop image display
cv2.destroyAllWindows()
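The FPS and gaze-frequency figures above rely on UtilsFeatures.TimeProbe's start()/lap()/restart() trio. A minimal stand-in showing the semantics the demo assumes, where lap() returns the lap time, the lap count and the elapsed time in milliseconds since the last restart (a sketch, not the actual UtilsFeatures implementation):

import time

class TimeProbeSketch:
    """Illustrative stand-in for UtilsFeatures.TimeProbe (assumed API)."""

    def __init__(self):
        self.start()

    def start(self):
        self._start = time.perf_counter()
        self._last_lap = self._start
        self._lap_count = 0

    def restart(self):
        self.start()

    def lap(self):
        now = time.perf_counter()
        lap_time = (now - self._last_lap) * 1e3
        self._last_lap = now
        self._lap_count += 1
        elapsed_time = (now - self._start) * 1e3
        return lap_time, self._lap_count, elapsed_time

# Usage mirroring the assessment above: count laps until one second
# has elapsed, then read the lap count as a frequency.
probe = TimeProbeSketch()
lap_time, nb_laps, elapsed_time = probe.lap()

if elapsed_time > 1e3:
    video_fps = nb_laps
    probe.restart()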
diff --git a/src/argaze/utils/demo_data/aoi_2d_scene.json b/src/argaze/utils/demo_data/aoi_2d_scene.json
new file mode 100644
index 0000000..ac58b63
--- /dev/null
+++ b/src/argaze/utils/demo_data/aoi_2d_scene.json
@@ -0,0 +1,18 @@
+{
+ "BlueTriangle":[[960, 664], [1113, 971], [806, 971]],
+ "RedSquare": {
+ "Rectangle": {
+ "x": 268,
+ "y": 203,
+ "width": 308,
+ "height": 308
+ }
+ },
+ "GreenCircle": {
+ "Circle": {
+ "cx": 1497,
+ "cy": 356,
+ "radius": 153
+ }
+ }
+} \ No newline at end of file
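The new aoi_2d_scene.json mixes a raw point list (BlueTriangle) with parametric Rectangle and Circle primitives. A hedged sketch of how such primitives expand into point lists; the helper names are illustrative, not the ArGaze loader (the 32-point default matches the vertex count of the GreenCircle removed from the OBJ file below):

import numpy

def rectangle_points(x, y, width, height):
    # Four corners starting from the (x, y) origin
    return [[x, y], [x + width, y], [x + width, y + height], [x, y + height]]

def circle_points(cx, cy, radius, n=32):
    # Regular n-gon approximation of the circle outline
    angles = numpy.linspace(0, 2 * numpy.pi, n, endpoint=False)
    return numpy.stack([cx + radius * numpy.cos(angles), cy + radius * numpy.sin(angles)], axis=1).tolist()

red_square = rectangle_points(268, 203, 308, 308)
green_circle = circle_points(1497, 356, 153)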
diff --git a/src/argaze/utils/demo_data/aoi_3d_scene.obj b/src/argaze/utils/demo_data/aoi_3d_scene.obj
index d32e235..0ce97de 100644
--- a/src/argaze/utils/demo_data/aoi_3d_scene.obj
+++ b/src/argaze/utils/demo_data/aoi_3d_scene.obj
@@ -1,5 +1,3 @@
-# Blender v3.0.1 OBJ File: 'ar_camera.blend'
-# www.blender.org
o GrayRectangle
v 0.000000 0.000000 0.000000
v 25.000000 0.000000 0.000000
@@ -7,51 +5,3 @@ v 0.000000 14.960000 0.000000
v 25.000000 14.960000 0.000000
s off
f 1 2 4 3
-o RedSquare
-v 3.497026 8.309391 0.000000
-v 7.504756 8.309391 0.000000
-v 3.497026 12.314838 0.001030
-v 7.504756 12.314838 0.001030
-s off
-f 5 6 8 7
-o BlueTriangle
-v 10.500295 2.307687 0.000000
-v 14.503224 2.306344 0.000000
-v 12.502419 6.312207 0.001030
-s off
-f 9 10 11
-o GreenCircle
-v 19.495552 12.311101 0.000000
-v 19.105371 12.272672 0.000000
-v 18.730185 12.158860 0.000000
-v 18.384411 11.974040 0.000000
-v 18.081339 11.725314 0.000000
-v 17.832613 11.422241 0.000000
-v 17.647793 11.076468 0.000000
-v 17.533981 10.701282 0.000000
-v 17.495552 10.311101 0.000000
-v 17.533981 9.920920 0.000000
-v 17.647793 9.545734 0.000000
-v 17.832613 9.199961 0.000000
-v 18.081339 8.896888 0.000000
-v 18.384411 8.648162 0.000000
-v 18.730185 8.463342 0.000000
-v 19.105371 8.349530 0.000000
-v 19.495552 8.311101 0.000000
-v 19.885733 8.349530 0.000000
-v 20.260920 8.463342 0.000000
-v 20.606693 8.648162 0.000000
-v 20.909765 8.896887 0.000000
-v 21.158491 9.199960 0.000000
-v 21.343311 9.545733 0.000000
-v 21.457123 9.920920 0.000000
-v 21.495552 10.311101 0.000000
-v 21.457123 10.701282 0.000000
-v 21.343311 11.076468 0.000000
-v 21.158491 11.422241 0.000000
-v 20.909765 11.725314 0.000000
-v 20.606693 11.974040 0.000000
-v 20.260920 12.158860 0.000000
-v 19.885733 12.272672 0.000000
-s off
-f 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 12
diff --git a/src/argaze/utils/demo_data/demo_aruco_markers_setup.json b/src/argaze/utils/demo_data/demo_aruco_markers_setup.json
index 9a3b79f..7a4f6d1 100644
--- a/src/argaze/utils/demo_data/demo_aruco_markers_setup.json
+++ b/src/argaze/utils/demo_data/demo_aruco_markers_setup.json
@@ -5,9 +5,10 @@
"dictionary": "DICT_APRILTAG_16h5",
"marker_size": 5,
"parameters": {
- "cornerRefinementMethod": 1,
+ "cornerRefinementMethod": 3,
"aprilTagQuadSigma": 2,
- "aprilTagDeglitch": 1
+ "aprilTagDeglitch": 1,
+ "useAruco3Detection": 1
}
},
"layers": {
@@ -40,7 +41,21 @@
"height": 72,
"z": 100,
"point_size": 1,
- "point_color": [0, 0, 255]
+ "point_color": [127, 127, 127]
+ },
+ "draw_scenes": {
+ "ArScene Demo": {
+ "draw_aruco_markers_group": {
+ "draw_axes": {
+ "thickness": 3,
+ "length": 10
+ },
+ "draw_places": {
+ "color": [0, 0, 0],
+ "border_size": 1
+ }
+ }
+ }
}
},
"scenes": {
@@ -53,11 +68,11 @@
},
"frames": {
"GrayRectangle": {
- "size": [640, 383],
+ "size": [1920, 1149],
"background": "frame_background.jpg",
"gaze_movement_identifier": {
"DispersionThresholdIdentification": {
- "deviation_max_threshold": 25,
+ "deviation_max_threshold": 50,
"duration_min_threshold": 200
}
},
@@ -65,12 +80,10 @@
"duration_max": 10000
},
"layers": {
- "GrayRectangle": {
- "aoi_scene": "aoi_3d_scene.obj",
+ "main_layer": {
+ "aoi_scene": "aoi_2d_scene.json",
"aoi_matcher": {
- "FocusPointInside": {
- "exclude": ["GrayRectangle"]
- }
+ "FocusPointInside": {}
}
}
},
@@ -82,16 +95,16 @@
"heatmap_weight": 0.5,
"draw_scan_path": {
"draw_fixations": {
- "deviation_circle_color": [0, 255, 255],
- "duration_border_color": [0, 127, 127],
+ "deviation_circle_color": [255, 0, 255],
+ "duration_border_color": [127, 0, 127],
"duration_factor": 1e-2
},
"draw_saccades": {
- "line_color": [0, 255, 255]
+ "line_color": [255, 0, 255]
}
},
"draw_layers": {
- "GrayRectangle": {
+ "main_layer": {
"draw_aoi_scene": {
"draw_aoi": {
"color": [255, 255, 255],
@@ -102,10 +115,6 @@
"draw_matched_fixation": {
"deviation_circle_color": [255, 255, 255]
},
- "draw_matched_fixation_positions": {
- "position_color": [0, 255, 255],
- "line_color": [0, 0, 0]
- },
"draw_looked_aoi": {
"color": [0, 255, 0],
"border_size": 2
@@ -115,6 +124,15 @@
}
}
},
+ "draw_fixations": {
+ "deviation_circle_color": [255, 255, 255],
+ "duration_border_color": [127, 0, 127],
+ "duration_factor": 1e-2,
+ "draw_positions": {
+ "position_color": [0, 255, 255],
+ "line_color": [0, 0, 0]
+ }
+ },
"draw_gaze_positions": {
"color": [0, 255, 255],
"size": 2
diff --git a/src/argaze/utils/demo_data/demo_gaze_analysis_setup.json b/src/argaze/utils/demo_data/demo_gaze_analysis_setup.json
index 414a6fe..f921662 100644
--- a/src/argaze/utils/demo_data/demo_gaze_analysis_setup.json
+++ b/src/argaze/utils/demo_data/demo_gaze_analysis_setup.json
@@ -8,6 +8,7 @@
"duration_min_threshold": 200
}
},
+ "filter_in_progress_identification": false,
"scan_path": {
"duration_max": 10000
},
@@ -17,7 +18,7 @@
"NearestNeighborIndex": {
"size": [1920, 1149]
},
- "ExploitExploreRatio": {
+ "ExploreExploitRatio": {
"short_fixation_duration_threshold": 0
}
},
@@ -25,11 +26,10 @@
"size": [320, 240]
},
"layers": {
- "GrayRectangle": {
- "aoi_scene": "aoi_3d_scene.obj",
+ "main_layer": {
+ "aoi_scene": "aoi_2d_scene.json",
"aoi_matcher": {
"DeviationCircleCoverage": {
- "exclude": ["GrayRectangle"],
"coverage_threshold": 0.5
}
},
@@ -60,11 +60,10 @@
},
"draw_saccades": {
"line_color": [255, 0, 255]
- },
- "deepness": 0
+ }
},
"draw_layers": {
- "GrayRectangle": {
+ "main_layer": {
"draw_aoi_scene": {
"draw_aoi": {
"color": [255, 255, 255],
@@ -73,11 +72,11 @@
},
"draw_aoi_matching": {
"draw_matched_fixation": {
- "deviation_circle_color": [255, 255, 255]
- },
- "draw_matched_fixation_positions": {
- "position_color": [0, 255, 255],
- "line_color": [0, 0, 0]
+ "deviation_circle_color": [255, 255, 255],
+ "draw_positions": {
+ "position_color": [0, 255, 0],
+ "line_color": [0, 0, 0]
+ }
},
"draw_matched_region": {
"color": [0, 255, 0],
@@ -92,6 +91,18 @@
}
}
},
+ "draw_fixations": {
+ "deviation_circle_color": [255, 255, 255],
+ "duration_border_color": [127, 0, 127],
+ "duration_factor": 1e-2,
+ "draw_positions": {
+ "position_color": [0, 255, 255],
+ "line_color": [0, 0, 0]
+ }
+ },
+ "draw_saccades": {
+ "line_color": [255, 0, 255]
+ },
"draw_gaze_positions": {
"color": [0, 255, 255],
"size": 2
diff --git a/src/argaze/utils/demo_gaze_analysis_run.py b/src/argaze/utils/demo_gaze_analysis_run.py
index 465c5db..9856d90 100644
--- a/src/argaze/utils/demo_gaze_analysis_run.py
+++ b/src/argaze/utils/demo_gaze_analysis_run.py
@@ -74,18 +74,18 @@ def main():
# Write last 5 steps of aoi scan path
path = ''
- for step in ar_frame.layers["GrayRectangle"].aoi_scan_path[-5:]:
+ for step in ar_frame.layers["main_layer"].aoi_scan_path[-5:]:
path += f'> {step.aoi} '
- path += f'> {ar_frame.layers["GrayRectangle"].aoi_scan_path.current_aoi}'
+ path += f'> {ar_frame.layers["main_layer"].aoi_scan_path.current_aoi}'
cv2.putText(frame_image, path, (20, ar_frame.size[1]-40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
# Display Transition matrix analysis if loaded
try:
- transition_matrix_analyzer = ar_frame.layers['GrayRectangle'].aoi_scan_path_analyzers["argaze.GazeAnalysis.TransitionMatrix"]
+ transition_matrix_analyzer = ar_frame.layers["main_layer"].aoi_scan_path_analyzers["argaze.GazeAnalysis.TransitionMatrix"]
cv2.putText(frame_image, f'Transition matrix density: {transition_matrix_analyzer.transition_matrix_density:.2f}', (20, ar_frame.size[1]-160), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
@@ -97,8 +97,8 @@ def main():
if from_aoi != to_aoi and probability > 0.0:
- from_center = ar_frame.layers['GrayRectangle'].aoi_scene[from_aoi].center.astype(int)
- to_center = ar_frame.layers['GrayRectangle'].aoi_scene[to_aoi].center.astype(int)
+ from_center = ar_frame.layers["main_layer"].aoi_scene[from_aoi].center.astype(int)
+ to_center = ar_frame.layers["main_layer"].aoi_scene[to_aoi].center.astype(int)
start_line = (0.5 * from_center + 0.5 * to_center).astype(int)
color = [int(probability*200) + 55, int(probability*200) + 55, int(probability*200) + 55]
@@ -112,7 +112,7 @@ def main():
# Display aoi scan path basic metrics analysis if loaded
try:
- basic_analyzer = ar_frame.layers['GrayRectangle'].aoi_scan_path_analyzers["argaze.GazeAnalysis.Basic"]
+ basic_analyzer = ar_frame.layers["main_layer"].aoi_scan_path_analyzers["argaze.GazeAnalysis.Basic"]
# Write basic analysis
cv2.putText(frame_image, f'Step number: {basic_analyzer.steps_number}', (20, ar_frame.size[1]-440), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
@@ -141,7 +141,7 @@ def main():
# Display aoi scan path K-modified coefficient analysis if loaded
try:
- aoi_kc_analyzer = ar_frame.layers['GrayRectangle'].aoi_scan_path_analyzers["argaze.GazeAnalysis.KCoefficient"]
+ aoi_kc_analyzer = ar_frame.layers["main_layer"].aoi_scan_path_analyzers["argaze.GazeAnalysis.KCoefficient"]
# Write aoi Kc analysis
if aoi_kc_analyzer.K < 0.:
@@ -158,7 +158,7 @@ def main():
# Display Lempel-Ziv complexity analysis if loaded
try:
- lzc_analyzer = ar_frame.layers['GrayRectangle'].aoi_scan_path_analyzers["argaze.GazeAnalysis.LempelZivComplexity"]
+ lzc_analyzer = ar_frame.layers["main_layer"].aoi_scan_path_analyzers["argaze.GazeAnalysis.LempelZivComplexity"]
cv2.putText(frame_image, f'Lempel-Ziv complexity: {lzc_analyzer.lempel_ziv_complexity}', (20, ar_frame.size[1]-200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
@@ -168,7 +168,7 @@ def main():
# Display N-Gram analysis if loaded
try:
- ngram_analyzer = ar_frame.layers['GrayRectangle'].aoi_scan_path_analyzers["argaze.GazeAnalysis.NGram"]
+ ngram_analyzer = ar_frame.layers["main_layer"].aoi_scan_path_analyzers["argaze.GazeAnalysis.NGram"]
# Display only 3-gram analysis
start = ar_frame.size[1] - ((len(ngram_analyzer.ngrams_count[3]) + 1) * 40)
@@ -188,7 +188,7 @@ def main():
# Display Entropy analysis if loaded
try:
- entropy_analyzer = ar_frame.layers['GrayRectangle'].aoi_scan_path_analyzers["argaze.GazeAnalysis.Entropy"]
+ entropy_analyzer = ar_frame.layers["main_layer"].aoi_scan_path_analyzers["argaze.GazeAnalysis.Entropy"]
cv2.putText(frame_image, f'Stationary entropy: {entropy_analyzer.stationary_entropy:.3f},', (20, ar_frame.size[1]-280), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
cv2.putText(frame_image, f'Transition entropy: {entropy_analyzer.transition_entropy:.3f},', (20, ar_frame.size[1]-240), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
@@ -206,12 +206,12 @@ def main():
except KeyError:
pass
- # Display Exploit/Explore ratio analysis if loaded
+ # Display Explore/Exploit ratio analysis if loaded
try:
- xxr_analyser = ar_frame.scan_path_analyzers["argaze.GazeAnalysis.ExploitExploreRatio"]
+ xxr_analyser = ar_frame.scan_path_analyzers["argaze.GazeAnalysis.ExploreExploitRatio"]
- cv2.putText(frame_image, f'Exploit explore ratio: {xxr_analyser.exploit_explore_ratio:.3f}', (20, ar_frame.size[1]-360), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(frame_image, f'Explore/Exploit ratio: {xxr_analyser.explore_exploit_ratio:.3f}', (20, ar_frame.size[1]-360), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
except KeyError:
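After the ExploitExploreRatio to ExploreExploitRatio rename, setups must reference the new module path; the demo's guarded lookup keeps a missing analyzer non-fatal. The pattern, reduced to a self-contained sketch (the dict stands in for ar_frame.scan_path_analyzers):

scan_path_analyzers = {}  # stand-in for ar_frame.scan_path_analyzers

try:
    xxr_analyser = scan_path_analyzers["argaze.GazeAnalysis.ExploreExploitRatio"]
    ratio = xxr_analyser.explore_exploit_ratio

except KeyError:
    ratio = None  # analyzer not loaded in this setup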