Diffstat (limited to 'src')
 src/argaze.test/ArUcoMarker/ArUcoCamera.py                          |   4
 src/argaze.test/ArUcoMarker/utils/aruco_camera.json                 |   8
 src/argaze/ArFeatures.py                                            |  77
 src/argaze/ArUcoMarker/ArUcoDetector.py                             | 101
 src/argaze/ArUcoMarker/ArUcoMarkerGroup.py                          |   2
 src/argaze/DataFeatures.py                                          |  24
 src/argaze/GazeAnalysis/KCoefficient.py                             |   8
 src/argaze/__main__.py                                              |  32
 src/argaze/utils/UtilsFeatures.py                                   |  43
 src/argaze/utils/contexts/OpenCV.py                                 | 166
 src/argaze/utils/contexts/PupilLabsInvisible.py (renamed from src/argaze/utils/contexts/PupilLabs.py) | 17
 src/argaze/utils/contexts/TobiiProGlasses2.py                       |  65
 src/argaze/utils/contexts/TobiiProGlasses3.py                       | 128
 src/argaze/utils/demo/aruco_markers_pipeline.json                   |  42
 src/argaze/utils/demo/gaze_analysis_pipeline.json                   |   2
 src/argaze/utils/demo/opencv_camera_context.json                    |   7
 src/argaze/utils/demo/opencv_movie_context.json                     |   2
 src/argaze/utils/demo/pupillabs_invisible_live_stream_context.json  |   6
 src/argaze/utils/demo/pupillabs_live_stream_context.json            |   6
 src/argaze/utils/demo/recorders.py                                  |  60
 src/argaze/utils/demo/tobii_g2_live_stream_context.json (renamed from src/argaze/utils/demo/tobii_live_stream_context.json) | 0
 src/argaze/utils/demo/tobii_g3_live_stream_context.json             |   6
 src/argaze/utils/demo/tobii_segment_playback_context.json (renamed from src/argaze/utils/demo/tobii_post_processing_context.json) | 4
 src/argaze/utils/estimate_markers_pose/observers.py                 |   8
 src/argaze/utils/estimate_markers_pose/pipeline.json                |   2
25 files changed, 446 insertions(+), 374 deletions(-)
diff --git a/src/argaze.test/ArUcoMarker/ArUcoCamera.py b/src/argaze.test/ArUcoMarker/ArUcoCamera.py
index 76b567e..0777beb 100644
--- a/src/argaze.test/ArUcoMarker/ArUcoCamera.py
+++ b/src/argaze.test/ArUcoMarker/ArUcoCamera.py
@@ -71,10 +71,6 @@ class TestArUcoCameraClass(unittest.TestCase):
self.assertEqual(len(ar_scene.layers.items()), 1)
self.assertEqual(len(ar_scene.layers["Main"].aoi_scene), 1)
self.assertEqual(ar_scene.layers["Main"].aoi_scene['Test'].points_number, 4)
-
- # Check ArScene
- self.assertEqual(ar_scene.angle_tolerance, 1.0)
- self.assertEqual(ar_scene.distance_tolerance, 2.0)
if __name__ == '__main__':
diff --git a/src/argaze.test/ArUcoMarker/utils/aruco_camera.json b/src/argaze.test/ArUcoMarker/utils/aruco_camera.json
index 980dc9f..7217c0e 100644
--- a/src/argaze.test/ArUcoMarker/utils/aruco_camera.json
+++ b/src/argaze.test/ArUcoMarker/utils/aruco_camera.json
@@ -63,9 +63,7 @@
"Main" : {
"aoi_scene": "aoi_3d.obj"
}
- },
- "angle_tolerance": 1.0,
- "distance_tolerance": 2.0
+ }
},
"TestSceneB" : {
"aruco_markers_group": {
@@ -87,9 +85,7 @@
"Main" : {
"aoi_scene": "aoi_3d.obj"
}
- },
- "angle_tolerance": 1.0,
- "distance_tolerance": 2.0
+ }
}
},
"layers": {
diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py
index 8d9eceb..4515ae1 100644
--- a/src/argaze/ArFeatures.py
+++ b/src/argaze/ArFeatures.py
@@ -344,7 +344,7 @@ class ArLayer(DataFeatures.SharedObject, DataFeatures.PipelineStepObject):
if self.__aoi_matcher is not None and self.__aoi_scene is not None:
- # Update looked aoi thanks to aoi matcher
+ # Update looked aoi with aoi matcher
# Note: don't filter valid/invalid and finished/unfinished fixation/saccade as we don't know how the aoi matcher works internally
self.__looked_aoi_name, _ = self.__aoi_matcher.match(gaze_movement, self.__aoi_scene)
@@ -915,8 +915,6 @@ class ArScene(DataFeatures.PipelineStepObject):
# Init private attributes
self._layers = {}
self.__frames = {}
- self.__angle_tolerance = 0.
- self.__distance_tolerance = 0.
@property
def layers(self) -> dict:
@@ -1010,35 +1008,13 @@ class ArScene(DataFeatures.PipelineStepObject):
for name, frame in self.__frames.items():
frame.parent = self
- @property
- def angle_tolerance(self) -> float:
- """Angle error tolerance to validate marker pose in degree used into [estimate_pose][argaze.ArFeatures.ArScene.estimate_pose] function."""
- return self.__angle_tolerance
-
- @angle_tolerance.setter
- def angle_tolerance(self, value: float):
-
- self.__angle_tolerance = value
-
- @property
- def distance_tolerance(self) -> float:
- """Distance error tolerance to validate marker pose in centimeter used into [estimate_pose][argaze.ArFeatures.ArScene.estimate_pose] function."""
- return self.__distance_tolerance
-
- @distance_tolerance.setter
- def distance_tolerance(self, value: float):
-
- self.__distance_tolerance = value
-
def as_dict(self) -> dict:
"""Export ArScene properties as dictionary."""
return {
**DataFeatures.PipelineStepObject.as_dict(self),
"layers": self._layers,
- "frames": self.__frames,
- "angle_tolerance": self.__angle_tolerance,
- "distance_tolerance": self.__distance_tolerance
+ "frames": self.__frames
}
@DataFeatures.PipelineStepMethod
@@ -1174,8 +1150,8 @@ class ArCamera(ArFrame):
self.__projection_cache = projection_cache
# The file doesn't exist yet: store projections into the cache
- if not os.path.exists(os.path.join( DataFeatures.get_working_directory(), self.__projection_cache) ):
+ if not os.path.exists(self.__projection_cache):
self.__projection_cache_writer = UtilsFeatures.FileWriter(path=self.__projection_cache)
self.__projection_cache_reader = None
@@ -1521,7 +1500,7 @@ class ArContext(DataFeatures.PipelineStepObject):
self._image_parameters = DEFAULT_ARCONTEXT_IMAGE_PARAMETERS
@property
- def pipeline(self) -> DataFeatures.PipelineStepObject:
+ def pipeline(self) -> ArFrame|ArCamera:
"""ArFrame used to process gaze data or ArCamera used to process gaze data and video of environment."""
return self.__pipeline
@@ -1538,7 +1517,7 @@ class ArContext(DataFeatures.PipelineStepObject):
return self.__exceptions
def as_dict(self) -> dict:
- """Export ArContext properties as dictionary."""
+ """Export context properties as dictionary."""
return {
**DataFeatures.PipelineStepObject.as_dict(self),
@@ -1548,13 +1527,13 @@ class ArContext(DataFeatures.PipelineStepObject):
@DataFeatures.PipelineStepEnter
def __enter__(self):
- """Enter into ArContext."""
+ """Enter into context."""
return self
@DataFeatures.PipelineStepExit
def __exit__(self, exception_type, exception_value, exception_traceback):
- """Exit from ArContext."""
+ """Exit from context."""
pass
def _process_gaze_position(self, timestamp: int | float, x: int | float = None, y: int | float = None, precision: int | float = None):
@@ -1709,24 +1688,24 @@ class ArContext(DataFeatures.PipelineStepObject):
@DataFeatures.PipelineStepMethod
def pause(self):
- """Pause pipeline processing."""
+ """Pause context."""
self._pause_event.set()
def is_paused(self) -> bool:
- """Is pipeline processing paused?"""
+ """Is context paused?"""
return self._pause_event.is_set()
@DataFeatures.PipelineStepMethod
def resume(self):
- """Resume pipeline processing."""
+ """Resume context."""
self._pause_event.clear()
-class LiveProcessingContext(ArContext):
+class DataCaptureContext(ArContext):
"""
- Defines abstract live data processing context.
+ Defines abstract data capture context.
"""
@DataFeatures.PipelineStepInit
@@ -1739,14 +1718,14 @@ class LiveProcessingContext(ArContext):
raise NotImplementedError
-# Define default PostProcessingContext image parameters
-DEFAULT_POST_PROCESSING_CONTEXT_IMAGE_PARAMETERS = {
+# Define default DataPlaybackContext image parameters
+DEFAULT_DATA_PLAYBACK_CONTEXT_IMAGE_PARAMETERS = {
"draw_progression": True
}
-class PostProcessingContext(ArContext):
+class DataPlaybackContext(ArContext):
"""
- Defines abstract post data processing context.
+ Defines abstract data playback context.
"""
@DataFeatures.PipelineStepInit
@@ -1754,17 +1733,7 @@ class PostProcessingContext(ArContext):
super().__init__()
- self._image_parameters = {**DEFAULT_ARCONTEXT_IMAGE_PARAMETERS, **DEFAULT_POST_PROCESSING_CONTEXT_IMAGE_PARAMETERS}
-
- def previous(self):
- """Go to previous frame"""
-
- raise NotImplementedError
-
- def next(self):
- """Go to next frame"""
-
- raise NotImplementedError
+ self._image_parameters = {**DEFAULT_ARCONTEXT_IMAGE_PARAMETERS, **DEFAULT_DATA_PLAYBACK_CONTEXT_IMAGE_PARAMETERS}
@property
def duration(self) -> int|float:
@@ -1774,19 +1743,19 @@ class PostProcessingContext(ArContext):
@property
def progression(self) -> float:
- """Get data processing progression between 0 and 1."""
+ """Get data playback progression between 0 and 1."""
raise NotImplementedError
@DataFeatures.PipelineStepImage
def image(self, draw_progression: bool = True, **kwargs):
"""
- Get pipeline image with post processing information.
+ Get pipeline image with data playback information.
Parameters:
draw_progression: draw progress bar
"""
- logging.debug('PostProcessingContext.image %s', self.name)
+ logging.debug('DataPlaybackContext.image %s', self.name)
image = super().image(**kwargs)
height, width, _ = image.shape
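
After this rename, a playback context only has to provide the duration and progression properties (the previous()/next() frame stepping is gone). As an illustration, a minimal sketch of a concrete subclass, assuming only the names introduced above (FilePlayback is hypothetical):

    from argaze import ArFeatures, DataFeatures

    class FilePlayback(ArFeatures.DataPlaybackContext):
        # Hypothetical playback context sketch, not part of this commit

        @DataFeatures.PipelineStepInit
        def __init__(self, **kwargs):
            super().__init__()
            self.__duration = 0.

        @property
        def duration(self) -> int | float:
            # Total playback duration in milliseconds
            return self.__duration

        @property
        def progression(self) -> float:
            # Playback progression between 0 and 1
            return 0.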
diff --git a/src/argaze/ArUcoMarker/ArUcoDetector.py b/src/argaze/ArUcoMarker/ArUcoDetector.py
index 50da144..8ff840b 100644
--- a/src/argaze/ArUcoMarker/ArUcoDetector.py
+++ b/src/argaze/ArUcoMarker/ArUcoDetector.py
@@ -21,110 +21,63 @@ import json
from collections import Counter
from typing import Self
-import cv2 as cv
+import cv2
import numpy
-from cv2 import aruco
from argaze import DataFeatures
from argaze.ArUcoMarker import ArUcoMarkerDictionary, ArUcoMarker, ArUcoOpticCalibrator, ArUcoMarkerGroup
-class DetectorParameters():
- """Wrapper class around ArUco marker detector parameters.
+class DetectorParameters(cv2.aruco.DetectorParameters):
+ """OpenCV DetectorParameters wrapper.
!!! note
More details on [opencv page](https://docs.opencv.org/4.x/d1/dcd/structcv_1_1aruco_1_1DetectorParameters.html)
"""
- __parameters = aruco.DetectorParameters()
- __parameters_names = [
- 'adaptiveThreshConstant',
- 'adaptiveThreshWinSizeMax',
- 'adaptiveThreshWinSizeMin',
- 'adaptiveThreshWinSizeStep',
- 'aprilTagCriticalRad',
- 'aprilTagDeglitch',
- 'aprilTagMaxLineFitMse',
- 'aprilTagMaxNmaxima',
- 'aprilTagMinClusterPixels',
- 'aprilTagMinWhiteBlackDiff',
- 'aprilTagQuadDecimate',
- 'aprilTagQuadSigma',
- 'cornerRefinementMaxIterations',
- 'cornerRefinementMethod',
- 'cornerRefinementMinAccuracy',
- 'cornerRefinementWinSize',
- 'markerBorderBits',
- 'minMarkerPerimeterRate',
- 'maxMarkerPerimeterRate',
- 'minMarkerDistanceRate',
- 'detectInvertedMarker',
- 'errorCorrectionRate',
- 'maxErroneousBitsInBorderRate',
- 'minCornerDistanceRate',
- 'minDistanceToBorder',
- 'minOtsuStdDev',
- 'perspectiveRemoveIgnoredMarginPerCell',
- 'perspectiveRemovePixelPerCell',
- 'polygonalApproxAccuracyRate',
- 'useAruco3Detection'
- ]
-
def __init__(self, **kwargs):
- for parameter, value in kwargs.items():
- setattr(self.__parameters, parameter, value)
-
- self.__dict__.update(kwargs)
-
- def __setattr__(self, parameter, value):
+ super().__init__()
- setattr(self.__parameters, parameter, value)
+ self.__modified = []
- def __getattr__(self, parameter):
+ self.__parameters_names = [name for name in dir(self) if not name.startswith('_')]
+ self.__parameters_names.remove('from_json')
+ self.__parameters_names.remove('readDetectorParameters')
+ self.__parameters_names.remove('writeDetectorParameters')
+
+ for parameter, value in kwargs.items():
- return getattr(self.__parameters, parameter)
+ setattr(self, parameter, value)
+ self.__modified.append(parameter)
@classmethod
def from_json(cls, json_filepath) -> Self:
"""Load detector parameters from .json file."""
with open(json_filepath) as configuration_file:
+
return DetectorParameters(**json.load(configuration_file))
def __str__(self) -> str:
"""Detector parameters string representation."""
- return f'{self}'
-
- def __format__(self, spec: str) -> str:
- """Formated detector parameters string representation.
-
- Parameters:
- spec: 'modified' to get only modified parameters.
- """
-
output = ''
for parameter in self.__parameters_names:
- if parameter in self.__dict__.keys():
+ if parameter in self.__modified:
- output += f'\t*{parameter}: {getattr(self.__parameters, parameter)}\n'
+ output += f'\t*{parameter}: {getattr(self, parameter)}\n'
- elif spec == "":
+ else:
- output += f'\t{parameter}: {getattr(self.__parameters, parameter)}\n'
+ output += f'\t{parameter}: {getattr(self, parameter)}\n'
return output
- @property
- def internal(self):
- return self.__parameters
-
-
class ArUcoDetector(DataFeatures.PipelineStepObject):
- """OpenCV ArUco library wrapper."""
+ """OpenCV ArucoDetector wrapper."""
# noinspection PyMissingConstructor
@DataFeatures.PipelineStepInit
@@ -201,7 +154,7 @@ class ArUcoDetector(DataFeatures.PipelineStepObject):
self.__detected_markers, detected_markers_corners, detected_markers_ids = {}, [], []
# Detect markers into gray picture
- detected_markers_corners, detected_markers_ids, _ = aruco.detectMarkers(cv.cvtColor(image, cv.COLOR_BGR2GRAY), self.__dictionary.markers, parameters=self.__parameters.internal if self.__parameters else None)
+ detected_markers_corners, detected_markers_ids, _ = cv2.aruco.detectMarkers(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), self.__dictionary.markers, parameters=self.__parameters)
# Are there detected markers?
if len(detected_markers_corners) > 0:
@@ -272,7 +225,7 @@ class ArUcoDetector(DataFeatures.PipelineStepObject):
if len(ids) == 0:
ids = self.__detected_markers.keys()
- # Prepare data for aruco.estimatePoseSingleMarkers function
+ # Prepare data for cv2.aruco.estimatePoseSingleMarkers function
selected_markers_corners = tuple()
selected_markers_ids = []
@@ -286,14 +239,14 @@ class ArUcoDetector(DataFeatures.PipelineStepObject):
# Estimate pose of selected markers
if len(selected_markers_corners) > 0:
- markers_rvecs, markers_tvecs, markers_points = aruco.estimatePoseSingleMarkers(selected_markers_corners, size, numpy.array(self.__optic_parameters.K), numpy.array(self.__optic_parameters.D))
+ markers_rvecs, markers_tvecs, markers_points = cv2.aruco.estimatePoseSingleMarkers(selected_markers_corners, size, numpy.array(self.__optic_parameters.K), numpy.array(self.__optic_parameters.D))
for i, marker_id in enumerate(selected_markers_ids):
marker = self.__detected_markers[marker_id]
marker.translation = markers_tvecs[i][0]
- marker.rotation, _ = cv.Rodrigues(markers_rvecs[i][0])
+ marker.rotation, _ = cv2.Rodrigues(markers_rvecs[i][0])
marker.size = size
marker.points = markers_points.reshape(4, 3).dot(marker.rotation) - marker.translation
@@ -328,15 +281,15 @@ class ArUcoDetector(DataFeatures.PipelineStepObject):
"""
# detect markers from gray picture
- gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
- detected_markers_corners, detected_markers_ids, _ = aruco.detectMarkers(gray, self.__dictionary.markers,
- parameters=self.__parameters.internal)
+ gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+ detected_markers_corners, detected_markers_ids, _ = cv2.aruco.detectMarkers(gray, self.__dictionary.markers,
+ parameters=self.__parameters)
# if all board markers are detected
if len(detected_markers_corners) == expected_markers_number:
self.__board = board
- self.__board_corners_number, self.__board_corners, self.__board_corners_ids = aruco.interpolateCornersCharuco(
+ self.__board_corners_number, self.__board_corners, self.__board_corners_ids = cv2.aruco.interpolateCornersCharuco(
detected_markers_corners, detected_markers_ids, gray, self.__board.model)
else:
@@ -350,7 +303,7 @@ class ArUcoDetector(DataFeatures.PipelineStepObject):
"""Draw detected board corners in image."""
if self.__board is not None:
- cv.drawChessboardCorners(image, ((self.__board.size[0] - 1), (self.__board.size[1] - 1)),
+ cv2.drawChessboardCorners(image, ((self.__board.size[0] - 1), (self.__board.size[1] - 1)),
self.__board_corners, True)
def board_corners_number(self) -> int:
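
Since DetectorParameters now inherits from cv2.aruco.DetectorParameters, keyword arguments land directly on the native OpenCV fields and are tracked as modified. A hedged usage sketch (the .json file name is illustrative):

    from argaze.ArUcoMarker.ArUcoDetector import DetectorParameters

    # Keyword arguments set the native OpenCV attributes
    parameters = DetectorParameters(useAruco3Detection=True)

    # Parameters can also be loaded from a JSON file
    parameters = DetectorParameters.from_json('detector_parameters.json')

    # Modified parameters are prefixed with '*' in the string representation
    print(parameters)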
diff --git a/src/argaze/ArUcoMarker/ArUcoMarkerGroup.py b/src/argaze/ArUcoMarker/ArUcoMarkerGroup.py
index 5575cad..8cd8043 100644
--- a/src/argaze/ArUcoMarker/ArUcoMarkerGroup.py
+++ b/src/argaze/ArUcoMarker/ArUcoMarkerGroup.py
@@ -149,7 +149,7 @@ class ArUcoMarkerGroup(DataFeatures.PipelineStepObject):
new_marker = ArUcoMarker.ArUcoMarker(self.__dictionary, identifier, size)
- # Build marker corners thanks to translation vector and rotation matrix
+ # Build marker corners considering translation vector and rotation matrix
place_corners = numpy.array([[-size / 2, size / 2, 0], [size / 2, size / 2, 0], [size / 2, -size / 2, 0], [-size / 2, -size / 2, 0]])
place_corners = place_corners.dot(rmat) + tvec
diff --git a/src/argaze/DataFeatures.py b/src/argaze/DataFeatures.py
index 2629e8e..491d6ac 100644
--- a/src/argaze/DataFeatures.py
+++ b/src/argaze/DataFeatures.py
@@ -703,21 +703,20 @@ def PipelineStepExecutionTime(method):
end = time.perf_counter()
- # Check earlier call dates to calculate frequency
- try:
-
- last_start, last_end = self._execution_times[method.__name__]
-
- if start > last_start:
-
- self._execution_frequencies[method.__name__] = 1 / (start - last_start)
-
- except KeyError:
+ # Create list to store method call dates and init call frequency
+ if method.__name__ not in self._execution_times.keys():
+ self._execution_times[method.__name__] = []
self._execution_frequencies[method.__name__] = math.nan
# Store start end end dates
- self._execution_times[method.__name__] = (start, end)
+ self._execution_times[method.__name__].append((start, end))
+
+ # Remove call dates older than 1 second and count number of calls to get frequency
+ while self._execution_times[method.__name__][-1][0] - self._execution_times[method.__name__][0][0] > 1:
+
+ self._execution_times[method.__name__].pop(0)
+ self._execution_frequencies[method.__name__] = len(self._execution_times[method.__name__])
return result
@@ -1354,8 +1353,7 @@ class PipelineStepObject():
# Check execution time
try:
- start, end = self._execution_times[method_name]
- t = end - start
+ t = numpy.mean(numpy.diff(self._execution_times[method_name]))
except KeyError:
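
The frequency estimate thus becomes the number of calls retained in a one-second sliding window instead of the inverse of the last inter-call delay. The same logic as a standalone sketch:

    # Standalone sketch of the sliding-window frequency measure above
    execution_times = []  # (start, end) call dates in seconds

    def record_call(start: float, end: float) -> int:
        execution_times.append((start, end))

        # Drop call dates more than one second older than the latest start
        while execution_times[-1][0] - execution_times[0][0] > 1:
            execution_times.pop(0)

        # The number of retained calls approximates the frequency in Hz
        return len(execution_times)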
diff --git a/src/argaze/GazeAnalysis/KCoefficient.py b/src/argaze/GazeAnalysis/KCoefficient.py
index f9218cb..c6f303a 100644
--- a/src/argaze/GazeAnalysis/KCoefficient.py
+++ b/src/argaze/GazeAnalysis/KCoefficient.py
@@ -48,7 +48,7 @@ class ScanPathAnalyzer(GazeFeatures.ScanPathAnalyzer):
for scan_step in scan_path:
- durations.append(scan_step.duration)
+ durations.append(scan_step.fixation_duration)
amplitudes.append(scan_step.last_saccade.amplitude)
durations = numpy.array(durations)
@@ -65,7 +65,7 @@ class ScanPathAnalyzer(GazeFeatures.ScanPathAnalyzer):
Ks = []
for scan_step in scan_path:
- Ks.append((abs(scan_step.duration - duration_mean) / duration_std) - (abs(scan_step.last_saccade.amplitude - amplitude_mean) / amplitude_std))
+ Ks.append((abs(scan_step.fixation_duration - duration_mean) / duration_std) - (abs(scan_step.last_saccade.amplitude - amplitude_mean) / amplitude_std))
self.__K = numpy.array(Ks).mean()
@@ -106,7 +106,7 @@ class AOIScanPathAnalyzer(GazeFeatures.AOIScanPathAnalyzer):
for aoi_scan_step in aoi_scan_path:
- durations.append(aoi_scan_step.duration)
+ durations.append(aoi_scan_step.fixation_duration)
amplitudes.append(aoi_scan_step.last_saccade.amplitude)
durations = numpy.array(durations)
@@ -123,7 +123,7 @@ class AOIScanPathAnalyzer(GazeFeatures.AOIScanPathAnalyzer):
Ks = []
for aoi_scan_step in aoi_scan_path:
- Ks.append((abs(aoi_scan_step.duration - duration_mean) / duration_std) - (abs(aoi_scan_step.last_saccade.amplitude - amplitude_mean) / amplitude_std))
+ Ks.append((abs(aoi_scan_step.fixation_duration - duration_mean) / duration_std) - (abs(aoi_scan_step.last_saccade.amplitude - amplitude_mean) / amplitude_std))
self.__K = numpy.array(Ks).mean()
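
Both analyzers now feed the K statistic with each step's fixation duration rather than its whole duration. In LaTeX, the value averaged by the code above is

    K = \frac{1}{n} \sum_{i=1}^{n} \left( \frac{|d_i - \mu_d|}{\sigma_d} - \frac{|a_i - \mu_a|}{\sigma_a} \right)

where d_i is the fixation duration of scan step i, a_i the amplitude of its last saccade, and the means and standard deviations are taken over all steps.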
diff --git a/src/argaze/__main__.py b/src/argaze/__main__.py
index 76e9664..c65d6e5 100644
--- a/src/argaze/__main__.py
+++ b/src/argaze/__main__.py
@@ -24,10 +24,11 @@ import contextlib
import time
import os
import stat
+import math
from . import load
from .DataFeatures import SharedObjectBusy
-from .ArFeatures import ArCamera, ArContext, PostProcessingContext, LiveProcessingContext
+from .ArFeatures import ArCamera, ArContext, DataPlaybackContext, DataCaptureContext
from .utils.UtilsFeatures import print_progress_bar
import cv2
@@ -68,7 +69,7 @@ def load_context(args):
# Blank line
info_stack += 1
- if issubclass(type(context), LiveProcessingContext):
+ if issubclass(type(context), DataCaptureContext):
info_stack += 1
cv2.putText(image, f'Press Enter to start calibration', (int(width/4)+20, int(height/3)+(info_stack*40)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
@@ -76,7 +77,7 @@ def load_context(args):
info_stack += 1
cv2.putText(image, f'Press r to start/stop recording', (int(width/4)+20, int(height/3)+(info_stack*40)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
- if issubclass(type(context), PostProcessingContext):
+ if issubclass(type(context), DataPlaybackContext):
info_stack += 1
cv2.putText(image, f'Press Space bar to pause/resume processing', (int(width/4)+20, int(height/3)+(info_stack*40)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
@@ -199,8 +200,8 @@ def load_context(args):
raise KeyboardInterrupt()
- # Keys specific to live processing contexts
- if issubclass(type(context), LiveProcessingContext):
+ # Keys specific to data capture contexts
+ if issubclass(type(context), DataCaptureContext):
# Enter: start calibration
if key_pressed == 13:
@@ -222,10 +223,10 @@ def load_context(args):
context.create_recording()
context.start_recording()
- # Keys specific to post processing contexts
- if issubclass(type(context), PostProcessingContext):
+ # Keys specific to data playback contexts
+ if issubclass(type(context), DataPlaybackContext):
- # Space bar: pause/resume pipeline processing
+ # Space bar: pause/resume data playback
if key_pressed == 32:
@@ -236,21 +237,11 @@ def load_context(args):
else:
context.pause()
-
- # Select previous image with left arrow
- if key_pressed == 2:
-
- context.previous()
-
- # Select next image with right arrow
- if key_pressed == 3:
-
- context.next()
# Window mode off
else:
- if issubclass(type(context), PostProcessingContext):
+ if issubclass(type(context), DataPlaybackContext):
prefix = f'Progression'
suffix = f'| {int(context.progression*context.duration * 1e-3)}s in {int(time.time()-start_time)}s'
@@ -261,7 +252,8 @@ def load_context(args):
if issubclass(type(context.pipeline), ArCamera):
watch_time, watch_freq = context.pipeline.execution_info('watch')
- suffix += f' | Watch {int(watch_time)}ms at {watch_freq}Hz'
+
+ suffix += f' | Watch {int(watch_time) if not math.isnan(watch_time) else 0}ms at {watch_freq if not math.isnan(watch_freq) else 0}Hz'
# Clear old longer print
suffix += ' '
diff --git a/src/argaze/utils/UtilsFeatures.py b/src/argaze/utils/UtilsFeatures.py
index ce92e35..5dbc10c 100644
--- a/src/argaze/utils/UtilsFeatures.py
+++ b/src/argaze/utils/UtilsFeatures.py
@@ -155,11 +155,6 @@ class TimeProbe():
self.start()
-def tuple_to_string(t: tuple, separator: str = ", ") -> str:
- """Convert tuple elements into quoted strings separated by a separator string."""
-
- return separator.join(f'\"{e}\"' for e in t)
-
def PrintCallStack(method):
"""Define a decorator to print call stack until the decorated method."""
@@ -230,37 +225,29 @@ class FileWriter(DataFeatures.PipelineStepObject):
os.makedirs(self.__path.parent.absolute())
# Open file
- self.__file = open(self.__path, 'w', encoding='utf-8', buffering=1)
+ self.__file = open(self.__path, 'w', encoding='utf-8', newline='', buffering=1)
+ self.__writer = csv.writer(self.__file, delimiter=self.__separator, quoting=csv.QUOTE_NONNUMERIC)
# Write header if required
if self.__header is not None:
- # Format list or tuple element into quoted strings
- if not isinstance(self.__header, str):
-
- self.__header = tuple_to_string(self.__header, self.__separator)
-
- print(self.__header, file=self.__file, flush=True)
+ self.__writer.writerow(self.__header)
@DataFeatures.PipelineStepExit
def __exit__(self, exception_type, exception_value, exception_traceback):
"""Close file."""
+
self.__file.close()
def write(self, data: str|tuple):
- """Write data as a new line into file.
+ """Write data as a new line into file."""
- !!! note
- Tuple elements are converted into quoted strings separated by separator string.
- """
-
- # Format list or tuple element into quoted strings
- if not isinstance(data, str):
-
- data = tuple_to_string(data, self.__separator)
+ if self.__file.closed:
+
+ return
# Write into file
- print(data, file=self.__file, flush=True)
+ self.__writer.writerow(data)
class FileReader(DataFeatures.PipelineStepObject):
"""Read data from a file line by line."""
@@ -305,11 +292,7 @@ class FileReader(DataFeatures.PipelineStepObject):
pass
def read(self) -> str|tuple:
- """Read next data from file.
-
- !!! note
- Quoted strings separated by separator string are converted into tuple elements.
- """
+ """Read next data from file."""
try:
@@ -443,7 +426,7 @@ class LookPerformanceRecorder(FileWriter):
super().__init__(**kwargs)
- self.header = "Timestamp (ms)", "Time (ms)", "Frequency (Hz)"
+ self.header = "Real time (ms)", "Frame timestamp (ms)", "Execution time (ms)", "Execution frequency (Hz)"
self.__start_time = time.perf_counter()
@@ -456,6 +439,7 @@ class LookPerformanceRecorder(FileWriter):
log = (
(time.perf_counter() - self.__start_time) * 1e3,
+ timestamp,
t * 1e3,
f
)
@@ -470,7 +454,7 @@ class WatchPerformanceRecorder(FileWriter):
super().__init__(**kwargs)
- self.header = "Timestamp (ms)", "Time (ms)", "Frequency (Hz)"
+ self.header = "Real time (ms)", "Camera timestamp (ms)", "Execution time (ms)", "Execution frequency (Hz)"
self.__start_time = time.perf_counter()
@@ -483,6 +467,7 @@ class WatchPerformanceRecorder(FileWriter):
log = (
(time.perf_counter() - self.__start_time) * 1e3,
+ timestamp,
t * 1e3,
f
)
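
The switch to csv.writer relies on csv.QUOTE_NONNUMERIC quoting string fields while leaving numbers bare, which is what the removed tuple_to_string() helper emulated. A small sketch of that behavior:

    import csv
    import io

    buffer = io.StringIO()
    writer = csv.writer(buffer, delimiter=',', quoting=csv.QUOTE_NONNUMERIC)

    # Strings are quoted, numbers are written bare
    writer.writerow(("Real time (ms)", 12.5, 60))

    print(buffer.getvalue())  # "Real time (ms)",12.5,60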
diff --git a/src/argaze/utils/contexts/OpenCV.py b/src/argaze/utils/contexts/OpenCV.py
index 273705a..908f91d 100644
--- a/src/argaze/utils/contexts/OpenCV.py
+++ b/src/argaze/utils/contexts/OpenCV.py
@@ -27,7 +27,7 @@ from argaze import ArFeatures, DataFeatures
class Cursor(ArFeatures.ArContext):
- """Process cursor position over OpenCV window.
+ """Capture cursor position over OpenCV window.
!!! warning
It is assumed that an OpenCV window with the same name as the context is used to display the context's pipeline image.
@@ -36,7 +36,7 @@ class Cursor(ArFeatures.ArContext):
@DataFeatures.PipelineStepInit
def __init__(self, **kwargs):
- # Init LiveProcessingContext class
+ # Init DataCaptureContext class
super().__init__()
@DataFeatures.PipelineStepEnter
@@ -74,8 +74,8 @@ class Cursor(ArFeatures.ArContext):
self._process_gaze_position(timestamp = int((time.time() - self._start_time) * 1e3), x = x, y = y)
-class Movie(Cursor):
- """Process movie images and cursor position over OpenCV window.
+class Movie(Cursor, ArFeatures.DataPlaybackContext):
+ """Play back movie images and capture cursor position over an OpenCV window.
!!! warning
It is assumed that an OpenCV window with the same name as the context is used to display the context's pipeline image.
@@ -134,45 +134,35 @@ class Movie(Cursor):
def __read(self):
"""Iterate on movie images."""
- # Init image selection
- _, current_image = self.__movie.read()
- current_image_time = self.__movie.get(cv2.CAP_PROP_POS_MSEC)
- self.__next_image_index = 0 #int(self.__start * self.__movie_fps)
-
- while not self._stop_event.is_set():
+ while self.is_running():
# Check pause event (and stop event)
- while self._pause_event.is_set() and not self._stop_event.is_set():
+ while self.is_paused() and self.is_running():
- logging.debug('> reading is paused at %i', current_image_time)
+ logging.debug('> reading is paused')
time.sleep(1)
- # Select a new image and detect markers once
- if self.__next_image_index != self.__current_image_index or self.__refresh:
-
- self.__movie.set(cv2.CAP_PROP_POS_FRAMES, self.__next_image_index)
-
- success, image = self.__movie.read()
+ # Read image
+ success, image = self.__movie.read()
- if success:
+ if success:
- video_height, video_width, _ = image.shape
+ # Refresh once
+ self.__refresh = False
- # Refresh once
- self.__refresh = False
+ self.__current_image_index = self.__movie.get(cv2.CAP_PROP_POS_FRAMES) - 1
+ current_image_time = self.__movie.get(cv2.CAP_PROP_POS_MSEC)
- self.__current_image_index = self.__movie.get(cv2.CAP_PROP_POS_FRAMES) - 1
- current_image_time = self.__movie.get(cv2.CAP_PROP_POS_MSEC)
+ # Timestamp image
+ image = DataFeatures.TimestampedImage(image, timestamp=current_image_time)
- # Timestamp image
- image = DataFeatures.TimestampedImage(image, timestamp=current_image_time)
+ # Process movie image
+ self._process_camera_image(timestamp=current_image_time, image=image)
- # Process movie image
- self._process_camera_image(timestamp=current_image_time, image=image)
-
- # Wait
- time.sleep(1 / self.__movie_fps)
+ # Wait for half frame time
+ # TODO: Consider camera image processing time to adapt waiting time
+ time.sleep(0.5 / self.__movie_fps)
@DataFeatures.PipelineStepExit
def __exit__(self, exception_type, exception_value, exception_traceback):
@@ -182,33 +172,12 @@ class Movie(Cursor):
# Exit from Cursor context
super().__exit__(exception_type, exception_value, exception_traceback)
- # Close data stream
+ # Close data capture
self.stop()
# Stop reading thread
threading.Thread.join(self.__reading_thread)
- def refresh(self):
- """Refresh current frame."""
- self.__refresh = True
-
- def previous(self):
- """Go to previous frame."""
- self.__next_image_index -= 1
-
- # Clip image index
- if self.__next_image_index < 0:
- self.__next_image_index = 0
-
- def next(self):
- """Go to next frame."""
-
- self.__next_image_index += 1
-
- # Clip image index
- if self.__next_image_index < 0:
- self.__next_image_index = 0
-
@property
def duration(self) -> int|float:
"""Get movie duration."""
@@ -217,7 +186,7 @@ class Movie(Cursor):
@property
def progression(self) -> float:
- """Get movie processing progression between 0 and 1."""
+ """Get movie playback progression between 0 and 1."""
if self.__current_image_index is not None:
@@ -225,4 +194,93 @@ class Movie(Cursor):
else:
- return 0.
\ No newline at end of file
+ return 0.
+
+class Camera(Cursor, ArFeatures.DataCaptureContext):
+ """Capture camera images and cursor position over an OpenCV window.
+
+ !!! warning
+ It is assumed that an OpenCV window with the same name as the context is used to display the context's pipeline image.
+ """
+ @DataFeatures.PipelineStepInit
+ def __init__(self, **kwargs):
+
+ # Init Cursor class
+ super().__init__()
+
+ # Init private attributes
+ self.__camera_id = None
+ self.__camera = None
+ self.__video_fps = None
+ self.__video_width = None
+ self.__video_height = None
+
+ @property
+ def identifier(self) -> int:
+ """Camera device id."""
+ return self.__camera_id
+
+ @identifier.setter
+ def identifier(self, camera_id: int):
+
+ self.__camera_id = camera_id
+
+ # Open camera device
+ self.__camera = cv2.VideoCapture(self.__camera_id)
+ self.__video_fps = self.__camera.get(cv2.CAP_PROP_FPS)
+ self.__video_width = int(self.__camera.get(cv2.CAP_PROP_FRAME_WIDTH))
+ self.__video_height = int(self.__camera.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+ @DataFeatures.PipelineStepEnter
+ def __enter__(self):
+
+ logging.info('OpenCV.Camera context starts...')
+
+ # Enter in Cursor context
+ super().__enter__()
+
+ # Open reading thread
+ self.__reading_thread = threading.Thread(target=self.__read)
+
+ logging.debug('> starting reading thread...')
+ self.__reading_thread.start()
+
+ return self
+
+ def __read(self):
+ """Iterate on camera images."""
+
+ while self.is_running():
+
+ # Check pause event (and stop event)
+ while self.is_paused() and self.is_running():
+
+ logging.debug('> reading is paused')
+
+ time.sleep(1)
+
+ # Select a new image
+ success, image = self.__camera.read()
+ image_time = self.__camera.get(cv2.CAP_PROP_POS_MSEC)
+
+ if success:
+
+ # Timestamp image
+ image = DataFeatures.TimestampedImage(image, timestamp=image_time)
+
+ # Process camera image
+ self._process_camera_image(timestamp=image_time, image=image)
+
+ @DataFeatures.PipelineStepExit
+ def __exit__(self, exception_type, exception_value, exception_traceback):
+
+ logging.info('OpenCV.Camera context stops...')
+
+ # Exit from Cursor context
+ super().__exit__(exception_type, exception_value, exception_traceback)
+
+ # Close data capture
+ self.stop()
+
+ # Stop reading thread
+ threading.Thread.join(self.__reading_thread)
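
The new Camera context follows the same with-statement life cycle as the other contexts and is meant to be loaded from a JSON configuration such as the demo file added below. A hedged usage sketch (the polling loop body is illustrative):

    from argaze import load

    # Load the OpenCV camera context added by this commit
    with load('src/argaze/utils/demo/opencv_camera_context.json') as context:

        # Poll until the context stops; displaying context.image() in an
        # OpenCV window named after the context is left to the caller
        while context.is_running():
            ...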
diff --git a/src/argaze/utils/contexts/PupilLabs.py b/src/argaze/utils/contexts/PupilLabsInvisible.py
index d5a4319..5c9a138 100644
--- a/src/argaze/utils/contexts/PupilLabs.py
+++ b/src/argaze/utils/contexts/PupilLabsInvisible.py
@@ -21,6 +21,7 @@ __license__ = "GPLv3"
import sys
import logging
import time
+
import threading
from dataclasses import dataclass
@@ -33,17 +34,17 @@ import cv2
from pupil_labs.realtime_api.simple import discover_one_device
-class LiveStream(ArFeatures.ArContext):
+class LiveStream(ArFeatures.DataCaptureContext):
@DataFeatures.PipelineStepInit
def __init__(self, **kwargs):
- # Init ArContext class
+ # Init DataCaptureContext class
super().__init__()
def __enter__(self):
- logging.info('Pupil-Labs Device connexion starts...')
+ logging.info('Pupil-Labs Invisible connection starts...')
# Init timestamp
self.__start_time = time.time()
@@ -76,9 +77,9 @@ class LiveStream(ArFeatures.ArContext):
def __stream_gaze(self):
"""Stream gaze."""
- logging.debug('Stream gaze from Pupil Device')
+ logging.debug('Stream gaze from Pupil Invisible')
- while not self._stop_event.is_set():
+ while self.is_running():
try:
while True:
@@ -107,9 +108,9 @@ class LiveStream(ArFeatures.ArContext):
def __stream_video(self):
"""Stream video."""
- logging.debug('Stream video from Pupil Device')
+ logging.debug('Stream video from Pupil Invisible')
- while not self._stop_event.is_set():
+ while self.is_running():
try:
while True:
@@ -132,7 +133,7 @@ class LiveStream(ArFeatures.ArContext):
logging.debug('Pupil-Labs context stops...')
# Close data stream
- self._stop_event.set()
+ self.stop()
# Stop streaming
threading.Thread.join(self.__gaze_thread)
diff --git a/src/argaze/utils/contexts/TobiiProGlasses2.py b/src/argaze/utils/contexts/TobiiProGlasses2.py
index 7f45f32..fbc9d1e 100644
--- a/src/argaze/utils/contexts/TobiiProGlasses2.py
+++ b/src/argaze/utils/contexts/TobiiProGlasses2.py
@@ -330,12 +330,12 @@ class TobiiJsonDataParser():
return MarkerPosition(data['marker3d'], data['marker2d'])
-class LiveStream(ArFeatures.LiveProcessingContext):
+class LiveStream(ArFeatures.DataCaptureContext):
@DataFeatures.PipelineStepInit
def __init__(self, **kwargs):
- # Init LiveProcessingContext class
+ # Init DataCaptureContext class
super().__init__()
# Init private attributes
@@ -629,7 +629,6 @@ class LiveStream(ArFeatures.LiveProcessingContext):
threading.Thread.join(self.__video_thread)
-
def __make_socket(self):
"""Create a socket to enable network communication."""
@@ -742,15 +741,15 @@ class LiveStream(ArFeatures.LiveProcessingContext):
# Check image validity
if image is None:
- # Wait for half frame time
- time.sleep(2 / self.__video_fps)
+ # Wait 1ms
+ time.sleep(1e-3)
continue
# Check image time validity
if image.time is None:
- # Wait for half frame time
- time.sleep(2 / self.__video_fps)
+ # Wait 1ms
+ time.sleep(1e-3)
continue
# Store first timestamp
@@ -786,9 +785,6 @@ class LiveStream(ArFeatures.LiveProcessingContext):
while not self._stop_event.is_set():
- # Wait for half frame time
- time.sleep(2 / self.__video_fps)
-
# Lock buffer access
with self.__video_buffer_lock:
@@ -812,6 +808,9 @@ class LiveStream(ArFeatures.LiveProcessingContext):
# Clear buffer
self.__video_buffer = None
+ # Wait 1ms
+ time.sleep(1e-3)
+
def __keep_alive(self):
"""Maintain network connection."""
@@ -975,6 +974,11 @@ class LiveStream(ArFeatures.LiveProcessingContext):
self.__calibration_id = None
def get_calibration_status(self) -> str:
+ """Get calibration status.
+
+ Returns:
+ status: 'calibrating', 'calibrated', 'stale', 'uncalibrated' or 'failed' string
+ """
return self.__calibration_status
@@ -1062,9 +1066,9 @@ class LiveStream(ArFeatures.LiveProcessingContext):
@DataFeatures.PipelineStepImage
def image(self, **kwargs):
"""
- Get pipeline image with live processing information.
+ Get pipeline image with data capture information.
"""
- logging.debug('LiveProcessingContext.image %s', self.name)
+ logging.debug('DataCaptureContext.image %s', self.name)
image = super().image(**kwargs)
height, width, _ = image.shape
@@ -1126,7 +1130,7 @@ class LiveStream(ArFeatures.LiveProcessingContext):
return image
-class PostProcessing(ArFeatures.PostProcessingContext):
+class SegmentPlayback(ArFeatures.DataPlaybackContext):
@DataFeatures.PipelineStepInit
def __init__(self, **kwargs):
@@ -1165,6 +1169,7 @@ class PostProcessing(ArFeatures.PostProcessingContext):
self.__sync_event_unit = None
self.__sync_event_factor = None
self.__sync_data_ts = None
+ self.__sync_video_ts = None
self.__sync_ts = None
self.__last_sync_data_ts = None
self.__last_sync_ts = None
@@ -1297,8 +1302,22 @@ class PostProcessing(ArFeatures.PostProcessingContext):
logging.debug('> read image at %i timestamp', video_ts)
- # Process camera image
- self._process_camera_image(timestamp=video_ts, image=video_image)
+ # if sync is required
+ if self.__sync_event is not None:
+
+ # Wait for a first sync event
+ if self.__sync_ts is not None:
+
+ self.__sync_video_ts = int(self.__sync_ts + video_ts - self.__sync_data_ts)
+
+ # Process camera image
+ self._process_camera_image(timestamp=self.__sync_video_ts, image=video_image)
+
+ # Otherwise, always process images
+ elif self.__sync_event is None:
+
+ # Process camera image
+ self._process_camera_image(timestamp=video_ts, image=video_image)
height, width, _ = video_image.shape
@@ -1343,15 +1362,17 @@ class PostProcessing(ArFeatures.PostProcessingContext):
logging.info('Difference between data and sync event timestamps is %i ms', diff_data_ts-diff_sync_ts)
- # Don't process gaze positions if sync is required but sync event not happened yet
- if self.__sync_event is not None and self.__sync_ts is None:
+ # Don't process gaze positions when:
+ # - no image has been processed yet
+ # - no sync event happened yet
+ if self.__sync_video_ts is None or self.__sync_ts is None:
- continue
+ continue
- # Otherwise, synchronize timestamp with sync event
- elif self.__sync_event is not None and self.__sync_ts is not None:
+ # Otherwise, synchronize timestamp with sync event
+ else:
- data_ts = int(self.__sync_ts + data_ts - self.__sync_data_ts)
+ data_ts = int(self.__sync_ts + data_ts - self.__sync_data_ts)
# Process gaze positions
match data_object_type:
@@ -1514,6 +1535,6 @@ class PostProcessing(ArFeatures.PostProcessingContext):
@property
def progression(self) -> float:
- """Get data processing progression between 0 and 1."""
+ """Get data playback progression between 0 and 1."""
return self.__progression
\ No newline at end of file
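
The synchronization above applies the same clock offset to video and gaze timestamps once a sync event has paired a data timestamp with the reference clock. Distilled into a sketch:

    def synchronize(ts: int, sync_ts: int, sync_data_ts: int) -> int:
        # Shift a device timestamp by the offset learned from the last
        # sync event pairing (sync_data_ts -> sync_ts)
        return int(sync_ts + ts - sync_data_ts)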
diff --git a/src/argaze/utils/contexts/TobiiProGlasses3.py b/src/argaze/utils/contexts/TobiiProGlasses3.py
new file mode 100644
index 0000000..a53c095
--- /dev/null
+++ b/src/argaze/utils/contexts/TobiiProGlasses3.py
@@ -0,0 +1,128 @@
+"""Handle network connection to Tobii Pro G3 devices.
+ Based on Tobii Realtime Python API.
+ g3pylib must be installed.
+"""
+
+"""
+This program is free software: you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation, either version 3 of the License, or (at your option) any later
+version.
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+You should have received a copy of the GNU General Public License along with
+this program. If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+__author__ = "Damien Mouratille"
+__credits__ = []
+__copyright__ = "Copyright 2024, Ecole Nationale de l'Aviation Civile (ENAC)"
+__license__ = "GPLv3"
+
+import sys
+import logging
+import time
+import dill
+import threading
+from dataclasses import dataclass
+import numpy
+import cv2
+import asyncio
+import os
+
+from argaze import ArFeatures, DataFeatures, GazeFeatures
+from argaze.utils import UtilsFeatures
+
+
+from g3pylib import connect_to_glasses
+
+
+class LiveStream(ArFeatures.DataCaptureContext):
+
+ @DataFeatures.PipelineStepInit
+ def __init__(self, **kwargs):
+
+ # Init DataCaptureContext class
+ super().__init__()
+
+ def __enter__(self):
+
+ logging.info('Tobii Pro G3 connection starts...')
+
+ # Init timestamp
+ self.__start_time = time.time()
+
+ self.__loop = asyncio.new_event_loop()
+ self.__loop.run_until_complete(self.__stream_rtsp())
+
+ return self
+
+ async def __stream_rtsp(self):
+ """Stream video and gaze."""
+
+ logging.info('Stream gaze from Tobii Pro G3')
+
+ while self.is_running():
+
+ try:
+ async with connect_to_glasses.with_zeroconf(True,10000) as g3:
+ async with g3.stream_rtsp(scene_camera=True, gaze=True) as streams:
+ async with streams.gaze.decode() as gaze_stream, streams.scene_camera.decode() as scene_stream:
+ while True:
+ frame, frame_timestamp = await scene_stream.get()
+ gaze, gaze_timestamp = await gaze_stream.get()
+ while gaze_timestamp is None or frame_timestamp is None:
+ if frame_timestamp is None:
+ frame, frame_timestamp = await scene_stream.get()
+ if gaze_timestamp is None:
+ gaze, gaze_timestamp = await gaze_stream.get()
+ while gaze_timestamp < frame_timestamp:
+ gaze, gaze_timestamp = await gaze_stream.get()
+ while gaze_timestamp is None:
+ gaze, gaze_timestamp = await gaze_stream.get()
+
+ scene_frame = frame.to_ndarray(format="bgr24")
+
+ gaze_timestamp = int((gaze_timestamp - self.__start_time) * 1e3)
+
+ logging.debug('Gaze received at %i timestamp', gaze_timestamp)
+
+ # If given gaze data
+ if "gaze2d" in gaze:
+ gaze2d = gaze["gaze2d"]
+ # Convert normalized (x,y) to pixel location (x,y)
+ h, w = scene_frame.shape[:2]
+ gaze_scene = (int(gaze2d[0] * w), int(gaze2d[1] * h))
+
+
+ self._process_gaze_position(
+ timestamp=gaze_timestamp,
+ x=gaze_scene[0],
+ y=gaze_scene[1])
+ else:
+ # No gaze position is available when the glasses are not worn
+ logging.debug('Not worn at %i timestamp', gaze_timestamp)
+
+ scene_timestamp = int((frame_timestamp - self.__start_time) * 1e3)
+
+ logging.debug('Video received at %i timestamp', scene_timestamp)
+
+ self._process_camera_image(
+ timestamp=scene_timestamp,
+ image=scene_frame)
+
+ except KeyboardInterrupt:
+ pass
+
+
+
+ @DataFeatures.PipelineStepExit
+ def __exit__(self, exception_type, exception_value, exception_traceback):
+
+ logging.debug('Tobii Pro G3 context stops...')
+
+ # Close data stream
+ self.stop()
+
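
The nested loops in __stream_rtsp() align the two RTSP streams by advancing the gaze stream until it yields a timestamped sample at or after the current scene frame. The gaze-side rule, distilled into a sketch:

    async def next_aligned_gaze(gaze_stream, frame_timestamp):
        # Advance the gaze stream until a sample with a defined timestamp
        # at or after the current scene frame is available
        gaze, gaze_timestamp = await gaze_stream.get()

        while gaze_timestamp is None or gaze_timestamp < frame_timestamp:
            gaze, gaze_timestamp = await gaze_stream.get()

        return gaze, gaze_timestamp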
diff --git a/src/argaze/utils/demo/aruco_markers_pipeline.json b/src/argaze/utils/demo/aruco_markers_pipeline.json
index f29111b..8221cec 100644
--- a/src/argaze/utils/demo/aruco_markers_pipeline.json
+++ b/src/argaze/utils/demo/aruco_markers_pipeline.json
@@ -1,12 +1,12 @@
{
"argaze.ArUcoMarker.ArUcoCamera.ArUcoCamera": {
"name": "Head-mounted camera",
- "size": [1920, 1080],
+ "size": [1088, 1080],
"copy_background_into_scenes_frames": true,
"aruco_detector": {
"dictionary": "DICT_APRILTAG_16h5",
"parameters": {
- "useAruco3Detection": 1
+ "useAruco3Detection": true
}
},
"sides_mask": 420,
@@ -56,7 +56,7 @@
},
"frames": {
"GrayRectangle": {
- "size": [1920, 1149],
+ "size": [1088, 1080],
"background": "frame_background.jpg",
"gaze_movement_identifier": {
"argaze.GazeAnalysis.DispersionThresholdIdentification.GazeMovementIdentifier": {
@@ -67,11 +67,35 @@
"scan_path": {
"duration_max": 10000
},
+ "scan_path_analyzers": {
+ "argaze.GazeAnalysis.Basic.ScanPathAnalyzer": {},
+ "argaze.GazeAnalysis.KCoefficient.ScanPathAnalyzer": {},
+ "argaze.GazeAnalysis.NearestNeighborIndex.ScanPathAnalyzer": {
+ "size": [1088, 1080]
+ },
+ "argaze.GazeAnalysis.ExploreExploitRatio.ScanPathAnalyzer": {
+ "short_fixation_duration_threshold": 0
+ }
+ },
"layers": {
"demo_layer": {
"aoi_scene": "aoi_2d_scene.json",
"aoi_matcher": {
"argaze.GazeAnalysis.FocusPointInside.AOIMatcher": {}
+ },
+ "aoi_scan_path": {
+ "duration_max": 10000
+ },
+ "aoi_scan_path_analyzers": {
+ "argaze.GazeAnalysis.Basic.AOIScanPathAnalyzer": {},
+ "argaze.GazeAnalysis.TransitionMatrix.AOIScanPathAnalyzer": {},
+ "argaze.GazeAnalysis.KCoefficient.AOIScanPathAnalyzer": {},
+ "argaze.GazeAnalysis.LempelZivComplexity.AOIScanPathAnalyzer": {},
+ "argaze.GazeAnalysis.NGram.AOIScanPathAnalyzer": {
+ "n_min": 3,
+ "n_max": 3
+ },
+ "argaze.GazeAnalysis.Entropy.AOIScanPathAnalyzer":{}
}
}
},
@@ -116,17 +140,7 @@
}
}
}
- },
- "angle_tolerance": 15.0,
- "distance_tolerance": 2.54
- }
- },
- "observers": {
- "argaze.utils.UtilsFeatures.LookPerformanceRecorder": {
- "path": "_export/records/look_performance.csv"
- },
- "argaze.utils.UtilsFeatures.WatchPerformanceRecorder": {
- "path": "_export/records/watch_performance.csv"
+ }
}
}
}
diff --git a/src/argaze/utils/demo/gaze_analysis_pipeline.json b/src/argaze/utils/demo/gaze_analysis_pipeline.json
index 8b8212e..6e23321 100644
--- a/src/argaze/utils/demo/gaze_analysis_pipeline.json
+++ b/src/argaze/utils/demo/gaze_analysis_pipeline.json
@@ -1,7 +1,7 @@
{
"argaze.ArFeatures.ArFrame": {
"name": "GrayRectangle",
- "size": [1920, 1149],
+ "size": [1088, 1080],
"background": "frame_background.jpg",
"gaze_movement_identifier": {
"argaze.GazeAnalysis.DispersionThresholdIdentification.GazeMovementIdentifier": {
diff --git a/src/argaze/utils/demo/opencv_camera_context.json b/src/argaze/utils/demo/opencv_camera_context.json
new file mode 100644
index 0000000..b280c73
--- /dev/null
+++ b/src/argaze/utils/demo/opencv_camera_context.json
@@ -0,0 +1,7 @@
+{
+ "argaze.utils.contexts.OpenCV.Camera" : {
+ "name": "OpenCV camera",
+ "identifier": 0,
+ "pipeline": "aruco_markers_pipeline.json"
+ }
+}
\ No newline at end of file
diff --git a/src/argaze/utils/demo/opencv_movie_context.json b/src/argaze/utils/demo/opencv_movie_context.json
index f7da7ee..930a0fc 100644
--- a/src/argaze/utils/demo/opencv_movie_context.json
+++ b/src/argaze/utils/demo/opencv_movie_context.json
@@ -1,6 +1,6 @@
{
"argaze.utils.contexts.OpenCV.Movie" : {
- "name": "OpenCV Window",
+ "name": "OpenCV movie",
"path": "./src/argaze/utils/demo/tobii_record/segments/1/fullstream.mp4",
"pipeline": "aruco_markers_pipeline.json"
}
diff --git a/src/argaze/utils/demo/pupillabs_invisible_live_stream_context.json b/src/argaze/utils/demo/pupillabs_invisible_live_stream_context.json
new file mode 100644
index 0000000..3418de6
--- /dev/null
+++ b/src/argaze/utils/demo/pupillabs_invisible_live_stream_context.json
@@ -0,0 +1,6 @@
+{
+ "argaze.utils.contexts.PupilLabsInvisible.LiveStream" : {
+ "name": "PupilLabs Invisible",
+ "pipeline": "aruco_markers_pipeline.json"
+ }
+}
\ No newline at end of file
diff --git a/src/argaze/utils/demo/pupillabs_live_stream_context.json b/src/argaze/utils/demo/pupillabs_live_stream_context.json
deleted file mode 100644
index bcb7263..0000000
--- a/src/argaze/utils/demo/pupillabs_live_stream_context.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
- "argaze.utils.contexts.PupilLabs.LiveStream" : {
- "name": "PupilLabs",
- "pipeline": "aruco_markers_pipeline.json"
- }
-}
\ No newline at end of file
diff --git a/src/argaze/utils/demo/recorders.py b/src/argaze/utils/demo/recorders.py
index 82022ce..979eaff 100644
--- a/src/argaze/utils/demo/recorders.py
+++ b/src/argaze/utils/demo/recorders.py
@@ -43,7 +43,7 @@ class FixationRecorder(UtilsFeatures.FileWriter):
log = (
timestamp,
- frame.last_gaze_movement().focus,
+ frame.last_gaze_movement().focus.value,
frame.last_gaze_movement().duration,
frame.layers['demo_layer'].last_looked_aoi_name()
)
@@ -117,61 +117,3 @@ class AOIScanPathAnalysisRecorder(UtilsFeatures.FileWriter):
)
self.write(log)
-
-
-class ArUcoMarkersPoseRecorder(DataFeatures.PipelineStepObject):
-
- @DataFeatures.PipelineStepInit
- def __init__(self, **kwargs):
-
- # Init private attributes
- self.__output_folder = None
- self.__size = None
-
- @property
- def output_folder(self) -> str:
- """folder path where to write ArUco markers pose."""
- return self.__output_folder
-
- @output_folder.setter
- def output_folder(self, output_folder: str):
-
- self.__output_folder = output_folder
-
- @property
- def size(self) -> float:
- """Expected size in centimeters of detected markers."""
- return self.__output_folder
-
- @size.setter
- def size(self, size: float):
-
- self.__size = size
-
- @property
- def ids(self) -> list:
- """Ids of markers to estimate pose (default all)."""
- return self.__ids
-
- @ids.setter
- def ids(self, ids: list):
-
- self.__ids = ids
-
- def on_detect_markers(self, timestamp, aruco_detector, exception):
-
- logging.info('%s writes estimated markers pose into %s', DataFeatures.get_class_path(self), self.__output_folder)
-
- if self.__size is not None:
-
- # Estimate all detected markers pose
- aruco_detector.estimate_markers_pose(self.__size, ids = self.__ids)
-
- # Build ArUco markers group from detected markers
- aruco_markers_group = ArUcoMarkerGroup.ArUcoMarkerGroup(dictionary=aruco_detector.dictionary, places=aruco_detector.detected_markers())
-
- if self.__output_folder is not None:
-
- # Write ArUco markers group
- aruco_markers_group.to_obj(f'{self.__output_folder}/{int(timestamp)}-aruco_markers_group.obj')
-
\ No newline at end of file
diff --git a/src/argaze/utils/demo/tobii_live_stream_context.json b/src/argaze/utils/demo/tobii_g2_live_stream_context.json
index 6950617..6950617 100644
--- a/src/argaze/utils/demo/tobii_live_stream_context.json
+++ b/src/argaze/utils/demo/tobii_g2_live_stream_context.json
diff --git a/src/argaze/utils/demo/tobii_g3_live_stream_context.json b/src/argaze/utils/demo/tobii_g3_live_stream_context.json
new file mode 100644
index 0000000..20f6ab1
--- /dev/null
+++ b/src/argaze/utils/demo/tobii_g3_live_stream_context.json
@@ -0,0 +1,6 @@
+{
+ "argaze.utils.contexts.TobiiProGlasses3.LiveStream" : {
+ "name": "Tobii Pro Glasses 3 live stream",
+ "pipeline": "aruco_markers_pipeline.json"
+ }
+}
\ No newline at end of file
diff --git a/src/argaze/utils/demo/tobii_post_processing_context.json b/src/argaze/utils/demo/tobii_segment_playback_context.json
index 7a73512..d481b23 100644
--- a/src/argaze/utils/demo/tobii_post_processing_context.json
+++ b/src/argaze/utils/demo/tobii_segment_playback_context.json
@@ -1,6 +1,6 @@
{
- "argaze.utils.contexts.TobiiProGlasses2.PostProcessing" : {
- "name": "Tobii Pro Glasses 2 post-processing",
+ "argaze.utils.contexts.TobiiProGlasses2.SegmentPlayback" : {
+ "name": "Tobii Pro Glasses 2 segment playback",
"segment": "./src/argaze/utils/demo/tobii_record/segments/1",
"pipeline": "aruco_markers_pipeline.json"
}
diff --git a/src/argaze/utils/estimate_markers_pose/observers.py b/src/argaze/utils/estimate_markers_pose/observers.py
index 88da4f9..bbca1ad 100644
--- a/src/argaze/utils/estimate_markers_pose/observers.py
+++ b/src/argaze/utils/estimate_markers_pose/observers.py
@@ -18,6 +18,8 @@ __copyright__ = "Copyright 2023, Ecole Nationale de l'Aviation Civile (ENAC)"
__license__ = "GPLv3"
import logging
+import os
+import pathlib
from argaze import DataFeatures
from argaze.ArUcoMarker import ArUcoMarkerGroup
@@ -41,7 +43,11 @@ class ArUcoMarkersPoseRecorder(DataFeatures.PipelineStepObject):
@output_folder.setter
def output_folder(self, output_folder: str):
- self.__output_folder = output_folder
+ self.__output_folder = pathlib.Path(output_folder)
+
+ if not os.path.exists(self.__output_folder.absolute()):
+
+ os.makedirs(self.__output_folder.absolute())
def on_detect_markers(self, timestamp, aruco_detector, exception):
diff --git a/src/argaze/utils/estimate_markers_pose/pipeline.json b/src/argaze/utils/estimate_markers_pose/pipeline.json
index 2e0ab76..c16cce3 100644
--- a/src/argaze/utils/estimate_markers_pose/pipeline.json
+++ b/src/argaze/utils/estimate_markers_pose/pipeline.json
@@ -7,7 +7,7 @@
"pose_size": 4,
"pose_ids": [],
"parameters": {
- "useAruco3Detection": 1
+ "useAruco3Detection": true
},
"observers":{
"observers.ArUcoMarkersPoseRecorder": {