-rw-r--r-- | docs/img/4flight_visual_pattern.png | bin 0 -> 331959 bytes
-rw-r--r-- | docs/img/4flight_workspace.png | bin 0 -> 311033 bytes
-rw-r--r-- | docs/img/argaze_load_gui.png | bin 168761 -> 151200 bytes
-rw-r--r-- | docs/img/argaze_load_gui_haiku.png | bin 0 -> 321011 bytes
-rw-r--r-- | docs/img/argaze_load_gui_pfe.png | bin 0 -> 454631 bytes
-rw-r--r-- | docs/img/argaze_pipeline.png | bin 92231 -> 92553 bytes
-rw-r--r-- | docs/index.md | 2
-rw-r--r-- | docs/use_cases/air_controller_gaze_study/context.md | 22
-rw-r--r-- | docs/use_cases/air_controller_gaze_study/introduction.md | 48
-rw-r--r-- | docs/use_cases/air_controller_gaze_study/observers.md | 90
-rw-r--r-- | docs/use_cases/air_controller_gaze_study/pipeline.md | 366
-rw-r--r-- | docs/use_cases/gaze_based_candidate_selection/context.md | 7
-rw-r--r-- | docs/use_cases/gaze_based_candidate_selection/introduction.md | 12
-rw-r--r-- | docs/use_cases/gaze_based_candidate_selection/observers.md | 6
-rw-r--r-- | docs/use_cases/gaze_based_candidate_selection/pipeline.md | 6
-rw-r--r-- | docs/use_cases/pilot_gaze_monitoring/context.md | 11
-rw-r--r-- | docs/use_cases/pilot_gaze_monitoring/introduction.md | 7
-rw-r--r-- | docs/use_cases/pilot_gaze_monitoring/observers.md | 4
-rw-r--r-- | docs/use_cases/pilot_gaze_monitoring/pipeline.md | 49
-rw-r--r-- | docs/user_guide/aruco_marker_pipeline/advanced_topics/aruco_detector_configuration.md | 4
-rw-r--r-- | docs/user_guide/aruco_marker_pipeline/advanced_topics/optic_parameters_calibration.md | 2
-rw-r--r-- | docs/user_guide/aruco_marker_pipeline/advanced_topics/scripting.md | 2
-rw-r--r-- | docs/user_guide/aruco_marker_pipeline/aoi_3d_description.md | 2
-rw-r--r-- | docs/user_guide/aruco_marker_pipeline/configuration_and_execution.md | 2
-rw-r--r-- | docs/user_guide/eye_tracking_context/advanced_topics/context_definition.md | 82
-rw-r--r-- | docs/user_guide/eye_tracking_context/advanced_topics/scripting.md | 8
-rw-r--r-- | docs/user_guide/eye_tracking_context/advanced_topics/timestamped_gaze_positions_edition.md | 4
-rw-r--r-- | docs/user_guide/eye_tracking_context/configuration_and_execution.md | 6
-rw-r--r-- | docs/user_guide/eye_tracking_context/context_modules/opencv.md | 18
-rw-r--r-- | docs/user_guide/eye_tracking_context/context_modules/tobii_pro_glasses_2.md | 8
-rw-r--r-- | docs/user_guide/gaze_analysis_pipeline/advanced_topics/gaze_position_calibration.md | 2
-rw-r--r-- | docs/user_guide/gaze_analysis_pipeline/advanced_topics/scripting.md | 4
-rw-r--r-- | docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md | 5
-rw-r--r-- | docs/user_guide/gaze_analysis_pipeline/background.md | 4
-rw-r--r-- | docs/user_guide/gaze_analysis_pipeline/heatmap.md | 4
-rw-r--r-- | docs/user_guide/gaze_analysis_pipeline/visualization.md | 2
-rw-r--r-- | docs/user_guide/utils/demonstrations_scripts.md | 42
-rw-r--r-- | docs/user_guide/utils/estimate_aruco_markers_pose.md | 4
-rw-r--r-- | docs/user_guide/utils/main_commands.md | 4
-rw-r--r-- | mkdocs.yml | 10
-rw-r--r-- | pyproject.toml | 4
-rw-r--r-- | src/argaze.test/ArUcoMarker/ArUcoCamera.py | 4
-rw-r--r-- | src/argaze.test/ArUcoMarker/utils/aruco_camera.json | 8
-rw-r--r-- | src/argaze/ArFeatures.py | 77
-rw-r--r-- | src/argaze/ArUcoMarker/ArUcoDetector.py | 101
-rw-r--r-- | src/argaze/ArUcoMarker/ArUcoMarkerGroup.py | 2
-rw-r--r-- | src/argaze/DataFeatures.py | 24
-rw-r--r-- | src/argaze/GazeAnalysis/KCoefficient.py | 8
-rw-r--r-- | src/argaze/__main__.py | 32
-rw-r--r-- | src/argaze/utils/UtilsFeatures.py | 43
-rw-r--r-- | src/argaze/utils/contexts/OpenCV.py | 166
-rw-r--r-- | src/argaze/utils/contexts/PupilLabsInvisible.py (renamed from src/argaze/utils/contexts/PupilLabs.py) | 17
-rw-r--r-- | src/argaze/utils/contexts/TobiiProGlasses2.py | 65
-rw-r--r-- | src/argaze/utils/contexts/TobiiProGlasses3.py | 128
-rw-r--r-- | src/argaze/utils/demo/aruco_markers_pipeline.json | 42
-rw-r--r-- | src/argaze/utils/demo/gaze_analysis_pipeline.json | 2
-rw-r--r-- | src/argaze/utils/demo/opencv_camera_context.json | 7
-rw-r--r-- | src/argaze/utils/demo/opencv_movie_context.json | 2
-rw-r--r-- | src/argaze/utils/demo/pupillabs_invisible_live_stream_context.json | 6
-rw-r--r-- | src/argaze/utils/demo/pupillabs_live_stream_context.json | 6
-rw-r--r-- | src/argaze/utils/demo/recorders.py | 60
-rw-r--r-- | src/argaze/utils/demo/tobii_g2_live_stream_context.json (renamed from src/argaze/utils/demo/tobii_live_stream_context.json) | 0
-rw-r--r-- | src/argaze/utils/demo/tobii_g3_live_stream_context.json | 6
-rw-r--r-- | src/argaze/utils/demo/tobii_segment_playback_context.json (renamed from src/argaze/utils/demo/tobii_post_processing_context.json) | 4
-rw-r--r-- | src/argaze/utils/estimate_markers_pose/observers.py | 8
-rw-r--r-- | src/argaze/utils/estimate_markers_pose/pipeline.json | 2
-rw-r--r-- | utils/processTobiiRecords.sh | 8
67 files changed, 1191 insertions, 490 deletions
diff --git a/docs/img/4flight_visual_pattern.png b/docs/img/4flight_visual_pattern.png
new file mode 100644
index 0000000..0550063
--- /dev/null
+++ b/docs/img/4flight_visual_pattern.png
Binary files differ
diff --git a/docs/img/4flight_workspace.png b/docs/img/4flight_workspace.png
new file mode 100644
index 0000000..f899ab2
--- /dev/null
+++ b/docs/img/4flight_workspace.png
Binary files differ
diff --git a/docs/img/argaze_load_gui.png b/docs/img/argaze_load_gui.png
index b8874b2..e012adc 100644
--- a/docs/img/argaze_load_gui.png
+++ b/docs/img/argaze_load_gui.png
Binary files differ
diff --git a/docs/img/argaze_load_gui_haiku.png b/docs/img/argaze_load_gui_haiku.png
new file mode 100644
index 0000000..6a4e1ec
--- /dev/null
+++ b/docs/img/argaze_load_gui_haiku.png
Binary files differ
diff --git a/docs/img/argaze_load_gui_pfe.png b/docs/img/argaze_load_gui_pfe.png
new file mode 100644
index 0000000..0e622d3
--- /dev/null
+++ b/docs/img/argaze_load_gui_pfe.png
Binary files differ
diff --git a/docs/img/argaze_pipeline.png b/docs/img/argaze_pipeline.png
index 953cbba..61606b2 100644
--- a/docs/img/argaze_pipeline.png
+++ b/docs/img/argaze_pipeline.png
Binary files differ
diff --git a/docs/index.md b/docs/index.md
index 2b668a3..ca9271a 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -14,7 +14,7 @@ By offering a wide array of gaze metrics and supporting easy extension to incorp
## Eye tracking context
-**ArGaze** facilitates the integration of both **screen-based and head-mounted** eye tracking systems for **real-time and/or post-processing analysis**.
+**ArGaze** facilitates the integration of both **screen-based and head-mounted** eye tracking systems for **live data capture and afterward data playback**.
[Learn how to handle various eye tracking context by reading the dedicated user guide section](./user_guide/eye_tracking_context/introduction.md).
diff --git a/docs/use_cases/air_controller_gaze_study/context.md b/docs/use_cases/air_controller_gaze_study/context.md
new file mode 100644
index 0000000..5b13ca5
--- /dev/null
+++ b/docs/use_cases/air_controller_gaze_study/context.md
@@ -0,0 +1,22 @@
+Data playback context
+======================
+
+The context handles incoming eye tracker data before passing it to a processing pipeline.
+
+## data_playback_context.json
+
+For this use case we need to read Tobii Pro Glasses 2 records: **ArGaze** provides a [ready-made context](../../user_guide/eye_tracking_context/context_modules/tobii_pro_glasses_2.md) class to play back data from records made by this device.
+
+While *segment* entry is specific to the [TobiiProGlasses2.SegmentPlayback](../../argaze.md/#argaze.utils.contexts.TobiiProGlasses2.SegmentPlayback) class, *name* and *pipeline* entries are part of the parent [ArContext](../../argaze.md/#argaze.ArFeatures.ArContext) class.
+
+```json
+{
+ "argaze.utils.contexts.TobiiProGlasses2.SegmentPlayback": {
+ "name": "Tobii Pro Glasses 2 segment playback",
+ "segment": "/Volumes/projects/fbr6k3e/records/4rcbdzk/segments/1",
+ "pipeline": "post_processing_pipeline.json"
+ }
+}
+```
+
+The [post_processing_pipeline.json](pipeline.md) file mentioned above is described in the next chapter.
diff --git a/docs/use_cases/air_controller_gaze_study/introduction.md b/docs/use_cases/air_controller_gaze_study/introduction.md
new file mode 100644
index 0000000..f188eec
--- /dev/null
+++ b/docs/use_cases/air_controller_gaze_study/introduction.md
@@ -0,0 +1,48 @@
+Post-processing head-mounted eye tracking records
+=================================================
+
+**ArGaze** enabled a study of air traffic controller gaze strategy.
+
+The following use case integrated the [ArUco marker pipeline](../../user_guide/aruco_marker_pipeline/introduction.md) to map air traffic controllers' gaze onto a multi-screen environment in post-processing, and then enabled scan path study using the [gaze analysis pipeline](../../user_guide/gaze_analysis_pipeline/introduction.md).
+
+## Background
+
+The next-gen air traffic control system (4Flight) aims to enhance the operational capacity of the en-route control center by offering new tools to air traffic controllers. However, it entails significant changes in their working method, which will consequently have an impact on how they are trained.
+Several research projects on visual patterns of air traffic controllers indicate the urgent need to improve the effectiveness of training in visual information seeking behavior.
+An exploratory study was initiated by a group of trainee air traffic controllers with the aim of analyzing the visual patterns of novice controllers and instructors, intending to propose guidelines regarding the visual pattern for training.
+
+## Environment
+
+The 4Flight control position consists of two screens: the first displays the radar image along with other information regarding the observed sector; the second displays the agenda, which allows the controller to link conflicting aircraft by creating data blocks, and the Dyp info, which displays some information about the flight.
+During their training, controllers are taught to visually follow all aircraft streams along a given route, focusing on their planned flight path and potential interactions with other aircraft.
+
+![4Flight Workspace](../../img/4flight_workspace.png)
+
+A traffic simulation of moderate difficulty with a maximum of 13 and 16 aircraft simultaneously was performed by air traffic controllers. The controller could encounter lateral conflicts (same altitude) between 2 and 3 aircraft and conflicts between aircraft that need to ascend or descend within the sector.
+After the simulation, a directed interview about the gaze pattern was conducted.
+Eye tracking data was recorded with a Tobii Pro Glasses 2, a head-mounted eye tracker.
+The gaze and scene camera video were captured with Tobii Pro Lab software and post-processed with the **ArGaze** software library.
+As the eye tracker model is head mounted, ArUco markers were placed around the two screens to ensure that several of them were always visible in the field of view of the eye tracker camera.
+
+Various metrics were exported with specific pipeline observers, including average fixation duration, explore/exploit ratio, K-coefficient, AOI distribution, transition matrix, entropy and N-grams.
+Although statistical analysis is not possible due to the small sample size of the study (6 instructors, 5 qualified controllers, and 5 trainees), visual pattern summaries have been manually built from the transition matrix export to produce a qualitative interpretation showing what instructors attend to during training and how qualified controllers work. Red arcs represent more frequent transitions than blue ones; the figure shows instructors (Fig. a) and four different qualified controllers (Fig. b, c, d, e).
+
+![4Flight Visual pattern](../../img/4flight_visual_pattern.png)
+
+## Setup
+
+The setup to integrate **ArGaze** into the experiment is defined by three main files detailed in the next chapters:
+
+* The context file that plays back gaze data and scene camera video records: [data_playback_context.json](context.md)
+* The pipeline file that processes gaze data and scene camera video: [post_processing_pipeline.json](pipeline.md)
+* The observers file that exports analysis outputs: [observers.py](observers.md)
+
+As with any **ArGaze** setup, it is loaded by executing the [*load* command](../../user_guide/utils/main_commands.md):
+
+```shell
+python -m argaze load data_playback_context.json
+```
+
+This command opens one GUI window per frame (one for the scene camera, one for the sector screen, and one for the info screen), allowing gaze mapping to be monitored while processing.
+
+![ArGaze load GUI for PFE study](../../img/argaze_load_gui_pfe.png)
diff --git a/docs/use_cases/air_controller_gaze_study/observers.md b/docs/use_cases/air_controller_gaze_study/observers.md
new file mode 100644
index 0000000..500d573
--- /dev/null
+++ b/docs/use_cases/air_controller_gaze_study/observers.md
@@ -0,0 +1,90 @@
+Metrics and video recording
+===========================
+
+Observers are attached to pipeline steps to be notified when a method is called.
+
+## observers.py
+
+For this use case we need to record gaze analysis metrics on each *ArUcoCamera.on_look* call and to record the sector screen image on each *ArUcoCamera.on_copy_background_into_scenes_frames* call.
+
+```python
+import logging
+
+from argaze.utils import UtilsFeatures
+
+import cv2
+
+class ScanPathAnalysisRecorder(UtilsFeatures.FileWriter):
+
+ def __init__(self, **kwargs):
+
+ super().__init__(**kwargs)
+
+ self.header = "Timestamp (ms)", "Path duration (ms)", "Steps number", "Fixation durations average (ms)", "Explore/Exploit ratio", "K coefficient"
+
+ def on_look(self, timestamp, frame, exception):
+ """Log scan path metrics."""
+
+ if frame.is_analysis_available():
+
+ analysis = frame.analysis()
+
+ data = (
+ int(timestamp),
+ analysis['argaze.GazeAnalysis.Basic.ScanPathAnalyzer'].path_duration,
+ analysis['argaze.GazeAnalysis.Basic.ScanPathAnalyzer'].steps_number,
+ analysis['argaze.GazeAnalysis.Basic.ScanPathAnalyzer'].step_fixation_durations_average,
+ analysis['argaze.GazeAnalysis.ExploreExploitRatio.ScanPathAnalyzer'].explore_exploit_ratio,
+ analysis['argaze.GazeAnalysis.KCoefficient.ScanPathAnalyzer'].K
+ )
+
+ self.write(data)
+
+class AOIScanPathAnalysisRecorder(UtilsFeatures.FileWriter):
+
+ def __init__(self, **kwargs):
+
+ super().__init__(**kwargs)
+
+ self.header = "Timestamp (ms)", "Path duration (ms)", "Steps number", "Fixation durations average (ms)", "Transition matrix probabilities", "Transition matrix density", "N-Grams count", "Stationary entropy", "Transition entropy"
+
+ def on_look(self, timestamp, layer, exception):
+ """Log aoi scan path metrics"""
+
+ if layer.is_analysis_available():
+
+ analysis = layer.analysis()
+
+ data = (
+ int(timestamp),
+ analysis['argaze.GazeAnalysis.Basic.AOIScanPathAnalyzer'].path_duration,
+ analysis['argaze.GazeAnalysis.Basic.AOIScanPathAnalyzer'].steps_number,
+ analysis['argaze.GazeAnalysis.Basic.AOIScanPathAnalyzer'].step_fixation_durations_average,
+ analysis['argaze.GazeAnalysis.TransitionMatrix.AOIScanPathAnalyzer'].transition_matrix_probabilities,
+ analysis['argaze.GazeAnalysis.TransitionMatrix.AOIScanPathAnalyzer'].transition_matrix_density,
+ analysis['argaze.GazeAnalysis.NGram.AOIScanPathAnalyzer'].ngrams_count,
+ analysis['argaze.GazeAnalysis.Entropy.AOIScanPathAnalyzer'].stationary_entropy,
+ analysis['argaze.GazeAnalysis.Entropy.AOIScanPathAnalyzer'].transition_entropy
+ )
+
+ self.write(data)
+
+class VideoRecorder(UtilsFeatures.VideoWriter):
+
+ def __init__(self, **kwargs):
+
+ super().__init__(**kwargs)
+
+ def on_copy_background_into_scenes_frames(self, timestamp, frame, exception):
+ """Write frame image."""
+
+        logging.debug('VideoRecorder.on_copy_background_into_scenes_frames')
+
+ image = frame.image()
+
+ # Write video timing
+ cv2.rectangle(image, (0, 0), (550, 50), (63, 63, 63), -1)
+ cv2.putText(image, f'Time: {int(timestamp)} ms', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+
+ self.write(image)
+```
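+
+These recorder classes are instantiated from the pipeline configuration. For instance, here is an extract from the [post_processing_pipeline.json](pipeline.md) file described in the next chapter, where the *Sector_Screen* frame attaches two of them:
+
+```json
+"observers": {
+    "observers.ScanPathAnalysisRecorder": {
+        "path": "sector_screen.csv"
+    },
+    "observers.VideoRecorder": {
+        "path": "sector_screen.mp4",
+        "width": 1080,
+        "height": 1024,
+        "fps": 25
+    }
+}
+```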
diff --git a/docs/use_cases/air_controller_gaze_study/pipeline.md b/docs/use_cases/air_controller_gaze_study/pipeline.md
new file mode 100644
index 0000000..b1df62a
--- /dev/null
+++ b/docs/use_cases/air_controller_gaze_study/pipeline.md
@@ -0,0 +1,366 @@
+Post processing pipeline
+========================
+
+The pipeline processes camera image and gaze data to enable gaze mapping and gaze analysis.
+
+## post_processing_pipeline.json
+
+For this use case we need to detect ArUco markers to enable gaze mapping: **ArGaze** provides the [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarker.ArUcoCamera) class to set up an [ArUco markers pipeline](../../user_guide/aruco_marker_pipeline/introduction.md).
+
+```json
+{
+ "argaze.ArUcoMarker.ArUcoCamera.ArUcoCamera": {
+ "name": "ATC_Study",
+ "size": [1920, 1080],
+ "sides_mask": 420,
+ "copy_background_into_scenes_frames": true,
+ "aruco_detector": {
+ "dictionary": "DICT_APRILTAG_16h5",
+ "optic_parameters": "optic_parameters.json",
+ "parameters": {
+ "adaptiveThreshConstant": 20,
+ "useAruco3Detection": true
+ }
+ },
+ "gaze_movement_identifier": {
+ "argaze.GazeAnalysis.DispersionThresholdIdentification.GazeMovementIdentifier": {
+ "deviation_max_threshold": 25,
+ "duration_min_threshold": 150
+ }
+ },
+ "layers": {
+ "Main" : {
+ "aoi_matcher": {
+ "argaze.GazeAnalysis.DeviationCircleCoverage.AOIMatcher": {
+ "coverage_threshold": 0.5
+ }
+ },
+ "aoi_scan_path" : {
+ "duration_max": 60000
+ },
+ "aoi_scan_path_analyzers": {
+ "argaze.GazeAnalysis.Basic.AOIScanPathAnalyzer": {},
+ "argaze.GazeAnalysis.TransitionMatrix.AOIScanPathAnalyzer": {},
+ "argaze.GazeAnalysis.NGram.AOIScanPathAnalyzer": {
+ "n_min": 3,
+ "n_max": 5
+ },
+ "argaze.GazeAnalysis.Entropy.AOIScanPathAnalyzer": {}
+ },
+ "observers": {
+ "observers.AOIScanPathAnalysisRecorder": {
+ "path": "aoi_metrics.csv"
+ }
+ }
+ }
+ },
+ "image_parameters": {
+ "background_weight": 1,
+ "draw_gaze_positions": {
+ "color": [0, 255, 255],
+ "size": 4
+ },
+ "draw_detected_markers": {
+ "color": [0, 255, 0]
+ },
+ "draw_layers": {
+ "Main": {
+ "draw_aoi_scene": {
+ "draw_aoi": {
+ "color": [255, 255, 255],
+ "border_size": 1
+ }
+ },
+ "draw_aoi_matching": {
+ "update_looked_aoi": true,
+ "draw_looked_aoi": {
+ "color": [0, 255, 0],
+ "border_size": 2
+ },
+ "looked_aoi_name_color": [255, 255, 255],
+ "looked_aoi_name_offset": [0, -10]
+ }
+ }
+ }
+ },
+ "scenes": {
+ "Workspace": {
+ "aruco_markers_group": "workspace_markers.obj",
+ "layers": {
+ "Main" : {
+ "aoi_scene": "workspace_aois.obj"
+ }
+ },
+ "frames": {
+ "Sector_Screen": {
+ "size": [1080, 1017],
+ "gaze_movement_identifier": {
+ "argaze.GazeAnalysis.DispersionThresholdIdentification.GazeMovementIdentifier": {
+ "deviation_max_threshold": 25,
+ "duration_min_threshold": 150
+ }
+ },
+ "scan_path": {
+ "duration_max": 30000
+ },
+ "scan_path_analyzers": {
+ "argaze.GazeAnalysis.Basic.ScanPathAnalyzer": {},
+ "argaze.GazeAnalysis.ExploreExploitRatio.ScanPathAnalyzer": {
+ "short_fixation_duration_threshold": 0
+ },
+ "argaze.GazeAnalysis.KCoefficient.ScanPathAnalyzer": {}
+ },
+ "layers" :{
+ "Main": {
+ "aoi_scene": "sector_screen_aois.svg"
+ }
+ },
+ "heatmap": {
+ "size": [80, 60]
+ },
+ "image_parameters": {
+ "background_weight": 1,
+ "heatmap_weight": 0.5,
+ "draw_gaze_positions": {
+ "color": [0, 127, 127],
+ "size": 4
+ },
+ "draw_scan_path": {
+ "draw_fixations": {
+ "deviation_circle_color": [255, 255, 255],
+ "duration_border_color": [0, 127, 127],
+ "duration_factor": 1e-2
+ },
+ "draw_saccades": {
+ "line_color": [0, 255, 255]
+ },
+ "deepness": 0
+ },
+ "draw_layers": {
+ "Main": {
+ "draw_aoi_scene": {
+ "draw_aoi": {
+ "color": [255, 255, 255],
+ "border_size": 1
+ }
+ },
+ "draw_aoi_matching": {
+ "draw_matched_fixation": {
+ "deviation_circle_color": [255, 255, 255],
+ "draw_positions": {
+ "position_color": [0, 255, 0],
+ "line_color": [0, 0, 0]
+ }
+ },
+ "draw_looked_aoi": {
+ "color": [0, 255, 0],
+ "border_size": 2
+ },
+ "looked_aoi_name_color": [255, 255, 255],
+ "looked_aoi_name_offset": [10, 10]
+ }
+ }
+ }
+ },
+ "observers": {
+ "observers.ScanPathAnalysisRecorder": {
+ "path": "sector_screen.csv"
+ },
+ "observers.VideoRecorder": {
+ "path": "sector_screen.mp4",
+ "width": 1080,
+ "height": 1024,
+ "fps": 25
+ }
+ }
+ },
+ "Info_Screen": {
+ "size": [640, 1080],
+ "layers" : {
+ "Main": {
+ "aoi_scene": "info_screen_aois.svg"
+ }
+ }
+ }
+ }
+ }
+ },
+ "observers": {
+ "argaze.utils.UtilsFeatures.LookPerformanceRecorder": {
+ "path": "_export/look_performance.csv"
+ },
+ "argaze.utils.UtilsFeatures.WatchPerformanceRecorder": {
+ "path": "_export/watch_performance.csv"
+ }
+ }
+ }
+}
+```
+
+All the files mentioned above are described below.
+
+The *ScanPathAnalysisRecorder* and *AOIScanPathAnalysisRecorder* observer objects are defined in the [observers.py](observers.md) file, which is described in the next chapter.
+
+## optic_parameters.json
+
+This file defines the Tobii Pro Glasses 2 scene camera optic parameters, which have been calculated as explained in [the camera calibration chapter](../../user_guide/aruco_marker_pipeline/advanced_topics/optic_parameters_calibration.md).
+
+```json
+{
+ "rms": 0.6688921504088245,
+ "dimensions": [
+ 1920,
+ 1080
+ ],
+ "K": [
+ [
+ 1135.6524381415752,
+ 0.0,
+ 956.0685325355497
+ ],
+ [
+ 0.0,
+ 1135.9272506869524,
+ 560.059099810324
+ ],
+ [
+ 0.0,
+ 0.0,
+ 1.0
+ ]
+ ],
+ "D": [
+ 0.01655492265003404,
+ 0.1985524264972037,
+ 0.002129965902489484,
+ -0.0019528582922179365,
+ -0.5792910353639452
+ ]
+}
+```
+
+## workspace_markers.obj
+
+This file defines where the ArUco markers are located within the workspace geometry. Marker positions have been edited in [Blender software](https://www.blender.org/) from a manually built 3D model of the workspace, then exported in OBJ format.
+
+```obj
+# Blender v3.0.1 OBJ File: 'workspace.blend'
+# www.blender.org
+o DICT_APRILTAG_16h5#1_Marker
+v -2.532475 48.421242 0.081627
+v 2.467094 48.355682 0.077174
+v 2.532476 53.352734 -0.081634
+v -2.467093 53.418293 -0.077182
+s off
+f 1 2 3 4
+o DICT_APRILTAG_16h5#6_Marker
+v 88.144676 23.084166 -0.070246
+v 93.144661 23.094980 -0.072225
+v 93.133904 28.092941 0.070232
+v 88.133919 28.082127 0.072211
+s off
+f 5 6 7 8
+o DICT_APRILTAG_16h5#2_Marker
+v -6.234516 27.087950 0.176944
+v -1.244015 27.005413 -0.119848
+v -1.164732 32.004459 -0.176936
+v -6.155232 32.086998 0.119855
+s off
+f 9 10 11 12
+o DICT_APRILTAG_16h5#3_Marker
+v -2.518053 -2.481743 -0.018721
+v 2.481756 -2.518108 0.005601
+v 2.518059 2.481743 0.018721
+v -2.481749 2.518108 -0.005601
+s off
+f 13 14 15 16
+o DICT_APRILTAG_16h5#5_Marker
+v 48.746418 48.319012 -0.015691
+v 53.746052 48.374046 0.009490
+v 53.690983 53.373741 0.015698
+v 48.691349 53.318699 -0.009490
+s off
+f 17 18 19 20
+o DICT_APRILTAG_16h5#4_Marker
+v 23.331947 -3.018721 5.481743
+v 28.331757 -2.994399 5.518108
+v 28.368059 -2.981279 0.518257
+v 23.368252 -3.005600 0.481892
+s off
+f 21 22 23 24
+
+```
+
+## workspace_aois.obj
+
+This file defines where the AOI are located within the workspace geometry. AOI positions have been edited in [Blender software](https://www.blender.org/) from a manually built 3D model of the workspace, then exported in OBJ format.
+
+```obj
+# Blender v3.0.1 OBJ File: 'workspace.blend'
+# www.blender.org
+o Sector_Screen
+v 0.000000 1.008786 0.000000
+v 51.742416 1.008786 0.000000
+v 0.000000 52.998108 0.000000
+v 51.742416 52.998108 0.000000
+s off
+f 1 2 4 3
+o Info_Screen
+v 56.407101 0.000000 0.000000
+v 91.407104 0.000000 0.000000
+v 56.407101 52.499996 0.000000
+v 91.407104 52.499996 0.000000
+s off
+f 5 6 8 7
+
+```
+
+## sector_screen_aois.svg
+
+This file defines where the AOI are located within the sector screen frame. AOI positions have been edited with [Inkscape software](https://inkscape.org/fr/) from a screenshot of the sector screen, then exported in SVG format.
+
+```svg
+<svg >
+ <path id="Area_1" d="M317.844,198.526L507.431,426.837L306.453,595.073L110.442,355.41L317.844,198.526Z"/>
+ <path id="Area_2" d="M507.431,426.837L611.554,563.624L444.207,750.877L306.453,595.073L507.431,426.837Z"/>
+ <path id="Area_3" d="M395.175,1017L444.207,750.877L611.554,563.624L1080,954.462L1080,1017L395.175,1017Z"/>
+ <path id="Area_4" d="M611.554,563.624L756.528,293.236L562.239,198.526L471.45,382.082L611.554,563.624Z"/>
+ <path id="Area_5" d="M0,900.683L306.453,595.073L444.207,750.877L395.175,1017L0,1017L0,900.683Z"/>
+ <path id="Area_6" d="M471.45,381.938L557.227,207.284L354.832,65.656L237.257,104.014L471.45,381.938Z"/>
+ <path id="Area_7" d="M0,22.399L264.521,24.165L318.672,77.325L237.257,103.625L248.645,118.901L0,80.963L0,22.399Z"/>
+</svg>
+```
+
+## info_screen_aois.svg
+
+This file defines where the AOI are located within the info screen frame. AOI positions have been edited with [Inkscape software](https://inkscape.org/fr/) from a screenshot of the info screen, then exported in SVG format.
+
+```svg
+<svg >
+ <rect id="Strips" x="0" y="880" width="640" height="200"/>
+</svg>
+```
+
+## aoi_metrics.csv
+
+This file contains all the metrics recorded by the *AOIScanPathAnalysisRecorder* object as defined in the [observers.py](observers.md) file.
+
+## sector_screen.csv
+
+This file contains all the metrics recorded by the *ScanPathAnalysisRecorder* object as defined in the [observers.py](observers.md) file.
+
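+As a minimal post-analysis sketch (assuming the recorders write comma-separated values with the headers defined in [observers.py](observers.md)), these files can be loaded with pandas:
+
+```python
+import pandas
+
+# Load the AOI scan path metrics recorded by AOIScanPathAnalysisRecorder
+aoi_metrics = pandas.read_csv('aoi_metrics.csv')
+
+# Load the scan path metrics recorded by ScanPathAnalysisRecorder
+sector_screen_metrics = pandas.read_csv('sector_screen.csv')
+
+# For example, average the K coefficient over the whole record
+print(sector_screen_metrics['K coefficient'].mean())
+```
+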
+## sector_screen.mp4
+
+This video file is a recording of the sector screen frame image.
+
+## look_performance.csv
+
+This file logs the *ArUcoCamera.look* method execution information. It is created in an *_export* folder in the directory from which the [*load* command](../../user_guide/utils/main_commands.md) is launched.
+
+On a MacBook Pro (2.3 GHz Intel Core i9, 8 cores), the *look* method execution time is ~5.6 ms and it is called ~163 times per second.
+
+## watch_performance.csv
+
+This file logs the *ArUcoCamera.watch* method execution information. It is created in an *_export* folder in the directory from which the [*load* command](../../user_guide/utils/main_commands.md) is launched.
+
+On a MacBook Pro (2.3 GHz Intel Core i9, 8 cores), the *watch* method execution time is ~52 ms and it is called ~11.8 times per second.
diff --git a/docs/use_cases/gaze_based_candidate_selection/context.md b/docs/use_cases/gaze_based_candidate_selection/context.md
new file mode 100644
index 0000000..96547ea
--- /dev/null
+++ b/docs/use_cases/gaze_based_candidate_selection/context.md
@@ -0,0 +1,7 @@
+Data playback context
+======================
+
+The context handles incoming eye tracker data before passing it to a processing pipeline.
+
+## data_playback_context.json
+
diff --git a/docs/use_cases/gaze_based_candidate_selection/introduction.md b/docs/use_cases/gaze_based_candidate_selection/introduction.md
new file mode 100644
index 0000000..da8d6f9
--- /dev/null
+++ b/docs/use_cases/gaze_based_candidate_selection/introduction.md
@@ -0,0 +1,12 @@
+Post-processing screen-based eye tracker data
+=================================================
+
+**ArGaze** enabled ...
+
+The following use case has integrated ...
+
+## Background
+
+## Environment
+
+## Setup
diff --git a/docs/use_cases/gaze_based_candidate_selection/observers.md b/docs/use_cases/gaze_based_candidate_selection/observers.md
new file mode 100644
index 0000000..a1f1fce
--- /dev/null
+++ b/docs/use_cases/gaze_based_candidate_selection/observers.md
@@ -0,0 +1,6 @@
+Metrics and video recording
+===========================
+
+Observers are attached to pipeline steps to be notified when a method is called.
+
+## observers.py
diff --git a/docs/use_cases/gaze_based_candidate_selection/pipeline.md b/docs/use_cases/gaze_based_candidate_selection/pipeline.md
new file mode 100644
index 0000000..6fae01a
--- /dev/null
+++ b/docs/use_cases/gaze_based_candidate_selection/pipeline.md
@@ -0,0 +1,6 @@
+Post processing pipeline
+========================
+
+The pipeline processes gaze data to enable gaze analysis.
+
+## post_processing_pipeline.json
diff --git a/docs/use_cases/pilot_gaze_monitoring/context.md b/docs/use_cases/pilot_gaze_monitoring/context.md
index 417ed13..477276d 100644
--- a/docs/use_cases/pilot_gaze_monitoring/context.md
+++ b/docs/use_cases/pilot_gaze_monitoring/context.md
@@ -1,12 +1,11 @@
-Live streaming context
-======================
+Data capture context
+====================
-The context handles pipeline inputs.
+The context handles incoming eye tracker data before passing it to a processing pipeline.
## live_streaming_context.json
-For this use case we need to connect to a Tobii Pro Glasses 2 device.
-**ArGaze** provides a context class to live stream data from this device.
+For this use case we need to connect to a Tobii Pro Glasses 2 device: **ArGaze** provides a [ready-made context](../../user_guide/eye_tracking_context/context_modules/tobii_pro_glasses_2.md) class to capture data from this device.
While *address*, *project*, *participant* and *configuration* entries are specific to the [TobiiProGlasses2.LiveStream](../../argaze.md/#argaze.utils.contexts.TobiiProGlasses2.LiveStream) class, *name*, *pipeline* and *observers* entries are part of the parent [ArContext](../../argaze.md/#argaze.ArFeatures.ArContext) class.
@@ -39,4 +38,4 @@ While *address*, *project*, *participant* and *configuration* entries are specif
The [live_processing_pipeline.json](pipeline.md) file mentioned aboved is described in the next chapter.
-The observers objects are defined into the [observers.py](observers.md) file that is described in a next chapter. \ No newline at end of file
+The *IvyBus* observer object is defined in the [observers.py](observers.md) file, which is described in the next chapter. \ No newline at end of file
diff --git a/docs/use_cases/pilot_gaze_monitoring/introduction.md b/docs/use_cases/pilot_gaze_monitoring/introduction.md
index 453a443..7e88c69 100644
--- a/docs/use_cases/pilot_gaze_monitoring/introduction.md
+++ b/docs/use_cases/pilot_gaze_monitoring/introduction.md
@@ -30,17 +30,18 @@ Finally, fixation events were sent in real-time through [Ivy bus middleware](htt
## Setup
-The setup to integrate **ArGaze** to the experiment is defined by 3 main files:
+The setup to integrate **ArGaze** into the experiment is defined by three main files detailed in the next chapters:
* The context file that captures gaze data and scene camera video: [live_streaming_context.json](context.md)
* The pipeline file that processes gaze data and scene camera video: [live_processing_pipeline.json](pipeline.md)
* The observers file that send fixation events via Ivy bus middleware: [observers.py](observers.md)
-As any **ArGaze** setup, it is loaded by executing the following command:
+As with any **ArGaze** setup, it is loaded by executing the [*load* command](../../user_guide/utils/main_commands.md):
```shell
python -m argaze load live_streaming_context.json
```
-## Performance
+This command opens a GUI window that allows starting gaze calibration, launching recording, and monitoring gaze mapping. Another window is also opened to display gaze mapping onto the PFD screen.
+![ArGaze load GUI for Haiku](../../img/argaze_load_gui_haiku.png)
diff --git a/docs/use_cases/pilot_gaze_monitoring/observers.md b/docs/use_cases/pilot_gaze_monitoring/observers.md
index 2e3f394..5f5bc78 100644
--- a/docs/use_cases/pilot_gaze_monitoring/observers.md
+++ b/docs/use_cases/pilot_gaze_monitoring/observers.md
@@ -1,8 +1,12 @@
Fixation events sending
=======================
+Observers are attached to pipeline steps to be notified when a method is called.
+
## observers.py
+For this use case we need to enable [Ivy bus communication](https://gitlab.com/ivybus/ivy-python/) to log ArUco detection results (on *ArUcoCamera.on_watch* call) and fixation identification with AOI matching (on *ArUcoCamera.on_look* call).
+
```python
import logging
diff --git a/docs/use_cases/pilot_gaze_monitoring/pipeline.md b/docs/use_cases/pilot_gaze_monitoring/pipeline.md
index 8f8dad0..1450fed 100644
--- a/docs/use_cases/pilot_gaze_monitoring/pipeline.md
+++ b/docs/use_cases/pilot_gaze_monitoring/pipeline.md
@@ -5,8 +5,7 @@ The pipeline processes camera image and gaze data to enable gaze mapping and gaz
## live_processing_pipeline.json
-For this use case we need to detect ArUco markers to enable gaze mapping.
-**ArGaze** provides the [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarker.ArUcoCamera) class to setup an [ArUco markers pipeline](../../user_guide/aruco_marker_pipeline/introduction.md).
+For this use case we need to detect ArUco markers to enable gaze mapping: **ArGaze** provides the [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarker.ArUcoCamera) class to set up an [ArUco markers pipeline](../../user_guide/aruco_marker_pipeline/introduction.md).
```json
{
@@ -37,12 +36,6 @@ For this use case we need to detect ArUco markers to enable gaze mapping.
"PIC_PFD": {
"size": [960, 1080],
"background": "PIC_PFD.png",
- "gaze_movement_identifier": {
- "argaze.GazeAnalysis.DispersionThresholdIdentification.GazeMovementIdentifier": {
- "deviation_max_threshold": 50,
- "duration_min_threshold": 150
- }
- },
"layers": {
"Main": {
"aoi_scene": "PIC_PFD.svg"
@@ -56,9 +49,7 @@ For this use case we need to detect ArUco markers to enable gaze mapping.
}
}
}
- },
- "angle_tolerance": 15.0,
- "distance_tolerance": 10.0
+ }
}
},
"layers": {
@@ -119,7 +110,13 @@ For this use case we need to detect ArUco markers to enable gaze mapping.
}
},
"observers": {
- "observers.ArUcoCameraLogger": {}
+ "observers.ArUcoCameraLogger": {},
+ "argaze.utils.UtilsFeatures.LookPerformanceRecorder": {
+ "path": "_export/look_performance.csv"
+ },
+ "argaze.utils.UtilsFeatures.WatchPerformanceRecorder": {
+ "path": "_export/watch_performance.csv"
+ }
}
}
}
@@ -127,10 +124,12 @@ For this use case we need to detect ArUco markers to enable gaze mapping.
All the files mentioned aboved are described below.
-The observers objects are defined into the [observers.py](observers.md) file that is described in the next chapter.
+The *ArUcoCameraLogger* observer object is defined in the [observers.py](observers.md) file, which is described in the next chapter.
## optic_parameters.json
+This file defines the Tobii Pro Glasses 2 scene camera optic parameters, which have been calculated as explained in [the camera calibration chapter](../../user_guide/aruco_marker_pipeline/advanced_topics/optic_parameters_calibration.md).
+
```json
{
"rms": 0.6688921504088245,
@@ -167,15 +166,19 @@ The observers objects are defined into the [observers.py](observers.md) file tha
## detector_parameters.json
+This file defines the ArUco detector parameters as explained in [the detection improvement chapter](../../user_guide/aruco_marker_pipeline/advanced_topics/aruco_detector_configuration.md).
+
```json
{
"adaptiveThreshConstant": 7,
- "useAruco3Detection": 1
+ "useAruco3Detection": true
}
```
## aruco_scene.obj
+This file defines where the ArUco markers are located within the cockpit geometry. Marker positions have been edited in [Blender software](https://www.blender.org/) from a 3D scan of the cockpit, then exported in OBJ format.
+
```obj
# Blender v3.0.1 OBJ File: 'scene.blend'
# www.blender.org
@@ -239,6 +242,8 @@ f 29 30 32 31
## Cockpit.obj
+This file defines where the AOI are located within the cockpit geometry. AOI positions have been edited in [Blender software](https://www.blender.org/) from a 3D scan of the cockpit, then exported in OBJ format.
+
```obj
# Blender v3.0.1 OBJ File: 'scene.blend'
# www.blender.org
@@ -274,10 +279,14 @@ f 13 14 16 15
## PIC_PFD.png
+This file is a screenshot of the PFD screen used to monitor where the gaze is projected after gaze mapping processing.
+
![PFD frame background](../../img/haiku_PIC_PFD_background.png)
## PIC_PFD.svg
+This file defines where the AOI are located within the PFD frame. AOI positions have been edited with [Inkscape software](https://inkscape.org/fr/) from a screenshot of the PFD screen, then exported in SVG format.
+
```svg
<svg>
<rect id="PIC_PFD_Air_Speed" x="93.228" y="193.217" width="135.445" height="571.812"/>
@@ -288,3 +297,15 @@ f 13 14 16 15
<rect id="PIC_PFD_Vertical_Speed" x="819.913" y="193.217" width="85.185" height="609.09"/>
</svg>
```
+
+## look_performance.csv
+
+This file logs the *ArUcoCamera.look* method execution information. It is saved in an *_export* folder in the directory from which the [*load* command](../../user_guide/utils/main_commands.md) is launched.
+
+On a Jetson Xavier computer, the *look* method execution time is ~0.5 ms and it is called ~100 times per second.
+
+## watch_performance.csv
+
+This file logs the *ArUcoCamera.watch* method execution information. It is saved in an *_export* folder in the directory from which the [*load* command](../../user_guide/utils/main_commands.md) is launched.
+
+On a Jetson Xavier computer, the *watch* method execution time is ~50 ms and it is called ~10 times per second.
diff --git a/docs/user_guide/aruco_marker_pipeline/advanced_topics/aruco_detector_configuration.md b/docs/user_guide/aruco_marker_pipeline/advanced_topics/aruco_detector_configuration.md
index 975f278..311916b 100644
--- a/docs/user_guide/aruco_marker_pipeline/advanced_topics/aruco_detector_configuration.md
+++ b/docs/user_guide/aruco_marker_pipeline/advanced_topics/aruco_detector_configuration.md
@@ -5,7 +5,7 @@ As explain in the [OpenCV ArUco documentation](https://docs.opencv.org/4.x/d1/dc
## Load ArUcoDetector parameters
-[ArUcoCamera.detector.parameters](../../../argaze.md/#argaze.ArUcoMarker.ArUcoDetector.Parameters) can be loaded thanks to a dedicated JSON entry.
+[ArUcoCamera.detector.parameters](../../../argaze.md/#argaze.ArUcoMarker.ArUcoDetector.Parameters) can be loaded with a dedicated JSON entry.
Here is an extract from the JSON [ArUcoCamera](../../../argaze.md/#argaze.ArUcoMarker.ArUcoCamera) configuration file with ArUco detector parameters:
@@ -18,7 +18,7 @@ Here is an extract from the JSON [ArUcoCamera](../../../argaze.md/#argaze.ArUcoM
"dictionary": "DICT_APRILTAG_16h5",
"parameters": {
"adaptiveThreshConstant": 10,
- "useAruco3Detection": 1
+ "useAruco3Detection": true
}
},
...
diff --git a/docs/user_guide/aruco_marker_pipeline/advanced_topics/optic_parameters_calibration.md b/docs/user_guide/aruco_marker_pipeline/advanced_topics/optic_parameters_calibration.md
index 625f257..e9ce740 100644
--- a/docs/user_guide/aruco_marker_pipeline/advanced_topics/optic_parameters_calibration.md
+++ b/docs/user_guide/aruco_marker_pipeline/advanced_topics/optic_parameters_calibration.md
@@ -134,7 +134,7 @@ Below, an optic_parameters JSON file example:
## Load and display optic parameters
-[ArUcoCamera.detector.optic_parameters](../../../argaze.md/#argaze.ArUcoMarker.ArUcoOpticCalibrator.OpticParameters) can be enabled thanks to a dedicated JSON entry.
+[ArUcoCamera.detector.optic_parameters](../../../argaze.md/#argaze.ArUcoMarker.ArUcoOpticCalibrator.OpticParameters) can be enabled with a dedicated JSON entry.
Here is an extract from the JSON [ArUcoCamera](../../../argaze.md/#argaze.ArUcoMarker.ArUcoCamera) configuration file where optic parameters are loaded and displayed:
diff --git a/docs/user_guide/aruco_marker_pipeline/advanced_topics/scripting.md b/docs/user_guide/aruco_marker_pipeline/advanced_topics/scripting.md
index a9d66e9..f258e04 100644
--- a/docs/user_guide/aruco_marker_pipeline/advanced_topics/scripting.md
+++ b/docs/user_guide/aruco_marker_pipeline/advanced_topics/scripting.md
@@ -150,7 +150,7 @@ Particularly, timestamped gaze positions can be passed one by one to the [ArUcoC
## Setup ArUcoCamera image parameters
-Specific [ArUcoCamera.image](../../../argaze.md/#argaze.ArFeatures.ArFrame.image) method parameters can be configured thanks to a Python dictionary.
+Specific [ArUcoCamera.image](../../../argaze.md/#argaze.ArFeatures.ArFrame.image) method parameters can be configured with a Python dictionary.
```python
# Assuming ArUcoCamera is loaded
diff --git a/docs/user_guide/aruco_marker_pipeline/aoi_3d_description.md b/docs/user_guide/aruco_marker_pipeline/aoi_3d_description.md
index 46422b8..78a513a 100644
--- a/docs/user_guide/aruco_marker_pipeline/aoi_3d_description.md
+++ b/docs/user_guide/aruco_marker_pipeline/aoi_3d_description.md
@@ -1,7 +1,7 @@
Describe 3D AOI
===============
-Now that the [scene pose is estimated](aruco_marker_description.md) thanks to ArUco markers description, [areas of interest (AOI)](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.AreaOfInterest) need to be described into the same 3D referential.
+Now that the [scene pose is estimated](aruco_marker_description.md) considering the ArUco markers description, [areas of interest (AOI)](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.AreaOfInterest) need to be described into the same 3D referential.
In the example scene, the two screens—the control panel and the window—are considered to be areas of interest.
diff --git a/docs/user_guide/aruco_marker_pipeline/configuration_and_execution.md b/docs/user_guide/aruco_marker_pipeline/configuration_and_execution.md
index c2ee1b9..56846e2 100644
--- a/docs/user_guide/aruco_marker_pipeline/configuration_and_execution.md
+++ b/docs/user_guide/aruco_marker_pipeline/configuration_and_execution.md
@@ -1,7 +1,7 @@
Edit and execute pipeline
=========================
-Once [ArUco markers are placed into a scene](aruco_marker_description.md), they can be detected thanks to [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarker.ArUcoCamera) class.
+Once [ArUco markers are placed into a scene](aruco_marker_description.md), they can be detected by the [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarker.ArUcoCamera) class.
As [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarker.ArUcoCamera) inherits from [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame), the [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarker.ArUcoCamera) class also benefits from all the services described in the [gaze analysis pipeline section](../gaze_analysis_pipeline/introduction.md).
diff --git a/docs/user_guide/eye_tracking_context/advanced_topics/context_definition.md b/docs/user_guide/eye_tracking_context/advanced_topics/context_definition.md
index c163696..a543bc7 100644
--- a/docs/user_guide/eye_tracking_context/advanced_topics/context_definition.md
+++ b/docs/user_guide/eye_tracking_context/advanced_topics/context_definition.md
@@ -3,27 +3,27 @@ Define a context class
The [ArContext](../../../argaze.md/#argaze.ArFeatures.ArContext) class defines a generic base class interface to handle incoming eye tracker data before to pass them to a processing pipeline according to [Python context manager feature](https://docs.python.org/3/reference/datamodel.html#context-managers).
-The [ArContext](../../../argaze.md/#argaze.ArFeatures.ArContext) class interface provides playback features to stop or pause processings, performance assement features to measure how many times processings are called and the time spent by the process.
+The [ArContext](../../../argaze.md/#argaze.ArFeatures.ArContext) class interface provides control features to stop or pause working threads, and performance assessment features to measure how many times processing is called and the time spent by the process.
-Besides, there is also a [LiveProcessingContext](../../../argaze.md/#argaze.ArFeatures.LiveProcessingContext) class that inherits from [ArContext](../../../argaze.md/#argaze.ArFeatures.ArContext) and that defines an abstract *calibrate* method to write specific device calibration process.
+Besides, there is also a [DataCaptureContext](../../../argaze.md/#argaze.ArFeatures.DataCaptureContext) class that inherits from [ArContext](../../../argaze.md/#argaze.ArFeatures.ArContext) and that defines an abstract *calibrate* method to implement a device-specific calibration process.
-In the same way, there is a [PostProcessingContext](../../../argaze.md/#argaze.ArFeatures.PostProcessingContext) class that inherits from [ArContext](../../../argaze.md/#argaze.ArFeatures.ArContext) and that defines abstract *previous* and *next* playback methods to move into record's frames and also defines *duration* and *progression* properties to get information about a record length and processing advancement.
+In the same way, there is a [DataPlaybackContext](../../../argaze.md/#argaze.ArFeatures.DataPlaybackContext) class that inherits from [ArContext](../../../argaze.md/#argaze.ArFeatures.ArContext) and that defines *duration* and *progression* properties to get information about a record length and playback advancement.
-Finally, a specific eye tracking context can be defined into a Python file by writing a class that inherits either from [ArContext](../../../argaze.md/#argaze.ArFeatures.ArContext), [LiveProcessingContext](../../../argaze.md/#argaze.ArFeatures.LiveProcessingContext) or [PostProcessingContext](../../../argaze.md/#argaze.ArFeatures.PostProcessingContext) class.
+Finally, a specific eye tracking context can be defined into a Python file by writing a class that inherits either from [ArContext](../../../argaze.md/#argaze.ArFeatures.ArContext), [DataCaptureContext](../../../argaze.md/#argaze.ArFeatures.DataCaptureContext) or [DataPlaybackContext](../../../argaze.md/#argaze.ArFeatures.DataPlaybackContext) class.
-## Write live processing context
+## Write data capture context
-Here is a live processing context example that processes gaze positions and camera images in two separated threads:
+Here is a data capture context example that processes gaze positions and camera images in two separate threads:
```python
from argaze import ArFeatures, DataFeatures
-class LiveProcessingExample(ArFeatures.LiveProcessingContext):
+class DataCaptureExample(ArFeatures.DataCaptureContext):
@DataFeatures.PipelineStepInit
def __init__(self, **kwargs):
- # Init LiveProcessingContext class
+ # Init DataCaptureContext class
super().__init__()
# Init private attribute
@@ -45,23 +45,23 @@ class LiveProcessingExample(ArFeatures.LiveProcessingContext):
# Start context according any specific parameter
... self.parameter
- # Start a gaze position processing thread
- self.__gaze_thread = threading.Thread(target = self.__gaze_position_processing)
+ # Start a gaze position capture thread
+ self.__gaze_thread = threading.Thread(target = self.__gaze_position_capture)
self.__gaze_thread.start()
- # Start a camera image processing thread if applicable
- self.__camera_thread = threading.Thread(target = self.__camera_image_processing)
+ # Start a camera image capture thread if applicable
+ self.__camera_thread = threading.Thread(target = self.__camera_image_capture)
self.__camera_thread.start()
return self
- def __gaze_position_processing(self):
- """Process gaze position."""
+ def __gaze_position_capture(self):
+ """Capture gaze position."""
- # Processing loop
+ # Capture loop
while self.is_running():
- # Pause processing
+ # Pause capture
if not self.is_paused():
# Assuming that timestamp, x and y values are available
@@ -73,13 +73,13 @@ class LiveProcessingExample(ArFeatures.LiveProcessingContext):
# Wait some time eventually
...
- def __camera_image_processing(self):
- """Process camera image if applicable."""
+ def __camera_image_capture(self):
+ """Capture camera image if applicable."""
- # Processing loop
+ # Capture loop
while self.is_running():
- # Pause processing
+ # Pause capture
if not self.is_paused():
# Assuming that timestamp, camera_image are available
@@ -95,10 +95,10 @@ class LiveProcessingExample(ArFeatures.LiveProcessingContext):
def __exit__(self, exception_type, exception_value, exception_traceback):
"""End context."""
- # Stop processing loops
+ # Stop capture loops
self.stop()
- # Stop processing threads
+ # Stop capture threads
threading.Thread.join(self.__gaze_thread)
threading.Thread.join(self.__camera_thread)
@@ -108,19 +108,19 @@ class LiveProcessingExample(ArFeatures.LiveProcessingContext):
...
```
-## Write post processing context
+## Write data playback context
-Here is a post processing context example that processes gaze positions and camera images in a same thread:
+Here is a data playback context example that reads gaze positions and camera images in the same thread:
```python
from argaze import ArFeatures, DataFeatures
-class PostProcessingExample(ArFeatures.PostProcessingContext):
+class DataPlaybackExample(ArFeatures.DataPlaybackContext):
@DataFeatures.PipelineStepInit
def __init__(self, **kwargs):
- # Init LiveProcessingContext class
+        # Init DataPlaybackContext class
super().__init__()
# Init private attribute
@@ -142,19 +142,19 @@ class PostProcessingExample(ArFeatures.PostProcessingContext):
# Start context according any specific parameter
... self.parameter
- # Start a reading data thread
- self.__read_thread = threading.Thread(target = self.__data_reading)
- self.__read_thread.start()
+ # Start a data playback thread
+ self.__data_thread = threading.Thread(target = self.__data_playback)
+ self.__data_thread.start()
return self
- def __data_reading(self):
- """Process gaze position and camera image if applicable."""
+ def __data_playback(self):
+ """Playback gaze position and camera image if applicable."""
- # Processing loop
+ # Playback loop
while self.is_running():
- # Pause processing
+ # Pause playback
if not self.is_paused():
# Assuming that timestamp, camera_image are available
@@ -176,18 +176,20 @@ class PostProcessingExample(ArFeatures.PostProcessingContext):
def __exit__(self, exception_type, exception_value, exception_traceback):
"""End context."""
- # Stop processing loops
+ # Stop playback loop
self.stop()
- # Stop processing threads
- threading.Thread.join(self.__read_thread)
+ # Stop playback threads
+ threading.Thread.join(self.__data_thread)
- def previous(self):
- """Go to previous camera image frame."""
+ @property
+ def duration(self) -> int|float:
+ """Get data duration."""
...
- def next(self):
- """Go to next camera image frame."""
+ @property
+ def progression(self) -> float:
+ """Get data playback progression between 0 and 1."""
...
```
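+
+Assuming the class above is saved in a local Python file (for instance a hypothetical `my_context.py` placed next to the configuration), it should then be possible to reference it from a JSON configuration in the same way as the ready-made contexts, with the *name* and *pipeline* entries inherited from [ArContext](../../../argaze.md/#argaze.ArFeatures.ArContext):
+
+```json
+{
+    "my_context.DataPlaybackExample": {
+        "name": "My data playback",
+        "pipeline": "gaze_analysis_pipeline.json"
+    }
+}
+```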
diff --git a/docs/user_guide/eye_tracking_context/advanced_topics/scripting.md b/docs/user_guide/eye_tracking_context/advanced_topics/scripting.md
index 8753eb6..d8eb389 100644
--- a/docs/user_guide/eye_tracking_context/advanced_topics/scripting.md
+++ b/docs/user_guide/eye_tracking_context/advanced_topics/scripting.md
@@ -68,12 +68,12 @@ from argaze import ArFeatures
# Check context type
- # Live processing case: calibration method is available
- if issubclass(type(context), ArFeatures.LiveProcessingContext):
+ # Data capture case: calibration method is available
+ if issubclass(type(context), ArFeatures.DataCaptureContext):
...
- # Post processing case: more playback methods are available
- if issubclass(type(context), ArFeatures.PostProcessingContext):
+ # Data playback case: playback methods are available
+ if issubclass(type(context), ArFeatures.DataPlaybackContext):
...
# Check pipeline type
diff --git a/docs/user_guide/eye_tracking_context/advanced_topics/timestamped_gaze_positions_edition.md b/docs/user_guide/eye_tracking_context/advanced_topics/timestamped_gaze_positions_edition.md
index 340dbaf..959d955 100644
--- a/docs/user_guide/eye_tracking_context/advanced_topics/timestamped_gaze_positions_edition.md
+++ b/docs/user_guide/eye_tracking_context/advanced_topics/timestamped_gaze_positions_edition.md
@@ -28,8 +28,8 @@ for timestamped_gaze_position in ts_gaze_positions:
## Edit timestamped gaze positions from live stream
-Real-time gaze positions can be edited thanks to the [GazePosition](../../../argaze.md/#argaze.GazeFeatures.GazePosition) class.
-Besides, timestamps can be edited from the incoming data stream or, if not available, they can be edited thanks to the Python [time package](https://docs.python.org/3/library/time.html).
+Real-time gaze positions can be edited directly using the [GazePosition](../../../argaze.md/#argaze.GazeFeatures.GazePosition) class.
+Besides, timestamps can be edited from the incoming data stream or, if not available, they can be edited using the Python [time package](https://docs.python.org/3/library/time.html).
```python
from argaze import GazeFeatures
diff --git a/docs/user_guide/eye_tracking_context/configuration_and_execution.md b/docs/user_guide/eye_tracking_context/configuration_and_execution.md
index f13c6a2..e1123fb 100644
--- a/docs/user_guide/eye_tracking_context/configuration_and_execution.md
+++ b/docs/user_guide/eye_tracking_context/configuration_and_execution.md
@@ -3,9 +3,9 @@ Edit and execute context
The [utils.contexts module](../../argaze.md/#argaze.utils.contexts) provides ready-made contexts like:
-* [Tobii Pro Glasses 2](context_modules/tobii_pro_glasses_2.md) live stream and post processing contexts,
-* [Pupil Labs](context_modules/pupil_labs.md) live stream context,
-* [OpenCV](context_modules/opencv.md) window cursor position and movie processing,
+* [Tobii Pro Glasses 2](context_modules/tobii_pro_glasses_2.md) data capture and data playback contexts,
+* [Pupil Labs](context_modules/pupil_labs.md) data capture context,
+* [OpenCV](context_modules/opencv.md) window cursor position capture and movie playback,
* [Random](context_modules/random.md) gaze position generator.
## Edit JSON configuration
diff --git a/docs/user_guide/eye_tracking_context/context_modules/opencv.md b/docs/user_guide/eye_tracking_context/context_modules/opencv.md
index 7244cd4..7d73a03 100644
--- a/docs/user_guide/eye_tracking_context/context_modules/opencv.md
+++ b/docs/user_guide/eye_tracking_context/context_modules/opencv.md
@@ -39,9 +39,25 @@ Read more about [ArContext base class in code reference](../../../argaze.md/#arg
```json
{
"argaze.utils.contexts.OpenCV.Movie": {
- "name": "Open CV cursor",
+ "name": "Open CV movie",
"path": "./src/argaze/utils/demo/tobii_record/segments/1/fullstream.mp4",
"pipeline": ...
}
}
```
+
+## Camera
+
+::: argaze.utils.contexts.OpenCV.Camera
+
+### JSON sample
+
+```json
+{
+ "argaze.utils.contexts.OpenCV.Camera": {
+ "name": "Open CV camera",
+ "identifier": 0,
+ "pipeline": ...
+ }
+}
+``` \ No newline at end of file
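+
+For instance, the demo configuration added in this change set (*src/argaze/utils/demo/opencv_camera_context.json*, presumably based on this class) can be launched with the [*load* command](../../utils/main_commands.md):
+
+```shell
+python -m argaze load ./src/argaze/utils/demo/opencv_camera_context.json
+```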
diff --git a/docs/user_guide/eye_tracking_context/context_modules/tobii_pro_glasses_2.md b/docs/user_guide/eye_tracking_context/context_modules/tobii_pro_glasses_2.md
index fba6931..6ff44bd 100644
--- a/docs/user_guide/eye_tracking_context/context_modules/tobii_pro_glasses_2.md
+++ b/docs/user_guide/eye_tracking_context/context_modules/tobii_pro_glasses_2.md
@@ -42,16 +42,16 @@ Read more about [ArContext base class in code reference](../../../argaze.md/#arg
}
```
-## Post Processing
+## Segment Playback
-::: argaze.utils.contexts.TobiiProGlasses2.PostProcessing
+::: argaze.utils.contexts.TobiiProGlasses2.SegmentPlayback
### JSON sample
```json
{
- "argaze.utils.contexts.TobiiProGlasses2.PostProcessing" : {
- "name": "Tobii Pro Glasses 2 post-processing",
+ "argaze.utils.contexts.TobiiProGlasses2.SegmentPlayback" : {
+ "name": "Tobii Pro Glasses 2 segment playback",
"segment": "./src/argaze/utils/demo/tobii_record/segments/1",
"pipeline": ...
}
diff --git a/docs/user_guide/gaze_analysis_pipeline/advanced_topics/gaze_position_calibration.md b/docs/user_guide/gaze_analysis_pipeline/advanced_topics/gaze_position_calibration.md
index 4970dba..effee18 100644
--- a/docs/user_guide/gaze_analysis_pipeline/advanced_topics/gaze_position_calibration.md
+++ b/docs/user_guide/gaze_analysis_pipeline/advanced_topics/gaze_position_calibration.md
@@ -7,7 +7,7 @@ The calibration algorithm can be selected by instantiating a particular [GazePos
## Enable ArFrame calibration
-Gaze position calibration can be enabled thanks to a dedicated JSON entry.
+Gaze position calibration can be enabled with a dedicated JSON entry.
Here is an extract from the JSON ArFrame configuration file where a [Linear Regression](../../../argaze.md/#argaze.GazeAnalysis.LinearRegression) calibration algorithm is selected with no parameters:
diff --git a/docs/user_guide/gaze_analysis_pipeline/advanced_topics/scripting.md b/docs/user_guide/gaze_analysis_pipeline/advanced_topics/scripting.md
index 264e866..843274a 100644
--- a/docs/user_guide/gaze_analysis_pipeline/advanced_topics/scripting.md
+++ b/docs/user_guide/gaze_analysis_pipeline/advanced_topics/scripting.md
@@ -158,7 +158,7 @@ Last [GazeMovement](../../../argaze.md/#argaze.GazeFeatures.GazeMovement) identi
This could also be the current gaze movement if [ArFrame.filter_in_progress_identification](../../../argaze.md/#argaze.ArFeatures.ArFrame) attribute is false.
In that case, the last gaze movement *finished* flag is false.
-Then, the last gaze movement type can be tested thanks to [GazeFeatures.is_fixation](../../../argaze.md/#argaze.GazeFeatures.is_fixation) and [GazeFeatures.is_saccade](../../../argaze.md/#argaze.GazeFeatures.is_saccade) functions.
+Then, the last gaze movement type can be tested with [GazeFeatures.is_fixation](../../../argaze.md/#argaze.GazeFeatures.is_fixation) and [GazeFeatures.is_saccade](../../../argaze.md/#argaze.GazeFeatures.is_saccade) functions.
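+
+As a minimal sketch (assuming *gaze_movement* holds the last gaze movement retrieved as described above), this test looks like:
+
+```python
+from argaze import GazeFeatures
+
+# Assuming gaze_movement holds the last identified gaze movement
+if GazeFeatures.is_fixation(gaze_movement):
+
+    # Handle fixation case
+    ...
+
+elif GazeFeatures.is_saccade(gaze_movement):
+
+    # Handle saccade case
+    ...
+```
+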
### *ar_frame.is_analysis_available()*
@@ -182,7 +182,7 @@ This an iterator to access to all aoi scan path analysis. Notice that each aoi s
## Setup ArFrame image parameters
-[ArFrame.image](../../../argaze.md/#argaze.ArFeatures.ArFrame.image) method parameters can be configured thanks to a Python dictionary.
+[ArFrame.image](../../../argaze.md/#argaze.ArFeatures.ArFrame.image) method parameters can be configured with a Python dictionary.
```python
# Assuming ArFrame is loaded
diff --git a/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md b/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md
index 2b64091..c2a6ac3 100644
--- a/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md
+++ b/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md
@@ -100,6 +100,11 @@ The second [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) pipeline step a
Once gaze movements are matched to AOI, they are automatically appended to the AOIScanPath if required.
+!!! warning "GazeFeatures.OutsideAOI"
+ When a fixation does not look at any AOI, a step associated with a special AOI called [GazeFeatures.OutsideAOI](../../argaze.md/#argaze.GazeFeatures.OutsideAOI) is added. As long as fixations stay outside every AOI, all fixations/saccades are stored in this step, so that further analyses take those extra [GazeFeatures.OutsideAOI](../../argaze.md/#argaze.GazeFeatures.OutsideAOI) steps into account.
+
+ This is particularly important when calculating transition matrices: otherwise an arc could be drawn between two AOIs even though the gaze actually fixated outside any AOI in the meantime.
+
The [AOIScanPath.duration_max](../../argaze.md/#argaze.GazeFeatures.AOIScanPath.duration_max) attribute is the duration from which older AOI scan steps are removed each time new AOI scan steps are added.
!!! note "Optional"
diff --git a/docs/user_guide/gaze_analysis_pipeline/background.md b/docs/user_guide/gaze_analysis_pipeline/background.md
index 900d151..11285e3 100644
--- a/docs/user_guide/gaze_analysis_pipeline/background.md
+++ b/docs/user_guide/gaze_analysis_pipeline/background.md
@@ -7,7 +7,7 @@ Background is an optional [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame)
## Load and display ArFrame background
-[ArFrame.background](../../argaze.md/#argaze.ArFeatures.ArFrame.background) can be enabled thanks to a dedicated JSON entry.
+[ArFrame.background](../../argaze.md/#argaze.ArFeatures.ArFrame.background) can be enabled with a dedicated JSON entry.
Here is an extract from the JSON ArFrame configuration file where a background picture is loaded and displayed:
@@ -28,7 +28,7 @@ Here is an extract from the JSON ArFrame configuration file where a background p
```
!!! note
- As explained in [visualization chapter](visualization.md), the resulting image is accessible thanks to [ArFrame.image](../../argaze.md/#argaze.ArFeatures.ArFrame.image) method.
+ As explained in [visualization chapter](visualization.md), the resulting image is accessible with [ArFrame.image](../../argaze.md/#argaze.ArFeatures.ArFrame.image) method.
Now, let's understand the meaning of each JSON entry.
diff --git a/docs/user_guide/gaze_analysis_pipeline/heatmap.md b/docs/user_guide/gaze_analysis_pipeline/heatmap.md
index 2057dbe..77b2be0 100644
--- a/docs/user_guide/gaze_analysis_pipeline/heatmap.md
+++ b/docs/user_guide/gaze_analysis_pipeline/heatmap.md
@@ -7,7 +7,7 @@ Heatmap is an optional [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) pip
## Enable and display ArFrame heatmap
-[ArFrame.heatmap](../../argaze.md/#argaze.ArFeatures.ArFrame.heatmap) can be enabled thanks to a dedicated JSON entry.
+[ArFrame.heatmap](../../argaze.md/#argaze.ArFeatures.ArFrame.heatmap) can be enabled with a dedicated JSON entry.
Here is an extract from the JSON ArFrame configuration file where heatmap is enabled and displayed:
@@ -31,7 +31,7 @@ Here is an extract from the JSON ArFrame configuration file where heatmap is ena
}
```
!!! note
- [ArFrame.heatmap](../../argaze.md/#argaze.ArFeatures.ArFrame.heatmap) is automatically updated each time the [ArFrame.look](../../argaze.md/#argaze.ArFeatures.ArFrame.look) method is called. As explained in [visualization chapter](visualization.md), the resulting image is accessible thanks to [ArFrame.image](../../argaze.md/#argaze.ArFeatures.ArFrame.image) method.
+ [ArFrame.heatmap](../../argaze.md/#argaze.ArFeatures.ArFrame.heatmap) is automatically updated each time the [ArFrame.look](../../argaze.md/#argaze.ArFeatures.ArFrame.look) method is called. As explained in [visualization chapter](visualization.md), the resulting image is accessible with [ArFrame.image](../../argaze.md/#argaze.ArFeatures.ArFrame.image) method.
Now, let's understand the meaning of each JSON entry.
diff --git a/docs/user_guide/gaze_analysis_pipeline/visualization.md b/docs/user_guide/gaze_analysis_pipeline/visualization.md
index 32395c3..08b5465 100644
--- a/docs/user_guide/gaze_analysis_pipeline/visualization.md
+++ b/docs/user_guide/gaze_analysis_pipeline/visualization.md
@@ -7,7 +7,7 @@ Visualization is not a pipeline step, but each [ArFrame](../../argaze.md/#argaze
## Add image parameters to ArFrame JSON configuration
-[ArFrame.image](../../argaze.md/#argaze.ArFeatures.ArFrame.image) method parameters can be configured thanks to a dedicated JSON entry.
+[ArFrame.image](../../argaze.md/#argaze.ArFeatures.ArFrame.image) method parameters can be configured with a dedicated JSON entry.
Here is an extract from the JSON ArFrame configuration file with a sample where image parameters are added:
diff --git a/docs/user_guide/utils/demonstrations_scripts.md b/docs/user_guide/utils/demonstrations_scripts.md
index dd1b8e0..59df85b 100644
--- a/docs/user_guide/utils/demonstrations_scripts.md
+++ b/docs/user_guide/utils/demonstrations_scripts.md
@@ -9,30 +9,47 @@ Collection of command-line scripts for demonstration purpose.
!!! note
*Use -h option to get command arguments documentation.*
+!!! note
+ Each demonstration outputs metrics into the *_export/records* folder.
+
## Random context
-Load **random_context.json** file to process random gaze positions:
+Load **random_context.json** file to generate random gaze positions:
```shell
python -m argaze load ./src/argaze/utils/demo/random_context.json
```
-## OpenCV cursor context
+## OpenCV
-Load **opencv_cursor_context.json** file to process cursor pointer positions over OpenCV window:
+### Cursor context
+
+Load **opencv_cursor_context.json** file to capture cursor pointer positions over OpenCV window:
```shell
python -m argaze load ./src/argaze/utils/demo/opencv_cursor_context.json
```
-## OpenCV movie context
+### Movie context
-Load **opencv_movie_context.json** file to process movie pictures and also cursor pointer positions over OpenCV window:
+Load **opencv_movie_context.json** file to playback a movie and also capture cursor pointer positions over OpenCV window:
```shell
python -m argaze load ./src/argaze/utils/demo/opencv_movie_context.json
```
+### Camera context
+
+Edit **aruco_markers_pipeline.json** file to adapt the *size* to the camera resolution and to reduce the value of the *sides_mask*.
+
+Edit **opencv_camera_context.json** file to select the camera device identifier (default is 0).
+
+Then, load **opencv_camera_context.json** file to capture camera images and also cursor pointer positions over OpenCV window:
+
+```shell
+python -m argaze load ./src/argaze/utils/demo/opencv_camera_context.json
+```
+
## Tobii Pro Glasses 2
### Live stream context
@@ -69,27 +86,24 @@ Then, load **tobii_live_stream_context.json** file to find ArUco marker into cam
python -m argaze load ./src/argaze/utils/demo/tobii_live_stream_context.json
```
-### Post-processing context
-
-!!! note
- This demonstration requires to print **A3_demo.pdf** file located in *./src/argaze/utils/demo/* folder on A3 paper sheet.
+### Segment playback context
-Edit **tobii_post_processing_context.json** file to select an existing Tobii *segment* folder:
+Edit **tobii_segment_playback_context.json** file to select an existing Tobii *segment* folder:
```json
{
- "argaze.utils.contexts.TobiiProGlasses2.PostProcessing" : {
- "name": "Tobii Pro Glasses 2 post-processing",
+ "argaze.utils.contexts.TobiiProGlasses2.SegmentPlayback" : {
+ "name": "Tobii Pro Glasses 2 segment playback",
"segment": "record/segments/1",
"pipeline": "aruco_markers_pipeline.json"
}
}
```
-Then, load **tobii_post_processing_context.json** file to find ArUco marker into camera image and, project gaze positions into AOI:
+Then, load **tobii_segment_playback_context.json** file to find ArUco markers in the camera image and project gaze positions into AOI:
```shell
-python -m argaze load ./src/argaze/utils/demo/tobii_post_processing_context.json
+python -m argaze load ./src/argaze/utils/demo/tobii_segment_playback_context.json
```
## Pupil Invisible
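To pick the *identifier* requested by the camera context above, the available OpenCV devices can be probed beforehand. A short helper sketch using plain OpenCV calls (the range of identifiers tested is arbitrary):

```python
import cv2

# Probe the first few OpenCV device identifiers before editing opencv_camera_context.json
for identifier in range(4):

    capture = cv2.VideoCapture(identifier)

    if capture.isOpened():

        print('camera available at identifier', identifier)

    capture.release()
```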
diff --git a/docs/user_guide/utils/estimate_aruco_markers_pose.md b/docs/user_guide/utils/estimate_aruco_markers_pose.md
index 3d34972..55bd232 100644
--- a/docs/user_guide/utils/estimate_aruco_markers_pose.md
+++ b/docs/user_guide/utils/estimate_aruco_markers_pose.md
@@ -15,7 +15,7 @@ Firstly, edit **utils/estimate_markers_pose/context.json** file as to select a m
}
```
-Sencondly, edit **utils/estimate_markers_pose/pipeline.json** file to setup ArUco camera *size*, ArUco detector *dictionary*, *pose_size* and *pose_ids* attributes.
+Secondly, edit **utils/estimate_markers_pose/pipeline.json** file to setup ArUco camera *size*, ArUco detector *dictionary*, *pose_size* and *pose_ids* attributes.
```json
{
@@ -27,7 +27,7 @@ Sencondly, edit **utils/estimate_markers_pose/pipeline.json** file to setup ArUc
"pose_size": 4,
"pose_ids": [],
"parameters": {
- "useAruco3Detection": 1
+ "useAruco3Detection": true
},
"observers":{
"observers.ArUcoMarkersPoseRecorder": {
diff --git a/docs/user_guide/utils/main_commands.md b/docs/user_guide/utils/main_commands.md
index 4dd3434..c4887a4 100644
--- a/docs/user_guide/utils/main_commands.md
+++ b/docs/user_guide/utils/main_commands.md
@@ -35,13 +35,13 @@ For example:
echo "print(context)" > /tmp/argaze
```
-* Pause context processing:
+* Pause context:
```shell
echo "context.pause()" > /tmp/argaze
```
-* Resume context processing:
+* Resume context:
```shell
echo "context.resume()" > /tmp/argaze
diff --git a/mkdocs.yml b/mkdocs.yml
index 17fc65a..8aadb7d 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -57,6 +57,16 @@ nav:
- use_cases/pilot_gaze_monitoring/context.md
- use_cases/pilot_gaze_monitoring/pipeline.md
- use_cases/pilot_gaze_monitoring/observers.md
+ - Air traffic controller gaze study:
+ - use_cases/air_controller_gaze_study/introduction.md
+ - use_cases/air_controller_gaze_study/context.md
+ - use_cases/air_controller_gaze_study/pipeline.md
+ - use_cases/air_controller_gaze_study/observers.md
+ #- Gaze-based candidate selection:
+ # - use_cases/gaze_based_candidate_selection/introduction.md
+ # - use_cases/gaze_based_candidate_selection/context.md
+ # - use_cases/gaze_based_candidate_selection/pipeline.md
+ # - use_cases/gaze_based_candidate_selection/observers.md
- Code Reference:
- argaze.md
- Contributor Guide:
diff --git a/pyproject.toml b/pyproject.toml
index 383e156..b48e0f8 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -30,8 +30,8 @@ classifiers=[
"Operating System :: OS Independent"
]
dependencies = [
- "opencv-python>=4.7.0",
- "opencv-contrib-python>=4.7.0",
+ "opencv-python>=4.10.0",
+ "opencv-contrib-python>=4.10.0",
"numpy",
"pandas",
"av",
diff --git a/src/argaze.test/ArUcoMarker/ArUcoCamera.py b/src/argaze.test/ArUcoMarker/ArUcoCamera.py
index 76b567e..0777beb 100644
--- a/src/argaze.test/ArUcoMarker/ArUcoCamera.py
+++ b/src/argaze.test/ArUcoMarker/ArUcoCamera.py
@@ -71,10 +71,6 @@ class TestArUcoCameraClass(unittest.TestCase):
self.assertEqual(len(ar_scene.layers.items()), 1)
self.assertEqual(len(ar_scene.layers["Main"].aoi_scene), 1)
self.assertEqual(ar_scene.layers["Main"].aoi_scene['Test'].points_number, 4)
-
- # Check ArScene
- self.assertEqual(ar_scene.angle_tolerance, 1.0)
- self.assertEqual(ar_scene.distance_tolerance, 2.0)
if __name__ == '__main__':
diff --git a/src/argaze.test/ArUcoMarker/utils/aruco_camera.json b/src/argaze.test/ArUcoMarker/utils/aruco_camera.json
index 980dc9f..7217c0e 100644
--- a/src/argaze.test/ArUcoMarker/utils/aruco_camera.json
+++ b/src/argaze.test/ArUcoMarker/utils/aruco_camera.json
@@ -63,9 +63,7 @@
"Main" : {
"aoi_scene": "aoi_3d.obj"
}
- },
- "angle_tolerance": 1.0,
- "distance_tolerance": 2.0
+ }
},
"TestSceneB" : {
"aruco_markers_group": {
@@ -87,9 +85,7 @@
"Main" : {
"aoi_scene": "aoi_3d.obj"
}
- },
- "angle_tolerance": 1.0,
- "distance_tolerance": 2.0
+ }
}
},
"layers": {
diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py
index 8d9eceb..4515ae1 100644
--- a/src/argaze/ArFeatures.py
+++ b/src/argaze/ArFeatures.py
@@ -344,7 +344,7 @@ class ArLayer(DataFeatures.SharedObject, DataFeatures.PipelineStepObject):
if self.__aoi_matcher is not None and self.__aoi_scene is not None:
- # Update looked aoi thanks to aoi matcher
+ # Update looked aoi with aoi matcher
# Note: don't filter valid/invalid and finished/unfinished fixation/saccade as we don't know how the aoi matcher works internally
self.__looked_aoi_name, _ = self.__aoi_matcher.match(gaze_movement, self.__aoi_scene)
@@ -915,8 +915,6 @@ class ArScene(DataFeatures.PipelineStepObject):
# Init private attributes
self._layers = {}
self.__frames = {}
- self.__angle_tolerance = 0.
- self.__distance_tolerance = 0.
@property
def layers(self) -> dict:
@@ -1010,35 +1008,13 @@ class ArScene(DataFeatures.PipelineStepObject):
for name, frame in self.__frames.items():
frame.parent = self
- @property
- def angle_tolerance(self) -> float:
- """Angle error tolerance to validate marker pose in degree used into [estimate_pose][argaze.ArFeatures.ArScene.estimate_pose] function."""
- return self.__angle_tolerance
-
- @angle_tolerance.setter
- def angle_tolerance(self, value: float):
-
- self.__angle_tolerance = value
-
- @property
- def distance_tolerance(self) -> float:
- """Distance error tolerance to validate marker pose in centimeter used into [estimate_pose][argaze.ArFeatures.ArScene.estimate_pose] function."""
- return self.__distance_tolerance
-
- @distance_tolerance.setter
- def distance_tolerance(self, value: float):
-
- self.__distance_tolerance = value
-
def as_dict(self) -> dict:
"""Export ArScene properties as dictionary."""
return {
**DataFeatures.PipelineStepObject.as_dict(self),
"layers": self._layers,
- "frames": self.__frames,
- "angle_tolerance": self.__angle_tolerance,
- "distance_tolerance": self.__distance_tolerance
+ "frames": self.__frames
}
@DataFeatures.PipelineStepMethod
@@ -1174,8 +1150,11 @@ class ArCamera(ArFrame):
self.__projection_cache = projection_cache
+ # Log which projection cache file is used
+ logging.debug('projection_cache: %s', self.__projection_cache)
+
# The file doesn't exist yet: store projections into the cache
- if not os.path.exists(os.path.join( DataFeatures.get_working_directory(), self.__projection_cache) ):
+ if not os.path.exists(self.__projection_cache):
self.__projection_cache_writer = UtilsFeatures.FileWriter(path=self.__projection_cache)
self.__projection_cache_reader = None
@@ -1521,7 +1500,7 @@ class ArContext(DataFeatures.PipelineStepObject):
self._image_parameters = DEFAULT_ARCONTEXT_IMAGE_PARAMETERS
@property
- def pipeline(self) -> DataFeatures.PipelineStepObject:
+ def pipeline(self) -> ArFrame|ArCamera:
"""ArFrame used to process gaze data or ArCamera used to process gaze data and video of environment."""
return self.__pipeline
@@ -1538,7 +1517,7 @@ class ArContext(DataFeatures.PipelineStepObject):
return self.__exceptions
def as_dict(self) -> dict:
- """Export ArContext properties as dictionary."""
+ """Export context properties as dictionary."""
return {
**DataFeatures.PipelineStepObject.as_dict(self),
@@ -1548,13 +1527,13 @@ class ArContext(DataFeatures.PipelineStepObject):
@DataFeatures.PipelineStepEnter
def __enter__(self):
- """Enter into ArContext."""
+ """Enter into context."""
return self
@DataFeatures.PipelineStepExit
def __exit__(self, exception_type, exception_value, exception_traceback):
- """Exit from ArContext."""
+ """Exit from context."""
pass
def _process_gaze_position(self, timestamp: int | float, x: int | float = None, y: int | float = None, precision: int | float = None):
@@ -1709,24 +1688,24 @@ class ArContext(DataFeatures.PipelineStepObject):
@DataFeatures.PipelineStepMethod
def pause(self):
- """Pause pipeline processing."""
+ """Pause context."""
self._pause_event.set()
def is_paused(self) -> bool:
- """Is pipeline processing paused?"""
+ """Is context paused?"""
return self._pause_event.is_set()
@DataFeatures.PipelineStepMethod
def resume(self):
- """Resume pipeline processing."""
+ """Resume context."""
self._pause_event.clear()
-class LiveProcessingContext(ArContext):
+class DataCaptureContext(ArContext):
"""
- Defines abstract live data processing context.
+ Defines abstract data capture context.
"""
@DataFeatures.PipelineStepInit
@@ -1739,14 +1718,14 @@ class LiveProcessingContext(ArContext):
raise NotImplementedError
-# Define default PostProcessingContext image parameters
-DEFAULT_POST_PROCESSING_CONTEXT_IMAGE_PARAMETERS = {
+# Define default DataPlaybackContext image parameters
+DEFAULT_DATA_PLAYBACK_CONTEXT_IMAGE_PARAMETERS = {
"draw_progression": True
}
-class PostProcessingContext(ArContext):
+class DataPlaybackContext(ArContext):
"""
- Defines abstract post data processing context.
+ Defines abstract data playback context.
"""
@DataFeatures.PipelineStepInit
@@ -1754,17 +1733,7 @@ class PostProcessingContext(ArContext):
super().__init__()
- self._image_parameters = {**DEFAULT_ARCONTEXT_IMAGE_PARAMETERS, **DEFAULT_POST_PROCESSING_CONTEXT_IMAGE_PARAMETERS}
-
- def previous(self):
- """Go to previous frame"""
-
- raise NotImplementedError
-
- def next(self):
- """Go to next frame"""
-
- raise NotImplementedError
+ self._image_parameters = {**DEFAULT_ARCONTEXT_IMAGE_PARAMETERS, **DEFAULT_DATA_PLAYBACK_CONTEXT_IMAGE_PARAMETERS}
@property
def duration(self) -> int|float:
@@ -1774,19 +1743,19 @@ class PostProcessingContext(ArContext):
@property
def progression(self) -> float:
- """Get data processing progression between 0 and 1."""
+ """Get data playback progression between 0 and 1."""
raise NotImplementedError
@DataFeatures.PipelineStepImage
def image(self, draw_progression: bool = True, **kwargs):
"""
- Get pipeline image with post processing information.
+ Get pipeline image with data playback information.
Parameters:
draw_progression: draw progress bar
"""
- logging.debug('PostProcessingContext.image %s', self.name)
+ logging.debug('DataPlaybackContext.image %s', self.name)
image = super().image(**kwargs)
height, width, _ = image.shape
diff --git a/src/argaze/ArUcoMarker/ArUcoDetector.py b/src/argaze/ArUcoMarker/ArUcoDetector.py
index 50da144..8ff840b 100644
--- a/src/argaze/ArUcoMarker/ArUcoDetector.py
+++ b/src/argaze/ArUcoMarker/ArUcoDetector.py
@@ -21,110 +21,63 @@ import json
from collections import Counter
from typing import Self
-import cv2 as cv
+import cv2
import numpy
-from cv2 import aruco
from argaze import DataFeatures
from argaze.ArUcoMarker import ArUcoMarkerDictionary, ArUcoMarker, ArUcoOpticCalibrator, ArUcoMarkerGroup
-class DetectorParameters():
- """Wrapper class around ArUco marker detector parameters.
+class DetectorParameters(cv2.aruco.DetectorParameters):
+ """OpenCV DetectorParameters wrapper.
!!! note
More details on [opencv page](https://docs.opencv.org/4.x/d1/dcd/structcv_1_1aruco_1_1DetectorParameters.html)
"""
- __parameters = aruco.DetectorParameters()
- __parameters_names = [
- 'adaptiveThreshConstant',
- 'adaptiveThreshWinSizeMax',
- 'adaptiveThreshWinSizeMin',
- 'adaptiveThreshWinSizeStep',
- 'aprilTagCriticalRad',
- 'aprilTagDeglitch',
- 'aprilTagMaxLineFitMse',
- 'aprilTagMaxNmaxima',
- 'aprilTagMinClusterPixels',
- 'aprilTagMinWhiteBlackDiff',
- 'aprilTagQuadDecimate',
- 'aprilTagQuadSigma',
- 'cornerRefinementMaxIterations',
- 'cornerRefinementMethod',
- 'cornerRefinementMinAccuracy',
- 'cornerRefinementWinSize',
- 'markerBorderBits',
- 'minMarkerPerimeterRate',
- 'maxMarkerPerimeterRate',
- 'minMarkerDistanceRate',
- 'detectInvertedMarker',
- 'errorCorrectionRate',
- 'maxErroneousBitsInBorderRate',
- 'minCornerDistanceRate',
- 'minDistanceToBorder',
- 'minOtsuStdDev',
- 'perspectiveRemoveIgnoredMarginPerCell',
- 'perspectiveRemovePixelPerCell',
- 'polygonalApproxAccuracyRate',
- 'useAruco3Detection'
- ]
-
def __init__(self, **kwargs):
- for parameter, value in kwargs.items():
- setattr(self.__parameters, parameter, value)
-
- self.__dict__.update(kwargs)
-
- def __setattr__(self, parameter, value):
+ super().__init__()
- setattr(self.__parameters, parameter, value)
+ self.__modified = []
- def __getattr__(self, parameter):
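+ # Collect public OpenCV detector parameter names, excluding the wrapper's own helpers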
+ self.__parameters_names = [name for name in dir(self) if not name.startswith('_')]
+ self.__parameters_names.remove('from_json')
+ self.__parameters_names.remove('readDetectorParameters')
+ self.__parameters_names.remove('writeDetectorParameters')
+
+ for parameter, value in kwargs.items():
- return getattr(self.__parameters, parameter)
+ setattr(self, parameter, value)
+ self.__modified.append(parameter)
@classmethod
def from_json(cls, json_filepath) -> Self:
"""Load detector parameters from .json file."""
with open(json_filepath) as configuration_file:
+
return DetectorParameters(**json.load(configuration_file))
def __str__(self) -> str:
"""Detector parameters string representation."""
- return f'{self}'
-
- def __format__(self, spec: str) -> str:
- """Formated detector parameters string representation.
-
- Parameters:
- spec: 'modified' to get only modified parameters.
- """
-
output = ''
for parameter in self.__parameters_names:
- if parameter in self.__dict__.keys():
+ if parameter in self.__modified:
- output += f'\t*{parameter}: {getattr(self.__parameters, parameter)}\n'
+ output += f'\t*{parameter}: {getattr(self, parameter)}\n'
- elif spec == "":
+ else:
- output += f'\t{parameter}: {getattr(self.__parameters, parameter)}\n'
+ output += f'\t{parameter}: {getattr(self, parameter)}\n'
return output
- @property
- def internal(self):
- return self.__parameters
-
-
class ArUcoDetector(DataFeatures.PipelineStepObject):
- """OpenCV ArUco library wrapper."""
+ """OpenCV ArucoDetector wrapper."""
# noinspection PyMissingConstructor
@DataFeatures.PipelineStepInit
@@ -201,7 +154,7 @@ class ArUcoDetector(DataFeatures.PipelineStepObject):
self.__detected_markers, detected_markers_corners, detected_markers_ids = {}, [], []
# Detect markers into gray picture
- detected_markers_corners, detected_markers_ids, _ = aruco.detectMarkers(cv.cvtColor(image, cv.COLOR_BGR2GRAY), self.__dictionary.markers, parameters=self.__parameters.internal if self.__parameters else None)
+ detected_markers_corners, detected_markers_ids, _ = cv2.aruco.detectMarkers(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), self.__dictionary.markers, parameters=self.__parameters)
# Is there detected markers ?
if len(detected_markers_corners) > 0:
@@ -272,7 +225,7 @@ class ArUcoDetector(DataFeatures.PipelineStepObject):
if len(ids) == 0:
ids = self.__detected_markers.keys()
- # Prepare data for aruco.estimatePoseSingleMarkers function
+ # Prepare data for cv2.aruco.estimatePoseSingleMarkers function
selected_markers_corners = tuple()
selected_markers_ids = []
@@ -286,14 +239,14 @@ class ArUcoDetector(DataFeatures.PipelineStepObject):
# Estimate pose of selected markers
if len(selected_markers_corners) > 0:
- markers_rvecs, markers_tvecs, markers_points = aruco.estimatePoseSingleMarkers(selected_markers_corners, size, numpy.array(self.__optic_parameters.K), numpy.array(self.__optic_parameters.D))
+ markers_rvecs, markers_tvecs, markers_points = cv2.aruco.estimatePoseSingleMarkers(selected_markers_corners, size, numpy.array(self.__optic_parameters.K), numpy.array(self.__optic_parameters.D))
for i, marker_id in enumerate(selected_markers_ids):
marker = self.__detected_markers[marker_id]
marker.translation = markers_tvecs[i][0]
- marker.rotation, _ = cv.Rodrigues(markers_rvecs[i][0])
+ marker.rotation, _ = cv2.Rodrigues(markers_rvecs[i][0])
marker.size = size
marker.points = markers_points.reshape(4, 3).dot(marker.rotation) - marker.translation
@@ -328,15 +281,15 @@ class ArUcoDetector(DataFeatures.PipelineStepObject):
"""
# detect markers from gray picture
- gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
- detected_markers_corners, detected_markers_ids, _ = aruco.detectMarkers(gray, self.__dictionary.markers,
+ gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+ detected_markers_corners, detected_markers_ids, _ = cv2.aruco.detectMarkers(gray, self.__dictionary.markers,
- parameters=self.__parameters.internal)
+ parameters=self.__parameters)
# if all board markers are detected
if len(detected_markers_corners) == expected_markers_number:
self.__board = board
- self.__board_corners_number, self.__board_corners, self.__board_corners_ids = aruco.interpolateCornersCharuco(
+ self.__board_corners_number, self.__board_corners, self.__board_corners_ids = cv2.aruco.interpolateCornersCharuco(
detected_markers_corners, detected_markers_ids, gray, self.__board.model)
else:
@@ -350,7 +303,7 @@ class ArUcoDetector(DataFeatures.PipelineStepObject):
"""Draw detected board corners in image."""
if self.__board is not None:
- cv.drawChessboardCorners(image, ((self.__board.size[0] - 1), (self.__board.size[1] - 1)),
+ cv2.drawChessboardCorners(image, ((self.__board.size[0] - 1), (self.__board.size[1] - 1)),
self.__board_corners, True)
def board_corners_number(self) -> int:
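A hedged usage sketch of the reworked `DetectorParameters` wrapper above; `useAruco3Detection` is one of the OpenCV parameter names used in the documentation samples, and printing the instance lists explicitly set parameters with a leading `*`:

```python
from argaze.ArUcoMarker.ArUcoDetector import DetectorParameters

# Set one OpenCV detection parameter explicitly, as the JSON samples do
parameters = DetectorParameters(useAruco3Detection=True)

# Explicitly set parameters are listed with a leading '*'
print(parameters)
```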
diff --git a/src/argaze/ArUcoMarker/ArUcoMarkerGroup.py b/src/argaze/ArUcoMarker/ArUcoMarkerGroup.py
index 5575cad..8cd8043 100644
--- a/src/argaze/ArUcoMarker/ArUcoMarkerGroup.py
+++ b/src/argaze/ArUcoMarker/ArUcoMarkerGroup.py
@@ -149,7 +149,7 @@ class ArUcoMarkerGroup(DataFeatures.PipelineStepObject):
new_marker = ArUcoMarker.ArUcoMarker(self.__dictionary, identifier, size)
- # Build marker corners thanks to translation vector and rotation matrix
+ # Build marker corners considering translation vector and rotation matrix
place_corners = numpy.array([[-size / 2, size / 2, 0], [size / 2, size / 2, 0], [size / 2, -size / 2, 0], [-size / 2, -size / 2, 0]])
place_corners = place_corners.dot(rmat) + tvec
diff --git a/src/argaze/DataFeatures.py b/src/argaze/DataFeatures.py
index 2629e8e..491d6ac 100644
--- a/src/argaze/DataFeatures.py
+++ b/src/argaze/DataFeatures.py
@@ -703,21 +703,20 @@ def PipelineStepExecutionTime(method):
end = time.perf_counter()
- # Check earlier call dates to calculate frequency
- try:
-
- last_start, last_end = self._execution_times[method.__name__]
-
- if start > last_start:
-
- self._execution_frequencies[method.__name__] = 1 / (start - last_start)
-
- except KeyError:
+ # Create list to store method call dates and init call frequency
+ if method.__name__ not in self._execution_times.keys():
+ self._execution_times[method.__name__] = []
self._execution_frequencies[method.__name__] = math.nan
# Store start end end dates
- self._execution_times[method.__name__] = (start, end)
+ self._execution_times[method.__name__].append((start, end))
+
+ # Remove call dates older than 1 second and count number of calls to get frequency
+ while self._execution_times[method.__name__][-1][0] - self._execution_times[method.__name__][0][0] > 1:
+
+ self._execution_times[method.__name__].pop(0)
+ self._execution_frequencies[method.__name__] = len(self._execution_times[method.__name__])
return result
@@ -1354,8 +1353,7 @@ class PipelineStepObject():
# Check execution time
try:
- start, end = self._execution_times[method_name]
- t = end - start
+ t = numpy.mean(numpy.diff(self._execution_times[method_name]))
except KeyError:
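A standalone sketch of the sliding-window bookkeeping introduced above, using a plain list in place of the pipeline step internals (all values are illustrative):

```python
import numpy

# (start, end) call dates collected by the decorator, in seconds
execution_times = [(0.00, 0.01), (0.40, 0.42), (0.90, 0.91), (1.30, 1.33)]

# Drop call dates older than one second relative to the most recent call
while execution_times[-1][0] - execution_times[0][0] > 1:

    execution_times.pop(0)

# The number of calls kept in the window is the frequency in Hz
frequency = len(execution_times)

# Mean execution time, as in the execution info hunk: mean of (end - start)
mean_execution_time = numpy.mean(numpy.diff(execution_times))

print(frequency, mean_execution_time)
```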
diff --git a/src/argaze/GazeAnalysis/KCoefficient.py b/src/argaze/GazeAnalysis/KCoefficient.py
index f9218cb..c6f303a 100644
--- a/src/argaze/GazeAnalysis/KCoefficient.py
+++ b/src/argaze/GazeAnalysis/KCoefficient.py
@@ -48,7 +48,7 @@ class ScanPathAnalyzer(GazeFeatures.ScanPathAnalyzer):
for scan_step in scan_path:
- durations.append(scan_step.duration)
+ durations.append(scan_step.fixation_duration)
amplitudes.append(scan_step.last_saccade.amplitude)
durations = numpy.array(durations)
@@ -65,7 +65,7 @@ class ScanPathAnalyzer(GazeFeatures.ScanPathAnalyzer):
Ks = []
for scan_step in scan_path:
- Ks.append((abs(scan_step.duration - duration_mean) / duration_std) - (abs(scan_step.last_saccade.amplitude - amplitude_mean) / amplitude_std))
+ Ks.append((abs(scan_step.fixation_duration - duration_mean) / duration_std) - (abs(scan_step.last_saccade.amplitude - amplitude_mean) / amplitude_std))
self.__K = numpy.array(Ks).mean()
@@ -106,7 +106,7 @@ class AOIScanPathAnalyzer(GazeFeatures.AOIScanPathAnalyzer):
for aoi_scan_step in aoi_scan_path:
- durations.append(aoi_scan_step.duration)
+ durations.append(aoi_scan_step.fixation_duration)
amplitudes.append(aoi_scan_step.last_saccade.amplitude)
durations = numpy.array(durations)
@@ -123,7 +123,7 @@ class AOIScanPathAnalyzer(GazeFeatures.AOIScanPathAnalyzer):
Ks = []
for aoi_scan_step in aoi_scan_path:
- Ks.append((abs(aoi_scan_step.duration - duration_mean) / duration_std) - (abs(aoi_scan_step.last_saccade.amplitude - amplitude_mean) / amplitude_std))
+ Ks.append((abs(aoi_scan_step.fixation_duration - duration_mean) / duration_std) - (abs(aoi_scan_step.last_saccade.amplitude - amplitude_mean) / amplitude_std))
self.__K = numpy.array(Ks).mean()
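A numeric sketch of the K coefficient computation after this change, with hypothetical fixation durations and saccade amplitudes standing in for argaze scan path steps:

```python
import numpy

# Hypothetical per-step values: fixation durations (ms) and saccade amplitudes (deg)
durations = numpy.array([200., 350., 150.])
amplitudes = numpy.array([5., 2., 8.])

# Same expression as above: standardized fixation duration minus standardized saccade amplitude
Ks = (numpy.abs(durations - durations.mean()) / durations.std()) \
     - (numpy.abs(amplitudes - amplitudes.mean()) / amplitudes.std())

# The K coefficient is the mean over all scan steps
K = Ks.mean()
print(K)
```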
diff --git a/src/argaze/__main__.py b/src/argaze/__main__.py
index 76e9664..c65d6e5 100644
--- a/src/argaze/__main__.py
+++ b/src/argaze/__main__.py
@@ -24,10 +24,11 @@ import contextlib
import time
import os
import stat
+import math
from . import load
from .DataFeatures import SharedObjectBusy
-from .ArFeatures import ArCamera, ArContext, PostProcessingContext, LiveProcessingContext
+from .ArFeatures import ArCamera, ArContext, DataPlaybackContext, DataCaptureContext
from .utils.UtilsFeatures import print_progress_bar
import cv2
@@ -68,7 +69,7 @@ def load_context(args):
# Blanck line
info_stack += 1
- if issubclass(type(context), LiveProcessingContext):
+ if issubclass(type(context), DataCaptureContext):
info_stack += 1
cv2.putText(image, f'Press Enter to start calibration', (int(width/4)+20, int(height/3)+(info_stack*40)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
@@ -76,7 +77,7 @@ def load_context(args):
info_stack += 1
cv2.putText(image, f'Press r to start/stop recording', (int(width/4)+20, int(height/3)+(info_stack*40)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
- if issubclass(type(context), PostProcessingContext):
+ if issubclass(type(context), DataPlaybackContext):
info_stack += 1
cv2.putText(image, f'Press Space bar to pause/resume processing', (int(width/4)+20, int(height/3)+(info_stack*40)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
@@ -199,8 +200,8 @@ def load_context(args):
raise KeyboardInterrupt()
- # Keys specific to live processing contexts
- if issubclass(type(context), LiveProcessingContext):
+ # Keys specific to data capture contexts
+ if issubclass(type(context), DataCaptureContext):
# Enter: start calibration
if key_pressed == 13:
@@ -222,10 +223,10 @@ def load_context(args):
context.create_recording()
context.start_recording()
- # Keys specific to post processing contexts
- if issubclass(type(context), PostProcessingContext):
+ # Keys specific to data playback contexts
+ if issubclass(type(context), DataPlaybackContext):
- # Space bar: pause/resume pipeline processing
+ # Space bar: pause/resume data playback
if key_pressed == 32:
@@ -236,21 +237,11 @@ def load_context(args):
else:
context.pause()
-
- # Select previous image with left arrow
- if key_pressed == 2:
-
- context.previous()
-
- # Select next image with right arrow
- if key_pressed == 3:
-
- context.next()
# Window mode off
else:
- if issubclass(type(context), PostProcessingContext):
+ if issubclass(type(context), DataPlaybackContext):
prefix = f'Progression'
suffix = f'| {int(context.progression*context.duration * 1e-3)}s in {int(time.time()-start_time)}s'
@@ -261,7 +252,8 @@ def load_context(args):
if issubclass(type(context.pipeline), ArCamera):
watch_time, watch_freq = context.pipeline.execution_info('watch')
- suffix += f' | Watch {int(watch_time)}ms at {watch_freq}Hz'
+
+ suffix += f' | Watch {int(watch_time) if not math.isnan(watch_time) else 0}ms at {watch_freq if not math.isnan(watch_freq) else 0}Hz'
# Clear old longer print
suffix += ' '
diff --git a/src/argaze/utils/UtilsFeatures.py b/src/argaze/utils/UtilsFeatures.py
index ce92e35..5dbc10c 100644
--- a/src/argaze/utils/UtilsFeatures.py
+++ b/src/argaze/utils/UtilsFeatures.py
@@ -155,11 +155,6 @@ class TimeProbe():
self.start()
-def tuple_to_string(t: tuple, separator: str = ", ") -> str:
- """Convert tuple elements into quoted strings separated by a separator string."""
-
- return separator.join(f'\"{e}\"' for e in t)
-
def PrintCallStack(method):
"""Define a decorator to print call stack until the decorated method."""
@@ -230,37 +225,29 @@ class FileWriter(DataFeatures.PipelineStepObject):
os.makedirs(self.__path.parent.absolute())
# Open file
- self.__file = open(self.__path, 'w', encoding='utf-8', buffering=1)
+ self.__file = open(self.__path, 'w', encoding='utf-8', newline='', buffering=1)
+ self.__writer = csv.writer(self.__file, delimiter=self.__separator, quoting=csv.QUOTE_NONNUMERIC)
# Write header if required
if self.__header is not None:
- # Format list or tuple element into quoted strings
- if not isinstance(self.__header, str):
-
- self.__header = tuple_to_string(self.__header, self.__separator)
-
- print(self.__header, file=self.__file, flush=True)
+ self.__writer.writerow(self.__header)
@DataFeatures.PipelineStepExit
def __exit__(self, exception_type, exception_value, exception_traceback):
"""Close file."""
+
self.__file.close()
def write(self, data: str|tuple):
- """Write data as a new line into file.
+ """Write data as a new line into file."""
- !!! note
- Tuple elements are converted into quoted strings separated by separator string.
- """
-
- # Format list or tuple element into quoted strings
- if not isinstance(data, str):
-
- data = tuple_to_string(data, self.__separator)
+ if self.__file.closed:
+
+ return
# Write into file
- print(data, file=self.__file, flush=True)
+ self.__writer.writerow(data)
class FileReader(DataFeatures.PipelineStepObject):
"""Read data from a file line by line."""
@@ -305,11 +292,7 @@ class FileReader(DataFeatures.PipelineStepObject):
pass
def read(self) -> str|tuple:
- """Read next data from file.
-
- !!! note
- Quoted strings separated by separator string are converted into tuple elements.
- """
+ """Read next data from file."""
try:
@@ -443,7 +426,7 @@ class LookPerformanceRecorder(FileWriter):
super().__init__(**kwargs)
- self.header = "Timestamp (ms)", "Time (ms)", "Frequency (Hz)"
+ self.header = "Real time (ms)", "Frame timestamp (ms)", "Execution time (ms)", "Execution frequency (Hz)"
self.__start_time = time.perf_counter()
@@ -456,6 +439,7 @@ class LookPerformanceRecorder(FileWriter):
log = (
(time.perf_counter() - self.__start_time) * 1e3,
+ timestamp,
t * 1e3,
f
)
@@ -470,7 +454,7 @@ class WatchPerformanceRecorder(FileWriter):
super().__init__(**kwargs)
- self.header = "Timestamp (ms)", "Time (ms)", "Frequency (Hz)"
+ self.header = "Real time (ms)", "Camera timestamp (ms)", "Execution time (ms)", "Execution frequency (Hz)"
self.__start_time = time.perf_counter()
@@ -483,6 +467,7 @@ class WatchPerformanceRecorder(FileWriter):
log = (
(time.perf_counter() - self.__start_time) * 1e3,
+ timestamp,
t * 1e3,
f
)
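A hedged usage sketch of the csv-based `FileWriter` behaviour above, assuming the `path` and `header` properties can be set as the recorder classes in this file do; each tuple passed to `write()` becomes one CSV row with non-numeric fields quoted:

```python
from argaze.utils.UtilsFeatures import FileWriter

# Output path and column names are illustrative
writer = FileWriter(path='_export/records/sample.csv')
writer.header = 'Real time (ms)', 'Value'

with writer:

    writer.write((0.0, 'started'))
    writer.write((16.7, 3.14))
```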
diff --git a/src/argaze/utils/contexts/OpenCV.py b/src/argaze/utils/contexts/OpenCV.py
index 273705a..908f91d 100644
--- a/src/argaze/utils/contexts/OpenCV.py
+++ b/src/argaze/utils/contexts/OpenCV.py
@@ -27,7 +27,7 @@ from argaze import ArFeatures, DataFeatures
class Cursor(ArFeatures.ArContext):
- """Process cursor position over OpenCV window.
+ """Capture cursor position over OpenCV window.
!!! warning
It is assumed that an OpenCV window with the same name than the context is used to display context's pipeline image.
@@ -36,7 +36,7 @@ class Cursor(ArFeatures.ArContext):
@DataFeatures.PipelineStepInit
def __init__(self, **kwargs):
- # Init LiveProcessingContext class
+ # Init DataCaptureContext class
super().__init__()
@DataFeatures.PipelineStepEnter
@@ -74,8 +74,8 @@ class Cursor(ArFeatures.ArContext):
self._process_gaze_position(timestamp = int((time.time() - self._start_time) * 1e3), x = x, y = y)
-class Movie(Cursor):
- """Process movie images and cursor position over OpenCV window.
+class Movie(Cursor, ArFeatures.DataPlaybackContext):
+ """Playback movie images and capture cursor position over OpenCV window.
!!! warning
It is assumed that an OpenCV window with the same name than the context is used to display context's pipeline image.
@@ -134,45 +134,35 @@ class Movie(Cursor):
def __read(self):
"""Iterate on movie images."""
- # Init image selection
- _, current_image = self.__movie.read()
- current_image_time = self.__movie.get(cv2.CAP_PROP_POS_MSEC)
- self.__next_image_index = 0 #int(self.__start * self.__movie_fps)
-
- while not self._stop_event.is_set():
+ while self.is_running():
# Check pause event (and stop event)
- while self._pause_event.is_set() and not self._stop_event.is_set():
+ while self.is_paused() and self.is_running():
logging.debug('> reading is paused at %i', current_image_time)
time.sleep(1)
- # Select a new image and detect markers once
- if self.__next_image_index != self.__current_image_index or self.__refresh:
-
- self.__movie.set(cv2.CAP_PROP_POS_FRAMES, self.__next_image_index)
-
- success, image = self.__movie.read()
+ # Read image
+ success, image = self.__movie.read()
- if success:
+ if success:
- video_height, video_width, _ = image.shape
+ # Refresh once
+ self.__refresh = False
- # Refresh once
- self.__refresh = False
+ self.__current_image_index = self.__movie.get(cv2.CAP_PROP_POS_FRAMES) - 1
+ current_image_time = self.__movie.get(cv2.CAP_PROP_POS_MSEC)
- self.__current_image_index = self.__movie.get(cv2.CAP_PROP_POS_FRAMES) - 1
- current_image_time = self.__movie.get(cv2.CAP_PROP_POS_MSEC)
+ # Timestamp image
+ image = DataFeatures.TimestampedImage(image, timestamp=current_image_time)
- # Timestamp image
- image = DataFeatures.TimestampedImage(image, timestamp=current_image_time)
+ # Process movie image
+ self._process_camera_image(timestamp=current_image_time, image=image)
- # Process movie image
- self._process_camera_image(timestamp=current_image_time, image=image)
-
- # Wait
- time.sleep(1 / self.__movie_fps)
+ # Wait for half frame time
+ # TODO: Consider camera image processing time to adapt waiting time
+ time.sleep(0.5 / self.__movie_fps)
@DataFeatures.PipelineStepExit
def __exit__(self, exception_type, exception_value, exception_traceback):
@@ -182,33 +172,12 @@ class Movie(Cursor):
# Exit from Cursor context
super().__exit__(exception_type, exception_value, exception_traceback)
- # Close data stream
+ # Stop data playback
self.stop()
# Stop reading thread
threading.Thread.join(self.__reading_thread)
- def refresh(self):
- """Refresh current frame."""
- self.__refresh = True
-
- def previous(self):
- """Go to previous frame."""
- self.__next_image_index -= 1
-
- # Clip image index
- if self.__next_image_index < 0:
- self.__next_image_index = 0
-
- def next(self):
- """Go to next frame."""
-
- self.__next_image_index += 1
-
- # Clip image index
- if self.__next_image_index < 0:
- self.__next_image_index = 0
-
@property
def duration(self) -> int|float:
"""Get movie duration."""
@@ -217,7 +186,7 @@ class Movie(Cursor):
@property
def progression(self) -> float:
- """Get movie processing progression between 0 and 1."""
+ """Get movie playback progression between 0 and 1."""
if self.__current_image_index is not None:
@@ -225,4 +194,93 @@ class Movie(Cursor):
else:
- return 0. \ No newline at end of file
+ return 0.
+
+class Camera(Cursor, ArFeatures.DataCaptureContext):
+ """Capture camera images and capture cursor position over OpenCV window.
+
+ !!! warning
+ It is assumed that an OpenCV window with the same name than the context is used to display context's pipeline image.
+ """
+ @DataFeatures.PipelineStepInit
+ def __init__(self, **kwargs):
+
+ # Init Cursor class
+ super().__init__()
+
+ # Init private attributes
+ self.__camera_id = None
+ self.__camera = None
+ self.__video_fps = None
+ self.__video_width = None
+ self.__video_height = None
+
+ @property
+ def identifier(self) -> int:
+ """Camera device id."""
+ return self.__camera_id
+
+ @identifier.setter
+ def identifier(self, camera_id: int):
+
+ self.__camera_id = camera_id
+
+ # Open camera device
+ self.__camera = cv2.VideoCapture(self.__camera_id)
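+ # Read capture properties (frame rate and resolution) from the device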
+ self.__video_fps = self.__camera.get(cv2.CAP_PROP_FPS)
+ self.__video_width = int(self.__camera.get(cv2.CAP_PROP_FRAME_WIDTH))
+ self.__video_height = int(self.__camera.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+ @DataFeatures.PipelineStepEnter
+ def __enter__(self):
+
+ logging.info('OpenCV.Camera context starts...')
+
+ # Enter in Cursor context
+ super().__enter__()
+
+ # Open reading thread
+ self.__reading_thread = threading.Thread(target=self.__read)
+
+ logging.debug('> starting reading thread...')
+ self.__reading_thread.start()
+
+ return self
+
+ def __read(self):
+ """Iterate on camera images."""
+
+ while self.is_running():
+
+ # Check pause event (and stop event)
+ while self.is_paused() and self.is_running():
+
+ logging.debug('> reading is paused')
+
+ time.sleep(1)
+
+ # Select a new image
+ success, image = self.__camera.read()
+ image_time = self.__camera.get(cv2.CAP_PROP_POS_MSEC)
+
+ if success:
+
+ # Timestamp image
+ image = DataFeatures.TimestampedImage(image, timestamp=image_time)
+
+ # Process camera image
+ self._process_camera_image(timestamp=image_time, image=image)
+
+ @DataFeatures.PipelineStepExit
+ def __exit__(self, exception_type, exception_value, exception_traceback):
+
+ logging.info('OpenCV.Camera context stops...')
+
+ # Exit from Cursor context
+ super().__exit__(exception_type, exception_value, exception_traceback)
+
+ # Close data capture
+ self.stop()
+
+ # Stop reading thread
+ threading.Thread.join(self.__reading_thread)
diff --git a/src/argaze/utils/contexts/PupilLabs.py b/src/argaze/utils/contexts/PupilLabsInvisible.py
index d5a4319..5c9a138 100644
--- a/src/argaze/utils/contexts/PupilLabs.py
+++ b/src/argaze/utils/contexts/PupilLabsInvisible.py
@@ -21,6 +21,7 @@ __license__ = "GPLv3"
import sys
import logging
import time
+
import threading
from dataclasses import dataclass
@@ -33,17 +34,17 @@ import cv2
from pupil_labs.realtime_api.simple import discover_one_device
-class LiveStream(ArFeatures.ArContext):
+class LiveStream(ArFeatures.DataCaptureContext):
@DataFeatures.PipelineStepInit
def __init__(self, **kwargs):
- # Init ArContext class
+ # Init DataCaptureContext class
super().__init__()
def __enter__(self):
- logging.info('Pupil-Labs Device connexion starts...')
+ logging.info('Pupil-Labs Invisible connection starts...')
# Init timestamp
self.__start_time = time.time()
@@ -76,9 +77,9 @@ class LiveStream(ArFeatures.ArContext):
def __stream_gaze(self):
"""Stream gaze."""
- logging.debug('Stream gaze from Pupil Device')
+ logging.debug('Stream gaze from Pupil Invisible')
- while not self._stop_event.is_set():
+ while self.is_running():
try:
while True:
@@ -107,9 +108,9 @@ class LiveStream(ArFeatures.ArContext):
def __stream_video(self):
"""Stream video."""
- logging.debug('Stream video from Pupil Device')
+ logging.debug('Stream video from Pupil Invisible')
- while not self._stop_event.is_set():
+ while self.is_running():
try:
while True:
@@ -132,7 +133,7 @@ class LiveStream(ArFeatures.ArContext):
logging.debug('Pupil-Labs context stops...')
# Close data stream
- self._stop_event.set()
+ self.stop()
# Stop streaming
threading.Thread.join(self.__gaze_thread)
diff --git a/src/argaze/utils/contexts/TobiiProGlasses2.py b/src/argaze/utils/contexts/TobiiProGlasses2.py
index 7f45f32..fbc9d1e 100644
--- a/src/argaze/utils/contexts/TobiiProGlasses2.py
+++ b/src/argaze/utils/contexts/TobiiProGlasses2.py
@@ -330,12 +330,12 @@ class TobiiJsonDataParser():
return MarkerPosition(data['marker3d'], data['marker2d'])
-class LiveStream(ArFeatures.LiveProcessingContext):
+class LiveStream(ArFeatures.DataCaptureContext):
@DataFeatures.PipelineStepInit
def __init__(self, **kwargs):
- # Init LiveProcessingContext class
+ # Init DataCaptureContext class
super().__init__()
# Init private attributes
@@ -629,7 +629,6 @@ class LiveStream(ArFeatures.LiveProcessingContext):
threading.Thread.join(self.__video_thread)
-
def __make_socket(self):
"""Create a socket to enable network communication."""
@@ -742,15 +741,15 @@ class LiveStream(ArFeatures.LiveProcessingContext):
# Check image validity
if image is None:
- # Wait for half frame time
- time.sleep(2 / self.__video_fps)
+ # Wait 1ms
+ time.sleep(1e-3)
continue
# Check image time validity
if image.time is None:
- # Wait for half frame time
- time.sleep(2 / self.__video_fps)
+ # Wait 1ms
+ time.sleep(1e-3)
continue
# Store first timestamp
@@ -786,9 +785,6 @@ class LiveStream(ArFeatures.LiveProcessingContext):
while not self._stop_event.is_set():
- # Wait for half frame time
- time.sleep(2 / self.__video_fps)
-
# Lock buffer access
with self.__video_buffer_lock:
@@ -812,6 +808,9 @@ class LiveStream(ArFeatures.LiveProcessingContext):
# Clear buffer
self.__video_buffer = None
+ # Wait 1ms
+ time.sleep(1e-3)
+
def __keep_alive(self):
"""Maintain network connection."""
@@ -975,6 +974,11 @@ class LiveStream(ArFeatures.LiveProcessingContext):
self.__calibration_id = None
def get_calibration_status(self) -> str:
+ """Get calibration status.
+
+ Returns:
+ status: 'calibrating', 'calibrated', 'stale', 'uncalibrated' or 'failed' string
+ """
return self.__calibration_status
@@ -1062,9 +1066,9 @@ class LiveStream(ArFeatures.LiveProcessingContext):
@DataFeatures.PipelineStepImage
def image(self, **kwargs):
"""
- Get pipeline image with live processing information.
+ Get pipeline image with data capture information.
"""
- logging.debug('LiveProcessingContext.image %s', self.name)
+ logging.debug('DataCaptureContext.image %s', self.name)
image = super().image(**kwargs)
height, width, _ = image.shape
@@ -1126,7 +1130,7 @@ class LiveStream(ArFeatures.LiveProcessingContext):
return image
-class PostProcessing(ArFeatures.PostProcessingContext):
+class SegmentPlayback(ArFeatures.DataPlaybackContext):
@DataFeatures.PipelineStepInit
def __init__(self, **kwargs):
@@ -1165,6 +1169,7 @@ class PostProcessing(ArFeatures.PostProcessingContext):
self.__sync_event_unit = None
self.__sync_event_factor = None
self.__sync_data_ts = None
+ self.__sync_video_ts = None
self.__sync_ts = None
self.__last_sync_data_ts = None
self.__last_sync_ts = None
@@ -1297,8 +1302,22 @@ class PostProcessing(ArFeatures.PostProcessingContext):
logging.debug('> read image at %i timestamp', video_ts)
- # Process camera image
- self._process_camera_image(timestamp=video_ts, image=video_image)
+ # if sync is required
+ if self.__sync_event is not None:
+
+ # Wait for a first sync event
+ if self.__sync_ts is not None:
+
+ self.__sync_video_ts = int(self.__sync_ts + video_ts - self.__sync_data_ts)
+
+ # Process camera image
+ self._process_camera_image(timestamp=self.__sync_video_ts, image=video_image)
+
+ # Otherwise, always process images
+ elif self.__sync_event is None:
+
+ # Process camera image
+ self._process_camera_image(timestamp=video_ts, image=video_image)
height, width, _ = video_image.shape
@@ -1343,15 +1362,17 @@ class PostProcessing(ArFeatures.PostProcessingContext):
logging.info('Difference between data and sync event timestamps is %i ms', diff_data_ts-diff_sync_ts)
- # Don't process gaze positions if sync is required but sync event not happened yet
- if self.__sync_event is not None and self.__sync_ts is None:
+ # Don't process gaze positions when:
+ # - no image has been processed yet
+ # - no sync event has happened yet
+ if self.__sync_video_ts is None or self.__sync_ts is None:
- continue
+ continue
- # Otherwise, synchronize timestamp with sync event
- elif self.__sync_event is not None and self.__sync_ts is not None:
+ # Otherwise, synchronize timestamp with sync event
+ else:
- data_ts = int(self.__sync_ts + data_ts - self.__sync_data_ts)
+ data_ts = int(self.__sync_ts + data_ts - self.__sync_data_ts)
# Process gaze positions
match data_object_type:
@@ -1514,6 +1535,6 @@ class PostProcessing(ArFeatures.PostProcessingContext):
@property
def progression(self) -> float:
- """Get data processing progression between 0 and 1."""
+ """Get data playback progression between 0 and 1."""
return self.__progression \ No newline at end of file
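A worked numeric sketch of the sync-event alignment applied to video and gaze timestamps above (all values are hypothetical, in milliseconds):

```python
sync_ts = 120000       # reference clock value at the sync event
sync_data_ts = 45000   # data timestamp recorded at the sync event
video_ts = 45250       # a later video (or gaze data) timestamp from the segment

# Same arithmetic as __sync_video_ts and data_ts above: shift onto the sync-event clock
aligned_ts = int(sync_ts + video_ts - sync_data_ts)

assert aligned_ts == 120250
```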
diff --git a/src/argaze/utils/contexts/TobiiProGlasses3.py b/src/argaze/utils/contexts/TobiiProGlasses3.py
new file mode 100644
index 0000000..a53c095
--- /dev/null
+++ b/src/argaze/utils/contexts/TobiiProGlasses3.py
@@ -0,0 +1,128 @@
+"""Handle network connection to Tobii Pro G3 devices.
+ Based on Tobii Realtime Python API.
+ g3pylib must be installed.
+"""
+
+"""
+This program is free software: you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation, either version 3 of the License, or (at your option) any later
+version.
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+You should have received a copy of the GNU General Public License along with
+this program. If not, see <https://www.gnu.org/licenses/>.
+
+"""
+
+__author__ = "Damien Mouratille"
+__credits__ = []
+__copyright__ = "Copyright 2024, Ecole Nationale de l'Aviation Civile (ENAC)"
+__license__ = "GPLv3"
+
+import sys
+import logging
+import time
+import dill
+import threading
+from dataclasses import dataclass
+import numpy
+import cv2
+import asyncio
+import os
+
+from argaze import ArFeatures, DataFeatures, GazeFeatures
+from argaze.utils import UtilsFeatures
+
+
+from g3pylib import connect_to_glasses
+
+
+class LiveStream(ArFeatures.DataCaptureContext):
+
+ @DataFeatures.PipelineStepInit
+ def __init__(self, **kwargs):
+
+ # Init DataCaptureContext class
+ super().__init__()
+
+ def __enter__(self):
+
+ logging.info('Tobii Pro G3 connection starts...')
+
+ # Init timestamp
+ self.__start_time = time.time()
+
+ self.__loop = asyncio.new_event_loop()
+ self.__loop.run_until_complete(self.__stream_rtsp())
+
+ return self
+
+ async def __stream_rtsp(self):
+ """Stream video and gaze."""
+
+ logging.info('Stream video and gaze from Tobii Pro G3')
+
+ while self.is_running():
+
+ try:
+ async with connect_to_glasses.with_zeroconf(True, 10000) as g3:
+ async with g3.stream_rtsp(scene_camera=True, gaze=True) as streams:
+ async with streams.gaze.decode() as gaze_stream, streams.scene_camera.decode() as scene_stream:
+ while True:
+ frame, frame_timestamp = await scene_stream.get()
+ gaze, gaze_timestamp = await gaze_stream.get()
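+ # Align streams: re-read until both timestamps are available, then drop gaze samples older than the scene frame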
+ while gaze_timestamp is None or frame_timestamp is None:
+ if frame_timestamp is None:
+ frame, frame_timestamp = await scene_stream.get()
+ if gaze_timestamp is None:
+ gaze, gaze_timestamp = await gaze_stream.get()
+ while gaze_timestamp < frame_timestamp:
+ gaze, gaze_timestamp = await gaze_stream.get()
+ while gaze_timestamp is None:
+ gaze, gaze_timestamp = await gaze_stream.get()
+
+ scene_frame = frame.to_ndarray(format="bgr24")
+
+ gaze_timestamp = int((gaze_timestamp - self.__start_time) * 1e3)
+
+ logging.debug('Gaze received at %i timestamp', gaze_timestamp)
+
+ # If given gaze data
+ if "gaze2d" in gaze:
+ gaze2d = gaze["gaze2d"]
+ # Convert rational (x,y) to pixel location (x,y)
+ h, w = scene_frame.shape[:2]
+ gaze_scene = (int(gaze2d[0] * w), int(gaze2d[1] * h))
+
+
+ self._process_gaze_position(
+ timestamp=gaze_timestamp,
+ x=gaze_scene[0],
+ y=gaze_scene[1])
+ else:
+ # Process empty gaze position
+ logging.debug('Not worn at %i timestamp', gaze_timestamp)
+
+ scene_timestamp = int((frame_timestamp - self.__start_time) * 1e3)
+
+ logging.debug('Video received at %i timestamp', scene_timestamp)
+
+ self._process_camera_image(
+ timestamp=scene_timestamp,
+ image=scene_frame)
+
+ except KeyboardInterrupt:
+ pass
+
+
+
+ @DataFeatures.PipelineStepExit
+ def __exit__(self, exception_type, exception_value, exception_traceback):
+
+ logging.debug('Tobii Pro G3 context stops...')
+
+ # Close data stream
+ self.stop()
+
diff --git a/src/argaze/utils/demo/aruco_markers_pipeline.json b/src/argaze/utils/demo/aruco_markers_pipeline.json
index f29111b..8221cec 100644
--- a/src/argaze/utils/demo/aruco_markers_pipeline.json
+++ b/src/argaze/utils/demo/aruco_markers_pipeline.json
@@ -1,12 +1,12 @@
{
"argaze.ArUcoMarker.ArUcoCamera.ArUcoCamera": {
"name": "Head-mounted camera",
- "size": [1920, 1080],
+ "size": [1088, 1080],
"copy_background_into_scenes_frames": true,
"aruco_detector": {
"dictionary": "DICT_APRILTAG_16h5",
"parameters": {
- "useAruco3Detection": 1
+ "useAruco3Detection": true
}
},
"sides_mask": 420,
@@ -56,7 +56,7 @@
},
"frames": {
"GrayRectangle": {
- "size": [1920, 1149],
+ "size": [1088, 1080],
"background": "frame_background.jpg",
"gaze_movement_identifier": {
"argaze.GazeAnalysis.DispersionThresholdIdentification.GazeMovementIdentifier": {
@@ -67,11 +67,35 @@
"scan_path": {
"duration_max": 10000
},
+ "scan_path_analyzers": {
+ "argaze.GazeAnalysis.Basic.ScanPathAnalyzer": {},
+ "argaze.GazeAnalysis.KCoefficient.ScanPathAnalyzer": {},
+ "argaze.GazeAnalysis.NearestNeighborIndex.ScanPathAnalyzer": {
+ "size": [1088, 1080]
+ },
+ "argaze.GazeAnalysis.ExploreExploitRatio.ScanPathAnalyzer": {
+ "short_fixation_duration_threshold": 0
+ }
+ },
"layers": {
"demo_layer": {
"aoi_scene": "aoi_2d_scene.json",
"aoi_matcher": {
"argaze.GazeAnalysis.FocusPointInside.AOIMatcher": {}
+ },
+ "aoi_scan_path": {
+ "duration_max": 10000
+ },
+ "aoi_scan_path_analyzers": {
+ "argaze.GazeAnalysis.Basic.AOIScanPathAnalyzer": {},
+ "argaze.GazeAnalysis.TransitionMatrix.AOIScanPathAnalyzer": {},
+ "argaze.GazeAnalysis.KCoefficient.AOIScanPathAnalyzer": {},
+ "argaze.GazeAnalysis.LempelZivComplexity.AOIScanPathAnalyzer": {},
+ "argaze.GazeAnalysis.NGram.AOIScanPathAnalyzer": {
+ "n_min": 3,
+ "n_max": 3
+ },
+ "argaze.GazeAnalysis.Entropy.AOIScanPathAnalyzer":{}
}
}
},
@@ -116,17 +140,7 @@
}
}
}
- },
- "angle_tolerance": 15.0,
- "distance_tolerance": 2.54
- }
- },
- "observers": {
- "argaze.utils.UtilsFeatures.LookPerformanceRecorder": {
- "path": "_export/records/look_performance.csv"
- },
- "argaze.utils.UtilsFeatures.WatchPerformanceRecorder": {
- "path": "_export/records/watch_performance.csv"
+ }
}
}
}
diff --git a/src/argaze/utils/demo/gaze_analysis_pipeline.json b/src/argaze/utils/demo/gaze_analysis_pipeline.json
index 8b8212e..6e23321 100644
--- a/src/argaze/utils/demo/gaze_analysis_pipeline.json
+++ b/src/argaze/utils/demo/gaze_analysis_pipeline.json
@@ -1,7 +1,7 @@
{
"argaze.ArFeatures.ArFrame": {
"name": "GrayRectangle",
- "size": [1920, 1149],
+ "size": [1088, 1080],
"background": "frame_background.jpg",
"gaze_movement_identifier": {
"argaze.GazeAnalysis.DispersionThresholdIdentification.GazeMovementIdentifier": {
diff --git a/src/argaze/utils/demo/opencv_camera_context.json b/src/argaze/utils/demo/opencv_camera_context.json
new file mode 100644
index 0000000..b280c73
--- /dev/null
+++ b/src/argaze/utils/demo/opencv_camera_context.json
@@ -0,0 +1,7 @@
+{
+ "argaze.utils.contexts.OpenCV.Camera" : {
+ "name": "OpenCV camera",
+ "identifier": 0,
+ "pipeline": "aruco_markers_pipeline.json"
+ }
+} \ No newline at end of file
diff --git a/src/argaze/utils/demo/opencv_movie_context.json b/src/argaze/utils/demo/opencv_movie_context.json
index f7da7ee..930a0fc 100644
--- a/src/argaze/utils/demo/opencv_movie_context.json
+++ b/src/argaze/utils/demo/opencv_movie_context.json
@@ -1,6 +1,6 @@
{
"argaze.utils.contexts.OpenCV.Movie" : {
- "name": "OpenCV Window",
+ "name": "OpenCV movie",
"path": "./src/argaze/utils/demo/tobii_record/segments/1/fullstream.mp4",
"pipeline": "aruco_markers_pipeline.json"
}
diff --git a/src/argaze/utils/demo/pupillabs_invisible_live_stream_context.json b/src/argaze/utils/demo/pupillabs_invisible_live_stream_context.json
new file mode 100644
index 0000000..3418de6
--- /dev/null
+++ b/src/argaze/utils/demo/pupillabs_invisible_live_stream_context.json
@@ -0,0 +1,6 @@
+{
+ "argaze.utils.contexts.PupilLabsInvisible.LiveStream" : {
+ "name": "PupilLabs Invisible",
+ "pipeline": "aruco_markers_pipeline.json"
+ }
+} \ No newline at end of file
diff --git a/src/argaze/utils/demo/pupillabs_live_stream_context.json b/src/argaze/utils/demo/pupillabs_live_stream_context.json
deleted file mode 100644
index bcb7263..0000000
--- a/src/argaze/utils/demo/pupillabs_live_stream_context.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
- "argaze.utils.contexts.PupilLabs.LiveStream" : {
- "name": "PupilLabs",
- "pipeline": "aruco_markers_pipeline.json"
- }
-} \ No newline at end of file
diff --git a/src/argaze/utils/demo/recorders.py b/src/argaze/utils/demo/recorders.py
index 82022ce..979eaff 100644
--- a/src/argaze/utils/demo/recorders.py
+++ b/src/argaze/utils/demo/recorders.py
@@ -43,7 +43,7 @@ class FixationRecorder(UtilsFeatures.FileWriter):
log = (
timestamp,
- frame.last_gaze_movement().focus,
+ frame.last_gaze_movement().focus.value,
frame.last_gaze_movement().duration,
frame.layers['demo_layer'].last_looked_aoi_name()
)
@@ -117,61 +117,3 @@ class AOIScanPathAnalysisRecorder(UtilsFeatures.FileWriter):
)
self.write(log)
-
-
-class ArUcoMarkersPoseRecorder(DataFeatures.PipelineStepObject):
-
- @DataFeatures.PipelineStepInit
- def __init__(self, **kwargs):
-
- # Init private attributes
- self.__output_folder = None
- self.__size = None
-
- @property
- def output_folder(self) -> str:
- """folder path where to write ArUco markers pose."""
- return self.__output_folder
-
- @output_folder.setter
- def output_folder(self, output_folder: str):
-
- self.__output_folder = output_folder
-
- @property
- def size(self) -> float:
- """Expected size in centimeters of detected markers."""
- return self.__output_folder
-
- @size.setter
- def size(self, size: float):
-
- self.__size = size
-
- @property
- def ids(self) -> list:
- """Ids of markers to estimate pose (default all)."""
- return self.__ids
-
- @ids.setter
- def ids(self, ids: list):
-
- self.__ids = ids
-
- def on_detect_markers(self, timestamp, aruco_detector, exception):
-
- logging.info('%s writes estimated markers pose into %s', DataFeatures.get_class_path(self), self.__output_folder)
-
- if self.__size is not None:
-
- # Estimate all detected markers pose
- aruco_detector.estimate_markers_pose(self.__size, ids = self.__ids)
-
- # Build ArUco markers group from detected markers
- aruco_markers_group = ArUcoMarkerGroup.ArUcoMarkerGroup(dictionary=aruco_detector.dictionary, places=aruco_detector.detected_markers())
-
- if self.__output_folder is not None:
-
- # Write ArUco markers group
- aruco_markers_group.to_obj(f'{self.__output_folder}/{int(timestamp)}-aruco_markers_group.obj')
- 
\ No newline at end of file
diff --git a/src/argaze/utils/demo/tobii_live_stream_context.json b/src/argaze/utils/demo/tobii_g2_live_stream_context.json
index 6950617..6950617 100644
--- a/src/argaze/utils/demo/tobii_live_stream_context.json
+++ b/src/argaze/utils/demo/tobii_g2_live_stream_context.json
diff --git a/src/argaze/utils/demo/tobii_g3_live_stream_context.json b/src/argaze/utils/demo/tobii_g3_live_stream_context.json
new file mode 100644
index 0000000..20f6ab1
--- /dev/null
+++ b/src/argaze/utils/demo/tobii_g3_live_stream_context.json
@@ -0,0 +1,6 @@
+{
+ "argaze.utils.contexts.TobiiProGlasses3.LiveStream" : {
+ "name": "Tobii Pro Glasses 3 live stream",
+ "pipeline": "aruco_markers_pipeline.json"
+ }
+}
\ No newline at end of file
diff --git a/src/argaze/utils/demo/tobii_post_processing_context.json b/src/argaze/utils/demo/tobii_segment_playback_context.json
index 7a73512..d481b23 100644
--- a/src/argaze/utils/demo/tobii_post_processing_context.json
+++ b/src/argaze/utils/demo/tobii_segment_playback_context.json
@@ -1,6 +1,6 @@
{
- "argaze.utils.contexts.TobiiProGlasses2.PostProcessing" : {
- "name": "Tobii Pro Glasses 2 post-processing",
+ "argaze.utils.contexts.TobiiProGlasses2.SegmentPlayback" : {
+ "name": "Tobii Pro Glasses 2 segment playback",
"segment": "./src/argaze/utils/demo/tobii_record/segments/1",
"pipeline": "aruco_markers_pipeline.json"
}
diff --git a/src/argaze/utils/estimate_markers_pose/observers.py b/src/argaze/utils/estimate_markers_pose/observers.py
index 88da4f9..bbca1ad 100644
--- a/src/argaze/utils/estimate_markers_pose/observers.py
+++ b/src/argaze/utils/estimate_markers_pose/observers.py
@@ -18,6 +18,8 @@ __copyright__ = "Copyright 2023, Ecole Nationale de l'Aviation Civile (ENAC)"
__license__ = "GPLv3"
import logging
+import os
+import pathlib
from argaze import DataFeatures
from argaze.ArUcoMarker import ArUcoMarkerGroup
@@ -41,7 +43,11 @@ class ArUcoMarkersPoseRecorder(DataFeatures.PipelineStepObject):
@output_folder.setter
def output_folder(self, output_folder: str):
- self.__output_folder = output_folder
+ self.__output_folder = pathlib.Path(output_folder)
+
+ if not os.path.exists(self.__output_folder.absolute()):
+
+ os.makedirs(self.__output_folder.absolute())
def on_detect_markers(self, timestamp, aruco_detector, exception):
diff --git a/src/argaze/utils/estimate_markers_pose/pipeline.json b/src/argaze/utils/estimate_markers_pose/pipeline.json
index 2e0ab76..c16cce3 100644
--- a/src/argaze/utils/estimate_markers_pose/pipeline.json
+++ b/src/argaze/utils/estimate_markers_pose/pipeline.json
@@ -7,7 +7,7 @@
"pose_size": 4,
"pose_ids": [],
"parameters": {
- "useAruco3Detection": 1
+ "useAruco3Detection": true
},
"observers":{
"observers.ArUcoMarkersPoseRecorder": {
diff --git a/utils/processTobiiRecords.sh b/utils/processTobiiRecords.sh
index 0cc3eb4..bbe6c86 100644
--- a/utils/processTobiiRecords.sh
+++ b/utils/processTobiiRecords.sh
@@ -12,7 +12,7 @@
# Arguments:
# $1: ArGaze context file
# $2: folder from where to look for Tobii records
-# $3: folder where to export ArGaze processing outputs
+# $3: folder where to export processing outputs
#######################################
# Check required arguments
@@ -103,12 +103,12 @@ function process_segment() {
mkdir -p $seg_output
cd $seg_output
- # Launch argaze with modified context
- echo "*** ArGaze processing starts"
+ # Launch modified context with argaze load command
+ echo "*** ArGaze starts context"
python -m argaze load $context_file
- echo "*** ArGaze processing ends"
+ echo "*** ArGaze ends context"
# Move back to context folder
cd $ctx_folder
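With the updated messages above, a typical invocation of the record-processing script is unchanged; a sketch assuming the argument order documented in the script header (placeholders to be replaced with real paths):

    bash utils/processTobiiRecords.sh <context_file> <tobii_records_folder> <output_folder>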