authorThéo de la Hogue2023-11-07 15:54:45 +0100
committerThéo de la Hogue2023-11-07 15:54:45 +0100
commit78ce6ffc892ef7d64a8d1da0dbdfcbf34d214bbd (patch)
tree4509c14aa1800d2666c50c47549a044e5a6c11d0
parentbc9257268bb54ea68f777cbb853dc6498274dd99 (diff)
parentf8b1a36c9e486ef19f62159475b9bf19a5b90a03 (diff)
downloadargaze-78ce6ffc892ef7d64a8d1da0dbdfcbf34d214bbd.zip
argaze-78ce6ffc892ef7d64a8d1da0dbdfcbf34d214bbd.tar.gz
argaze-78ce6ffc892ef7d64a8d1da0dbdfcbf34d214bbd.tar.bz2
argaze-78ce6ffc892ef7d64a8d1da0dbdfcbf34d214bbd.tar.xz
Merge branch 'master' of ssh://git.recherche.enac.fr/interne-ihm-aero/eye-tracking/argaze
-rw-r--r--  docs/img/aoi_2d_description.png  bin 0 -> 7458 bytes
-rw-r--r--  docs/img/aoi_3d_description.png  bin 0 -> 16928 bytes
-rw-r--r--  docs/img/aoi_description.png  bin 14538 -> 0 bytes
-rw-r--r--  docs/img/aoi_matcher.png  bin 0 -> 16444 bytes
-rw-r--r--  docs/img/aoi_projection.png  bin 20707 -> 0 bytes
-rw-r--r--  docs/img/aoi_scan_path.png  bin 29067 -> 13583 bytes
-rw-r--r--  docs/img/ar_frame.png  bin 24931 -> 16684 bytes
-rw-r--r--  docs/img/ar_frame_background.png  bin 101101 -> 0 bytes
-rw-r--r--  docs/img/ar_frame_gaze_movement_identifier.png  bin 27362 -> 0 bytes
-rw-r--r--  docs/img/ar_frame_heatmap.png  bin 60597 -> 0 bytes
-rw-r--r--  docs/img/ar_frame_scan_path.png  bin 18906 -> 0 bytes
-rw-r--r--  docs/img/ar_frame_visualisation.png  bin 31964 -> 0 bytes
-rw-r--r--  docs/img/ar_layer.png  bin 19460 -> 16374 bytes
-rw-r--r--  docs/img/ar_layer_aoi_matcher.png  bin 22948 -> 0 bytes
-rw-r--r--  docs/img/ar_layer_aoi_scan_path.png  bin 14711 -> 0 bytes
-rw-r--r--  docs/img/ar_layer_aoi_scene.png  bin 9014 -> 0 bytes
-rw-r--r--  docs/img/aruco_camera_aoi_frame.png  bin 0 -> 49405 bytes
-rw-r--r--  docs/img/aruco_camera_aoi_projection.png  bin 57019 -> 37858 bytes
-rw-r--r--  docs/img/aruco_camera_frame.png  bin 49201 -> 31515 bytes
-rw-r--r--  docs/img/aruco_camera_gaze_movement_identification.png  bin 56059 -> 36834 bytes
-rw-r--r--  docs/img/aruco_camera_markers_detection.png  bin 52844 -> 35629 bytes
-rw-r--r--  docs/img/aruco_camera_pose_estimation.png  bin 51798 -> 34695 bytes
-rw-r--r--  docs/img/aruco_dictionaries.png  bin 89114 -> 66662 bytes
-rw-r--r--  docs/img/aruco_markers_description.png  bin 17207 -> 16455 bytes
-rw-r--r--  docs/img/aruco_scene.png  bin 17124 -> 0 bytes
-rw-r--r--  docs/img/background.png  bin 0 -> 189282 bytes
-rw-r--r--  docs/img/circle_intersection.png  bin 10800 -> 0 bytes
-rw-r--r--  docs/img/contains_point.png  bin 6505 -> 0 bytes
-rw-r--r--  docs/img/detected_markers.png  bin 14941 -> 0 bytes
-rw-r--r--  docs/img/distance.png  bin 9679 -> 0 bytes
-rw-r--r--  docs/img/fixation_and_saccade.png  bin 22230 -> 0 bytes
-rw-r--r--  docs/img/gaze_movement_identifier.png  bin 0 -> 22800 bytes
-rw-r--r--  docs/img/get_last_before.png  bin 9017 -> 0 bytes
-rw-r--r--  docs/img/get_last_until.png  bin 9113 -> 0 bytes
-rw-r--r--  docs/img/heatmap.png  bin 46531 -> 50869 bytes
-rw-r--r--  docs/img/opencv_aruco.png  bin 0 -> 25065 bytes
-rw-r--r--  docs/img/optic_calibrated.png  bin 9118 -> 0 bytes
-rw-r--r--  docs/img/optic_distorsion.png  bin 21220 -> 0 bytes
-rw-r--r--  docs/img/overlapping.png  bin 15668 -> 0 bytes
-rw-r--r--  docs/img/point_spread.png  bin 8542 -> 8454 bytes
-rw-r--r--  docs/img/pop_last_before.png  bin 10447 -> 0 bytes
-rw-r--r--  docs/img/pop_last_until.png  bin 11074 -> 0 bytes
-rw-r--r--  docs/img/pose_estimation.png  bin 15818 -> 0 bytes
-rw-r--r--  docs/img/scan_path.png  bin 20567 -> 12724 bytes
-rw-r--r--  docs/img/scene.png  bin 44671 -> 41117 bytes
-rw-r--r--  docs/img/timestamped_gaze_positions.png  bin 23134 -> 11209 bytes
-rw-r--r--  docs/img/vision_cone.png  bin 14240 -> 0 bytes
-rw-r--r--  docs/img/visualisation.png  bin 0 -> 23984 bytes
-rw-r--r--  docs/index.md  4
-rw-r--r--  docs/user_guide/ar_environment/environment_exploitation.md  36
-rw-r--r--  docs/user_guide/ar_environment/environment_setup.md  77
-rw-r--r--  docs/user_guide/ar_environment/introduction.md  6
-rw-r--r--  docs/user_guide/areas_of_interest/aoi_matching.md  48
-rw-r--r--  docs/user_guide/areas_of_interest/aoi_scene_description.md  83
-rw-r--r--  docs/user_guide/areas_of_interest/aoi_scene_projection.md  22
-rw-r--r--  docs/user_guide/areas_of_interest/heatmap.md  40
-rw-r--r--  docs/user_guide/areas_of_interest/introduction.md  8
-rw-r--r--  docs/user_guide/areas_of_interest/vision_cone_filtering.md  18
-rw-r--r--  docs/user_guide/aruco_markers/dictionary_selection.md  17
-rw-r--r--  docs/user_guide/aruco_markers/introduction.md  15
-rw-r--r--  docs/user_guide/aruco_markers/markers_creation.md  17
-rw-r--r--  docs/user_guide/aruco_markers/markers_detection.md  47
-rw-r--r--  docs/user_guide/aruco_markers/markers_pose_estimation.md  20
-rw-r--r--  docs/user_guide/aruco_markers/markers_scene_description.md  146
-rw-r--r--  docs/user_guide/aruco_markers_pipeline/advanced_topics/aruco_detector_configuration.md  40
-rw-r--r--  docs/user_guide/aruco_markers_pipeline/advanced_topics/optic_parameters_calibration.md  66
-rw-r--r--  docs/user_guide/aruco_markers_pipeline/advanced_topics/scripting.md  136
-rw-r--r--  docs/user_guide/aruco_markers_pipeline/aoi_3d_description.md  53
-rw-r--r--  docs/user_guide/aruco_markers_pipeline/aoi_3d_frame.md  128
-rw-r--r--  docs/user_guide/aruco_markers_pipeline/aoi_3d_projection.md (renamed from docs/user_guide/aruco_markers_pipeline/aoi_projection.md)  51
-rw-r--r--  docs/user_guide/aruco_markers_pipeline/aoi_description.md  62
-rw-r--r--  docs/user_guide/aruco_markers_pipeline/aruco_markers_description.md  35
-rw-r--r--  docs/user_guide/aruco_markers_pipeline/configuration_and_execution.md  41
-rw-r--r--  docs/user_guide/aruco_markers_pipeline/introduction.md  24
-rw-r--r--  docs/user_guide/aruco_markers_pipeline/pose_estimation.md  30
-rw-r--r--  docs/user_guide/gaze_analysis/gaze_movement.md  163
-rw-r--r--  docs/user_guide/gaze_analysis/gaze_position.md  98
-rw-r--r--  docs/user_guide/gaze_analysis/introduction.md  7
-rw-r--r--  docs/user_guide/gaze_analysis/scan_path.md  169
-rw-r--r--  docs/user_guide/gaze_analysis_pipeline/advanced_topics/module_loading.md  10
-rw-r--r--  docs/user_guide/gaze_analysis_pipeline/advanced_topics/scripting.md  12
-rw-r--r--  docs/user_guide/gaze_analysis_pipeline/aoi_2d_description.md  57
-rw-r--r--  docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md  64
-rw-r--r--  docs/user_guide/gaze_analysis_pipeline/background.md  8
-rw-r--r--  docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md  24
-rw-r--r--  docs/user_guide/gaze_analysis_pipeline/heatmap.md  10
-rw-r--r--  docs/user_guide/gaze_analysis_pipeline/introduction.md  11
-rw-r--r--  docs/user_guide/gaze_analysis_pipeline/logging.md  4
-rw-r--r--  docs/user_guide/gaze_analysis_pipeline/pipeline_modules/aoi_matchers.md  2
-rw-r--r--  docs/user_guide/gaze_analysis_pipeline/pipeline_modules/aoi_scan_path_analyzers.md  2
-rw-r--r--  docs/user_guide/gaze_analysis_pipeline/pipeline_modules/scan_path_analyzers.md  6
-rw-r--r--  docs/user_guide/gaze_analysis_pipeline/timestamped_gaze_positions_edition.md  2
-rw-r--r--  docs/user_guide/gaze_analysis_pipeline/visualisation.md  37
-rw-r--r--  docs/user_guide/timestamped_data/data_synchronisation.md  106
-rw-r--r--  docs/user_guide/timestamped_data/introduction.md  6
-rw-r--r--  docs/user_guide/timestamped_data/ordered_dictionary.md  19
-rw-r--r--  docs/user_guide/timestamped_data/pandas_dataframe_conversion.md  41
-rw-r--r--  docs/user_guide/timestamped_data/saving_and_loading.md  14
-rw-r--r--  docs/user_guide/utils/ready-made_scripts.md  6
-rw-r--r--  mkdocs.yml  35
-rw-r--r--  setup.py  2
-rw-r--r--  src/argaze.test/AreaOfInterest/AOI2DScene.py  6
-rw-r--r--  src/argaze.test/AreaOfInterest/AOI3DScene.py  6
-rw-r--r--  src/argaze.test/AreaOfInterest/AOIFeatures.py  8
-rw-r--r--  src/argaze.test/GazeAnalysis/ExploreExploitRatio.py (renamed from src/argaze.test/GazeAnalysis/ExploitExploreRatio.py)  6
-rw-r--r--  src/argaze.test/GazeFeatures.py  4
-rw-r--r--  src/argaze/ArFeatures.py  322
-rw-r--r--  src/argaze/ArUcoMarkers/ArUcoCamera.py  42
-rw-r--r--  src/argaze/ArUcoMarkers/ArUcoDetector.py  108
-rw-r--r--  src/argaze/ArUcoMarkers/ArUcoMarker.py  4
-rw-r--r--  src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py  412
-rw-r--r--  src/argaze/ArUcoMarkers/ArUcoScene.py  32
-rw-r--r--  src/argaze/AreaOfInterest/AOI2DScene.py  119
-rw-r--r--  src/argaze/AreaOfInterest/AOI3DScene.py  19
-rw-r--r--  src/argaze/AreaOfInterest/AOIFeatures.py  144
-rw-r--r--  src/argaze/DataStructures.py  25
-rw-r--r--  src/argaze/GazeAnalysis/Basic.py  23
-rw-r--r--  src/argaze/GazeAnalysis/DeviationCircleCoverage.py  35
-rw-r--r--  src/argaze/GazeAnalysis/DispersionThresholdIdentification.py  13
-rw-r--r--  src/argaze/GazeAnalysis/ExploreExploitRatio.py (renamed from src/argaze/GazeAnalysis/ExploitExploreRatio.py)  21
-rw-r--r--  src/argaze/GazeAnalysis/FocusPointInside.py  8
-rw-r--r--  src/argaze/GazeAnalysis/KCoefficient.py  29
-rw-r--r--  src/argaze/GazeAnalysis/LinearRegression.py  107
-rw-r--r--  src/argaze/GazeAnalysis/TransitionMatrix.py  2
-rw-r--r--  src/argaze/GazeAnalysis/VelocityThresholdIdentification.py  13
-rw-r--r--  src/argaze/GazeAnalysis/__init__.py  2
-rw-r--r--  src/argaze/GazeFeatures.py  146
-rw-r--r--  src/argaze/utils/aruco_markers_group_export.py  160
-rw-r--r--  src/argaze/utils/aruco_markers_scene_export.py  176
-rw-r--r--  src/argaze/utils/demo_aruco_markers_run.py  76
-rw-r--r--  src/argaze/utils/demo_data/aoi_2d_scene.json  18
-rw-r--r--  src/argaze/utils/demo_data/aoi_3d_scene.obj  50
-rw-r--r--  src/argaze/utils/demo_data/demo_aruco_markers_setup.json  54
-rw-r--r--  src/argaze/utils/demo_data/demo_gaze_analysis_setup.json  35
-rw-r--r--  src/argaze/utils/demo_gaze_analysis_run.py  26
135 files changed, 1980 insertions, 2456 deletions
diff --git a/docs/img/aoi_2d_description.png b/docs/img/aoi_2d_description.png
new file mode 100644
index 0000000..51a98b0
--- /dev/null
+++ b/docs/img/aoi_2d_description.png
Binary files differ
diff --git a/docs/img/aoi_3d_description.png b/docs/img/aoi_3d_description.png
new file mode 100644
index 0000000..caf7efc
--- /dev/null
+++ b/docs/img/aoi_3d_description.png
Binary files differ
diff --git a/docs/img/aoi_description.png b/docs/img/aoi_description.png
deleted file mode 100644
index 794ef68..0000000
--- a/docs/img/aoi_description.png
+++ /dev/null
Binary files differ
diff --git a/docs/img/aoi_matcher.png b/docs/img/aoi_matcher.png
new file mode 100644
index 0000000..c9c6dcd
--- /dev/null
+++ b/docs/img/aoi_matcher.png
Binary files differ
diff --git a/docs/img/aoi_projection.png b/docs/img/aoi_projection.png
deleted file mode 100644
index a83b9cd..0000000
--- a/docs/img/aoi_projection.png
+++ /dev/null
Binary files differ
diff --git a/docs/img/aoi_scan_path.png b/docs/img/aoi_scan_path.png
index 7cac491..80c65d4 100644
--- a/docs/img/aoi_scan_path.png
+++ b/docs/img/aoi_scan_path.png
Binary files differ
diff --git a/docs/img/ar_frame.png b/docs/img/ar_frame.png
index 65fa0ea..6ab7eeb 100644
--- a/docs/img/ar_frame.png
+++ b/docs/img/ar_frame.png
Binary files differ
diff --git a/docs/img/ar_frame_background.png b/docs/img/ar_frame_background.png
deleted file mode 100644
index 7bc16da..0000000
--- a/docs/img/ar_frame_background.png
+++ /dev/null
Binary files differ
diff --git a/docs/img/ar_frame_gaze_movement_identifier.png b/docs/img/ar_frame_gaze_movement_identifier.png
deleted file mode 100644
index 8a66cac..0000000
--- a/docs/img/ar_frame_gaze_movement_identifier.png
+++ /dev/null
Binary files differ
diff --git a/docs/img/ar_frame_heatmap.png b/docs/img/ar_frame_heatmap.png
deleted file mode 100644
index 812cc8f..0000000
--- a/docs/img/ar_frame_heatmap.png
+++ /dev/null
Binary files differ
diff --git a/docs/img/ar_frame_scan_path.png b/docs/img/ar_frame_scan_path.png
deleted file mode 100644
index 671d6a5..0000000
--- a/docs/img/ar_frame_scan_path.png
+++ /dev/null
Binary files differ
diff --git a/docs/img/ar_frame_visualisation.png b/docs/img/ar_frame_visualisation.png
deleted file mode 100644
index a9c9032..0000000
--- a/docs/img/ar_frame_visualisation.png
+++ /dev/null
Binary files differ
diff --git a/docs/img/ar_layer.png b/docs/img/ar_layer.png
index 418d879..ec42c22 100644
--- a/docs/img/ar_layer.png
+++ b/docs/img/ar_layer.png
Binary files differ
diff --git a/docs/img/ar_layer_aoi_matcher.png b/docs/img/ar_layer_aoi_matcher.png
deleted file mode 100644
index 63caf4d..0000000
--- a/docs/img/ar_layer_aoi_matcher.png
+++ /dev/null
Binary files differ
diff --git a/docs/img/ar_layer_aoi_scan_path.png b/docs/img/ar_layer_aoi_scan_path.png
deleted file mode 100644
index 1a4dad3..0000000
--- a/docs/img/ar_layer_aoi_scan_path.png
+++ /dev/null
Binary files differ
diff --git a/docs/img/ar_layer_aoi_scene.png b/docs/img/ar_layer_aoi_scene.png
deleted file mode 100644
index 96bfc12..0000000
--- a/docs/img/ar_layer_aoi_scene.png
+++ /dev/null
Binary files differ
diff --git a/docs/img/aruco_camera_aoi_frame.png b/docs/img/aruco_camera_aoi_frame.png
new file mode 100644
index 0000000..f21cc8d
--- /dev/null
+++ b/docs/img/aruco_camera_aoi_frame.png
Binary files differ
diff --git a/docs/img/aruco_camera_aoi_projection.png b/docs/img/aruco_camera_aoi_projection.png
index 59a8ab0..df1ec4c 100644
--- a/docs/img/aruco_camera_aoi_projection.png
+++ b/docs/img/aruco_camera_aoi_projection.png
Binary files differ
diff --git a/docs/img/aruco_camera_frame.png b/docs/img/aruco_camera_frame.png
index 443285f..0825f18 100644
--- a/docs/img/aruco_camera_frame.png
+++ b/docs/img/aruco_camera_frame.png
Binary files differ
diff --git a/docs/img/aruco_camera_gaze_movement_identification.png b/docs/img/aruco_camera_gaze_movement_identification.png
index fc9ff39..34cc74f 100644
--- a/docs/img/aruco_camera_gaze_movement_identification.png
+++ b/docs/img/aruco_camera_gaze_movement_identification.png
Binary files differ
diff --git a/docs/img/aruco_camera_markers_detection.png b/docs/img/aruco_camera_markers_detection.png
index 6192e09..a954313 100644
--- a/docs/img/aruco_camera_markers_detection.png
+++ b/docs/img/aruco_camera_markers_detection.png
Binary files differ
diff --git a/docs/img/aruco_camera_pose_estimation.png b/docs/img/aruco_camera_pose_estimation.png
index b6c2675..ebc1993 100644
--- a/docs/img/aruco_camera_pose_estimation.png
+++ b/docs/img/aruco_camera_pose_estimation.png
Binary files differ
diff --git a/docs/img/aruco_dictionaries.png b/docs/img/aruco_dictionaries.png
index ed5f287..033bbfb 100644
--- a/docs/img/aruco_dictionaries.png
+++ b/docs/img/aruco_dictionaries.png
Binary files differ
diff --git a/docs/img/aruco_markers_description.png b/docs/img/aruco_markers_description.png
index 2f3d1c2..d7c139c 100644
--- a/docs/img/aruco_markers_description.png
+++ b/docs/img/aruco_markers_description.png
Binary files differ
diff --git a/docs/img/aruco_scene.png b/docs/img/aruco_scene.png
deleted file mode 100644
index d8aea8e..0000000
--- a/docs/img/aruco_scene.png
+++ /dev/null
Binary files differ
diff --git a/docs/img/background.png b/docs/img/background.png
new file mode 100644
index 0000000..3faef14
--- /dev/null
+++ b/docs/img/background.png
Binary files differ
diff --git a/docs/img/circle_intersection.png b/docs/img/circle_intersection.png
deleted file mode 100644
index 6893d32..0000000
--- a/docs/img/circle_intersection.png
+++ /dev/null
Binary files differ
diff --git a/docs/img/contains_point.png b/docs/img/contains_point.png
deleted file mode 100644
index 71a1050..0000000
--- a/docs/img/contains_point.png
+++ /dev/null
Binary files differ
diff --git a/docs/img/detected_markers.png b/docs/img/detected_markers.png
deleted file mode 100644
index 588364d..0000000
--- a/docs/img/detected_markers.png
+++ /dev/null
Binary files differ
diff --git a/docs/img/distance.png b/docs/img/distance.png
deleted file mode 100644
index 31cd249..0000000
--- a/docs/img/distance.png
+++ /dev/null
Binary files differ
diff --git a/docs/img/fixation_and_saccade.png b/docs/img/fixation_and_saccade.png
deleted file mode 100644
index 1bd91b9..0000000
--- a/docs/img/fixation_and_saccade.png
+++ /dev/null
Binary files differ
diff --git a/docs/img/gaze_movement_identifier.png b/docs/img/gaze_movement_identifier.png
new file mode 100644
index 0000000..14dfad8
--- /dev/null
+++ b/docs/img/gaze_movement_identifier.png
Binary files differ
diff --git a/docs/img/get_last_before.png b/docs/img/get_last_before.png
deleted file mode 100644
index 97d4170..0000000
--- a/docs/img/get_last_before.png
+++ /dev/null
Binary files differ
diff --git a/docs/img/get_last_until.png b/docs/img/get_last_until.png
deleted file mode 100644
index 4af2c26..0000000
--- a/docs/img/get_last_until.png
+++ /dev/null
Binary files differ
diff --git a/docs/img/heatmap.png b/docs/img/heatmap.png
index 5f07d77..534ccc7 100644
--- a/docs/img/heatmap.png
+++ b/docs/img/heatmap.png
Binary files differ
diff --git a/docs/img/opencv_aruco.png b/docs/img/opencv_aruco.png
new file mode 100644
index 0000000..0aa161e
--- /dev/null
+++ b/docs/img/opencv_aruco.png
Binary files differ
diff --git a/docs/img/optic_calibrated.png b/docs/img/optic_calibrated.png
deleted file mode 100644
index 586c4d6..0000000
--- a/docs/img/optic_calibrated.png
+++ /dev/null
Binary files differ
diff --git a/docs/img/optic_distorsion.png b/docs/img/optic_distorsion.png
deleted file mode 100644
index 2de9937..0000000
--- a/docs/img/optic_distorsion.png
+++ /dev/null
Binary files differ
diff --git a/docs/img/overlapping.png b/docs/img/overlapping.png
deleted file mode 100644
index 0fc1b72..0000000
--- a/docs/img/overlapping.png
+++ /dev/null
Binary files differ
diff --git a/docs/img/point_spread.png b/docs/img/point_spread.png
index 7ee39bc..9d14a40 100644
--- a/docs/img/point_spread.png
+++ b/docs/img/point_spread.png
Binary files differ
diff --git a/docs/img/pop_last_before.png b/docs/img/pop_last_before.png
deleted file mode 100644
index 15d02a0..0000000
--- a/docs/img/pop_last_before.png
+++ /dev/null
Binary files differ
diff --git a/docs/img/pop_last_until.png b/docs/img/pop_last_until.png
deleted file mode 100644
index 94b0c37..0000000
--- a/docs/img/pop_last_until.png
+++ /dev/null
Binary files differ
diff --git a/docs/img/pose_estimation.png b/docs/img/pose_estimation.png
deleted file mode 100644
index d814575..0000000
--- a/docs/img/pose_estimation.png
+++ /dev/null
Binary files differ
diff --git a/docs/img/scan_path.png b/docs/img/scan_path.png
index 1c77598..72af153 100644
--- a/docs/img/scan_path.png
+++ b/docs/img/scan_path.png
Binary files differ
diff --git a/docs/img/scene.png b/docs/img/scene.png
index 818c301..e7edd88 100644
--- a/docs/img/scene.png
+++ b/docs/img/scene.png
Binary files differ
diff --git a/docs/img/timestamped_gaze_positions.png b/docs/img/timestamped_gaze_positions.png
index cc08ec0..c639019 100644
--- a/docs/img/timestamped_gaze_positions.png
+++ b/docs/img/timestamped_gaze_positions.png
Binary files differ
diff --git a/docs/img/vision_cone.png b/docs/img/vision_cone.png
deleted file mode 100644
index 19c5583..0000000
--- a/docs/img/vision_cone.png
+++ /dev/null
Binary files differ
diff --git a/docs/img/visualisation.png b/docs/img/visualisation.png
new file mode 100644
index 0000000..9076e7e
--- /dev/null
+++ b/docs/img/visualisation.png
Binary files differ
diff --git a/docs/index.md b/docs/index.md
index 2306490..00e2e29 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -18,13 +18,13 @@ First of all, **ArGaze** provides extensible modules library allowing to select
* **Area Of Interest (AOI) matching**: focus point inside, deviation circle coverage, ...
* **Scan path analysis**: transition matrix, entropy, exploit/explore ratio, ...
-Once incoming data formatted as required, all those gaze analysis features can be used with any screen-based eye tracker devices.
+Once incoming data are formatted as required, all those gaze analysis features can be used with any screen-based eye tracker device.
[Learn how to build gaze analysis pipelines for various use cases by reading user guide dedicated section](./user_guide/gaze_analysis_pipeline/introduction.md).
## Augmented reality based on ArUco markers pipeline
-Things goes harder when gaze data comes from head-mounted eye tracker devices. That's why **ArGaze** provides **Augmented Reality (AR)** support to map **Areas Of Interest (AOI)** on <a href="https://docs.opencv.org/4.x/d5/dae/tutorial_aruco_detection.html" target="_blank">OpenCV ArUco markers</a>.
+Things get harder when gaze data comes from head-mounted eye tracker devices. That's why **ArGaze** provides **Augmented Reality (AR)** support to map **Areas Of Interest (AOI)** on [OpenCV ArUco markers](https://www.sciencedirect.com/science/article/abs/pii/S0031320314000235).
![ArUco pipeline axis](img/aruco_pipeline_axis.png)
diff --git a/docs/user_guide/ar_environment/environment_exploitation.md b/docs/user_guide/ar_environment/environment_exploitation.md
deleted file mode 100644
index 9e4b236..0000000
--- a/docs/user_guide/ar_environment/environment_exploitation.md
+++ /dev/null
@@ -1,36 +0,0 @@
-Environment exploitation
-========================
-
-Once loaded, [ArCamera](../../argaze.md/#argaze.ArFeatures.ArCamera) assets can be exploited as illustrated below:
-
-```python
-# Access to AR environment ArUco detector passing it a image where to detect ArUco markers
-ar_camera.aruco_detector.detect_markers(image)
-
-# Access to an AR environment scene
-my_first_scene = ar_camera.scenes['my first AR scene']
-
-try:
-
-    # Try to estimate AR scene pose from detected markers
-    tvec, rmat, consistent_markers = my_first_scene.estimate_pose(ar_camera.aruco_detector.detected_markers)
-
-    # Project AR scene into camera image according estimated pose
-    # Optional visual_hfov argument is set to 160° to clip AOI scene according a cone vision
-    aoi2D_scene = my_first_scene.project(tvec, rmat, visual_hfov=160)
-
-    # Draw estimated AR scene axis
-    my_first_scene.draw_axis(image)
-
-    # Draw AOI2D scene projection
-    aoi2D_scene.draw(image)
-
-    # Do something with AOI2D scene projection
-    ...
-
-# Catch exceptions raised by estimate_pose and project methods
-except (ArFeatures.PoseEstimationFailed, ArFeatures.SceneProjectionFailed) as e:
-
-    print(e)
-
-```
diff --git a/docs/user_guide/ar_environment/environment_setup.md b/docs/user_guide/ar_environment/environment_setup.md
deleted file mode 100644
index 1f26d26..0000000
--- a/docs/user_guide/ar_environment/environment_setup.md
+++ /dev/null
@@ -1,77 +0,0 @@
-Environment Setup
-=================
-
-[ArCamera](../../argaze.md/#argaze.ArFeatures.ArCamera) setup is loaded from JSON file format.
-
-Each [ArCamera](../../argaze.md/#argaze.ArFeatures.ArCamera) defines a unique [ArUcoDetector](../../argaze.md/#argaze.ArUcoMarkers.ArUcoDetector.ArUcoDetector) dedicated to detection of markers from a specific [ArUcoMarkersDictionary](../../argaze.md/#argaze.ArUcoMarkers.ArUcoMarkersDictionary) and with a given size. However, it is possible to load multiple [ArScene](../../argaze.md/#argaze.ArFeatures.ArScene) into a same [ArCamera](../../argaze.md/#argaze.ArFeatures.ArCamera).
-
-Here is JSON environment file example where it is assumed that mentioned .obj files are located relatively to the environment file on disk.
-
-```
-{
-    "name": "my AR environment",
-    "aruco_detector": {
-        "dictionary": {
-            "name": "DICT_APRILTAG_16h5"
-        }
-        "marker_size": 5,
-        "optic_parameters": {
-            "rms": 0.6,
-            "dimensions": [
-                1920,
-                1080
-            ],
-            "K": [
-                [
-                    1135,
-                    0.0,
-                    956
-                ],
-                [
-                    0.0,
-                    1135,
-                    560
-                ],
-                [
-                    0.0,
-                    0.0,
-                    1.0
-                ]
-            ],
-            "D": [
-                0.01655492265003404,
-                0.1985524264972037,
-                0.002129965902489484,
-                -0.0019528582922179365,
-                -0.5792910353639452
-            ]
-        },
-        "parameters": {
-            "cornerRefinementMethod": 3,
-            "aprilTagQuadSigma": 2,
-            "aprilTagDeglitch": 1
-        }
-    },
-    "scenes": {
-        "my first AR scene" : {
-            "aruco_markers_group": "./first_scene/markers.obj",
-            "aoi_scene": "./first_scene/aoi.obj",
-            "angle_tolerance": 15.0,
-            "distance_tolerance": 2.54
-        },
-        "my second AR scene" : {
-            "aruco_markers_group": "./second_scene/markers.obj",
-            "aoi_scene": "./second_scene/aoi.obj",
-            "angle_tolerance": 15.0,
-            "distance_tolerance": 2.54
-        }
-    }
-}
-```
-
-```python
-from argaze import ArFeatures
-
-# Load AR environment
-ar_camera = ArFeatures.ArCamera.from_json('./environment.json')
-```
diff --git a/docs/user_guide/ar_environment/introduction.md b/docs/user_guide/ar_environment/introduction.md
deleted file mode 100644
index b19383b..0000000
--- a/docs/user_guide/ar_environment/introduction.md
+++ /dev/null
@@ -1,6 +0,0 @@
-AR environment setup
-====================
-
-ArGaze toolkit eases ArUco and AOI management in a single AR environment setup.
-
-This section refers to [ArFeatures](../../argaze.md/#argaze.ArFeatures).
diff --git a/docs/user_guide/areas_of_interest/aoi_matching.md b/docs/user_guide/areas_of_interest/aoi_matching.md
deleted file mode 100644
index 60467f9..0000000
--- a/docs/user_guide/areas_of_interest/aoi_matching.md
+++ /dev/null
@@ -1,48 +0,0 @@
----
-title: AOI matching
----
-
-AOI matching
-============
-
-Once [AOI3DScene](../../argaze.md/#argaze.AreaOfInterest.AOI3DScene) is projected as [AOI2DScene](../../argaze.md/#argaze.AreaOfInterest.AOI2DScene), it could be needed to know which AOI is looked.
-
-The [AreaOfInterest](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.AreaOfInterest) class in [AOIFeatures](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures) provides two ways to accomplish such task.
-
-## Pointer-based matching
-
-Test if 2D pointer is inside or not AOI using contains_point() method as illustrated below.
-
-![Contains point](../../img/contains_point.png)
-
-``` python
-pointer = (x, y)
-
-for name, aoi in aoi2D_scene.items():
-
-    if aoi.contains_point(pointer):
-
-        # Do something with looked aoi
-        ...
-
-```
-
-It is also possible to get where a pointer is looking inside an AOI provided that AOI is a rectangular plane:
-
-``` python
-
-inner_x, inner_y = aoi.inner_axis(pointer)
-
-```
-
-## Circle-based matching
-
-As positions have limited accuracy, it is possible to define a radius around a pointer to test circle intersection with AOI.
-
-![Circle intersection](../../img/circle_intersection.png)
-
-``` python
-
-intersection_shape, intersection_aoi_ratio, intersection_circle_ratio = aoi.circle_intersection(pointer, radius)
-
-```
diff --git a/docs/user_guide/areas_of_interest/aoi_scene_description.md b/docs/user_guide/areas_of_interest/aoi_scene_description.md
deleted file mode 100644
index b96c1e0..0000000
--- a/docs/user_guide/areas_of_interest/aoi_scene_description.md
+++ /dev/null
@@ -1,83 +0,0 @@
----
-title: AOI scene description
----
-
-AOI scene description
-=====================
-
-## 2D description
-
-An AOI scene can be described in 2D dimension using an [AOI2DScene](../../argaze.md/#argaze.AreaOfInterest.AOI2DScene) from a dictionary description.
-
-``` dict
-{
-    "tracking": [[672.0, 54.0], [1632.0, 54.0], [1632.0, 540.0], [672.0, 540.0]],
-    "system": [[0.0, 54.0], [672.0, 54.0], [672.0, 540.0], [0.0, 540.0]],
-    "communications": [[0.0, 594.0], [576.0, 594.0], [576.0, 1080.0], [0.0, 1080.0]],
-    "resources": [[576.0, 594.0], [1632.0, 594.0], [1632.0, 1080.0], [576.0, 1080.0]]
-}
-...
-```
-
-Here is a sample of code to show the loading of an [AOI2DScene](../../argaze.md/#argaze.AreaOfInterest.AOI2DScene) from a dictionary description:
-
-
-``` python
-from argaze.AreaOfInterest import AOI2DScene
-
-# Load an AOI2D scene from dictionary
-aoi_2d_scene = AOI2DScene.AOI2DScene(aoi_scene_dictionary)
-```
-
-## 3D description
-
-An AOI scene can be described in 3D dimension using an [AOI3DScene](../../argaze.md/#argaze.AreaOfInterest.AOI3DScene) built from a 3D model with all AOI as 3D planes and loaded through OBJ file format.
-Notice that plane normals are not needed and planes are not necessary 4 vertices shapes.
-
-``` obj
-o PIC_ND
-v 6.513238 -27.113548 -25.163900
-v 22.994461 -27.310783 -24.552130
-v 6.718690 -6.467261 -26.482569
-v 23.252594 -6.592890 -25.873484
-f 1 2 4 3
-o PIC_ND_Aircraft
-v 6.994747 -21.286463 -24.727146
-v 22.740919 -21.406120 -24.147078
-v 7.086208 -12.096219 -25.314123
-v 22.832380 -12.215876 -24.734055
-f 5 6 8 7
-o PIC_ND_Wind
-v 7.086199 -11.769333 -25.335127
-v 12.081032 -11.807289 -25.151123
-v 7.115211 -8.854101 -25.521320
-v 12.110044 -8.892057 -25.337317
-f 9 10 12 11
-o PIC_ND_Waypoint
-v 17.774197 -11.819057 -24.943428
-v 22.769030 -11.857013 -24.759424
-v 17.803209 -8.903825 -25.129622
-v 22.798042 -8.941781 -24.945618
-f 13 14 16 15
-...
-o Thrust_Lever
-v 19.046124 15.523837 4.774072
-v 18.997263 -0.967944 5.701000
-v 18.988382 15.923470 -13.243046
-v 18.921808 -0.417994 -17.869610
-v 19.032232 19.241346 -3.040264
-v 19.020988 6.392717 5.872663
-v 18.945322 6.876906 -17.699480
-s off
-f 185 190 186 188 191 187 189
-...
-```
-
-Here is a sample of code to show the loading of an [AOI3DScene](../../argaze.md/#argaze.AreaOfInterest.AOI3DScene) from an OBJ file description:
-
-``` python
-from argaze.AreaOfInterest import AOI3DScene
-
-# Load an AOI3D scene from OBJ file
-aoi_3d_scene = AOI3DScene.AOI3DScene.from_obj('./aoi_scene.obj')
-```
diff --git a/docs/user_guide/areas_of_interest/aoi_scene_projection.md b/docs/user_guide/areas_of_interest/aoi_scene_projection.md
deleted file mode 100644
index f348c6c..0000000
--- a/docs/user_guide/areas_of_interest/aoi_scene_projection.md
+++ /dev/null
@@ -1,22 +0,0 @@
----
-title: AOI scene projection
----
-
-AOI scene projection
-====================
-
-An [AOI3DScene](../../argaze.md/#argaze.AreaOfInterest.AOI3DScene) can be rotated and translated according to a pose estimation before to project it onto camera image as an [AOI2DScene](../../argaze.md/#argaze.AreaOfInterest.AOI2DScene).
-
-![AOI projection](../../img/aoi_projection.png)
-
-``` python
-...
-
-# Assuming pose estimation is done (tvec and rmat)
-
-# Project AOI 3D scene according pose estimation and optic parameters
-aoi2D_scene = aoi3D_scene.project(tvec, rmat, optic_parameters.K)
-
-# Draw AOI 2D scene
-aoi2D_scene.draw(image)
-```
diff --git a/docs/user_guide/areas_of_interest/heatmap.md b/docs/user_guide/areas_of_interest/heatmap.md
deleted file mode 100644
index 450c033..0000000
--- a/docs/user_guide/areas_of_interest/heatmap.md
+++ /dev/null
@@ -1,40 +0,0 @@
----
-title: Heatmap
----
-
-Heatmap
-=========
-
-[AOIFeatures](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures) provides [Heatmap](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.Heatmap) class to draw heatmap image.
-
-## Point spread
-
-The **point_spread** method draw a gaussian point spread into heatmap image at a given pointer position.
-
-![Point spread](../../img/point_spread.png)
-
-## Heatmap
-
-Heatmap visualisation allows to show where a pointer is most of the time.
-
-![Heatmap](../../img/heatmap.png)
-
-```python
-from argaze.AreaOfInterest import AOIFeatures
-
-# Create heatmap of 800px * 600px resolution
-heatmap = AOIFeatures.Heatmap((800, 600))
-
-# Initialize heatmap
-heatmap.init()
-
-# Assuming a pointer position (x, y) is moving inside frame
-...:
-
-    # Update heatmap at pointer position
-    heatmap.update((x, y), sigma=0.05)
-
-    # Do something with heatmap image
-    ... heatmap.image
-
-``` \ No newline at end of file
diff --git a/docs/user_guide/areas_of_interest/introduction.md b/docs/user_guide/areas_of_interest/introduction.md
deleted file mode 100644
index 6f74dd4..0000000
--- a/docs/user_guide/areas_of_interest/introduction.md
+++ /dev/null
@@ -1,8 +0,0 @@
-About Areas Of Interest (AOI)
-=============================
-
-The [AreaOfInterest submodule](../../argaze.md/#argaze.AreaOfInterest) allows to deal with AOI in a AR environment through a set of high level classes:
-
-* [AOIFeatures](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures)
-* [AOI3DScene](../../argaze.md/#argaze.AreaOfInterest.AOI3DScene)
-* [AOI2DScene](../../argaze.md/#argaze.AreaOfInterest.AOI2DScene) \ No newline at end of file
diff --git a/docs/user_guide/areas_of_interest/vision_cone_filtering.md b/docs/user_guide/areas_of_interest/vision_cone_filtering.md
deleted file mode 100644
index 7b29642..0000000
--- a/docs/user_guide/areas_of_interest/vision_cone_filtering.md
+++ /dev/null
@@ -1,18 +0,0 @@
-Vision cone filtering
-=====================
-
-The [AOI3DScene](../../argaze.md/#argaze.AreaOfInterest.AOI3DScene) provides cone clipping support in order to select only [AOI](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.AreaOfInterest) which are inside vision cone field.
-
-![Vision cone](../../img/vision_cone.png)
-
-``` python
-# Transform scene into camera referential
-aoi3D_camera = aoi3D_scene.transform(tvec, rmat)
-
-# Get aoi inside vision cone field
-# The vision cone tip is positionned behind the head
-aoi3D_inside, aoi3D_outside = aoi3D_camera.vision_cone(cone_radius=300, cone_height=150, cone_tip=[0., 0., -20.])
-
-# Keep only aoi inside vision cone field
-aoi3D_scene = aoi3D_scene.copy(exclude=aoi3D_outside.keys())
-```
diff --git a/docs/user_guide/aruco_markers/dictionary_selection.md b/docs/user_guide/aruco_markers/dictionary_selection.md
deleted file mode 100644
index b9ba510..0000000
--- a/docs/user_guide/aruco_markers/dictionary_selection.md
+++ /dev/null
@@ -1,17 +0,0 @@
-Dictionary selection
-====================
-
-ArUco markers always belongs to a set of markers called ArUco markers dictionary.
-
-![ArUco dictionaries](../../img/aruco_dictionaries.png)
-
-Many ArUco dictionaries exist with properties concerning the format, the number of markers or the difference between each markers to avoid error in tracking.
-
-Here is the documention [about ArUco markers dictionaries](https://docs.opencv.org/3.4/d9/d6a/group__aruco.html#gac84398a9ed9dd01306592dd616c2c975).
-
-``` python
-from argaze.ArUcoMarkers import ArUcoMarkersDictionary
-
-# Create a dictionary of specific April tags
-aruco_dictionary = ArUcoMarkersDictionary.ArUcoMarkersDictionary('DICT_APRILTAG_16h5')
-```
diff --git a/docs/user_guide/aruco_markers/introduction.md b/docs/user_guide/aruco_markers/introduction.md
deleted file mode 100644
index 9d78de0..0000000
--- a/docs/user_guide/aruco_markers/introduction.md
+++ /dev/null
@@ -1,15 +0,0 @@
-About ArUco markers
-===================
-
-![OpenCV ArUco markers](https://pyimagesearch.com/wp-content/uploads/2020/12/aruco_generate_tags_header.png)
-
-The OpenCV library provides a module to detect fiducial markers into a picture and estimate its pose (cf [OpenCV ArUco tutorial page](https://docs.opencv.org/4.x/d5/dae/tutorial_aruco_detection.html)).
-
-The ArGaze [ArUcoMarkers submodule](../../argaze.md/#argaze.ArUcoMarkers) eases markers creation, camera calibration, markers detection and 3D scene pose estimation through a set of high level classes:
-
-* [ArUcoMarkersDictionary](../../argaze.md/#argaze.ArUcoMarkers.ArUcoMarkersDictionary)
-* [ArUcoMarkers](../../argaze.md/#argaze.ArUcoMarkers.ArUcoMarker)
-* [ArUcoBoard](../../argaze.md/#argaze.ArUcoMarkers.ArUcoBoard)
-* [ArUcoOpticCalibrator](../../argaze.md/#argaze.ArUcoMarkers.ArUcoOpticCalibrator)
-* [ArUcoDetector](../../argaze.md/#argaze.ArUcoMarkers.ArUcoDetector)
-* [ArUcoMarkersGroup](../../argaze.md/#argaze.ArUcoMarkers.ArUcoMarkersGroup) \ No newline at end of file
diff --git a/docs/user_guide/aruco_markers/markers_creation.md b/docs/user_guide/aruco_markers/markers_creation.md
deleted file mode 100644
index eab9890..0000000
--- a/docs/user_guide/aruco_markers/markers_creation.md
+++ /dev/null
@@ -1,17 +0,0 @@
-Markers creation
-================
-
-The creation of [ArUcoMarkers](../../argaze.md/#argaze.ArUcoMarkers.ArUcoMarker) from a dictionary is illustrated in the code below:
-
-``` python
-from argaze.ArUcoMarkers import ArUcoMarkersDictionary
-
-# Create a dictionary of specific April tags
-aruco_dictionary = ArUcoMarkersDictionary.ArUcoMarkersDictionary('DICT_APRILTAG_16h5')
-
-# Export marker n°5 as 3.5 cm picture with 300 dpi resolution
-aruco_dictionary.create_marker(5, 3.5).save('./markers/', 300)
-
-# Export all dictionary markers as 3.5 cm pictures with 300 dpi resolution
-aruco_dictionary.save('./markers/', 3.5, 300)
-``` \ No newline at end of file
diff --git a/docs/user_guide/aruco_markers/markers_detection.md b/docs/user_guide/aruco_markers/markers_detection.md
deleted file mode 100644
index af2fb4f..0000000
--- a/docs/user_guide/aruco_markers/markers_detection.md
+++ /dev/null
@@ -1,47 +0,0 @@
-Markers detection
-=================
-
-![Detected markers](../../img/detected_markers.png)
-
-Firstly, the [ArUcoDetector](../../argaze.md/#argaze.ArUcoMarkers.ArUcoDetector.ArUcoDetector) needs to know the expected dictionary and size (in centimeter) of the [ArUcoMarkers](../../argaze.md/#argaze.ArUcoMarkers.ArUcoMarker) it have to detect.
-
-Notice that extra parameters are passed to detector: see [OpenCV ArUco markers detection parameters documentation](https://docs.opencv.org/4.x/d1/dcd/structcv_1_1aruco_1_1DetectorParameters.html) to know more.
-
-``` python
-from argaze.ArUcoMarkers import ArUcoDetector, ArUcoOpticCalibrator
-
-# Assuming camera calibration data are loaded
-
-# Loading extra detector parameters
-extra_parameters = ArUcoDetector.DetectorParameters.from_json('./detector_parameters.json')
-
-# Create ArUco detector to track DICT_APRILTAG_16h5 5cm length markers
-aruco_detector = ArUcoDetector.ArUcoDetector(optic_parameters=optic_parameters, dictionary='DICT_APRILTAG_16h5', marker_size=5, parameters=extra_parameters)
-```
-
-Here is [DetectorParameters](../../argaze.md/#argaze.ArUcoMarkers.ArUcoDetector.DetectorParameters) JSON file example:
-
-```
-{
-    "cornerRefinementMethod": 1,
-    "aprilTagQuadSigma": 2,
-    "aprilTagDeglitch": 1
-}
-```
-
-The [ArUcoDetector](../../argaze.md/#argaze.ArUcoMarkers.ArUcoDetector.ArUcoDetector) processes image to detect markers and allows to draw detection results onto it:
-
-``` python
-# Detect markers into image and draw them
-aruco_detector.detect_markers(image)
-aruco_detector.draw_detected_markers(image)
-
-# Get corners position into image related to each detected markers
-for marker_id, marker in aruco_detector.detected_markers.items():
-
-    print(f'marker {marker_id} corners: ', marker.corners)
-
-    # Do something with detected marker i corners
-    ...
-
-```
diff --git a/docs/user_guide/aruco_markers/markers_pose_estimation.md b/docs/user_guide/aruco_markers/markers_pose_estimation.md
deleted file mode 100644
index 487c220..0000000
--- a/docs/user_guide/aruco_markers/markers_pose_estimation.md
+++ /dev/null
@@ -1,20 +0,0 @@
-Markers pose estimation
-=======================
-
-After [ArUcoMarkers](../../argaze.md/#argaze.ArUcoMarkers.ArUcoMarker) detection, it is possible to estimate [ArUcoMarkers](../../argaze.md/#argaze.ArUcoMarkers.ArUcoMarker) pose in camera axis.
-
-![Pose estimation](../../img/pose_estimation.png)
-
-``` python
-# Estimate markers pose
-aruco_detector.estimate_markers_pose()
-
-# Get pose estimation related to each detected markers
-for marker_id, marker in aruco_detector.detected_markers.items():
-
-    print(f'marker {marker_id} translation: ', marker.translation)
-    print(f'marker {marker_id} rotation: ', marker.rotation)
-
-    # Do something with each marker pose estimation
-    ...
-``` \ No newline at end of file
diff --git a/docs/user_guide/aruco_markers/markers_scene_description.md b/docs/user_guide/aruco_markers/markers_scene_description.md
deleted file mode 100644
index c6dbf31..0000000
--- a/docs/user_guide/aruco_markers/markers_scene_description.md
+++ /dev/null
@@ -1,146 +0,0 @@
-Markers scene description
-=========================
-
-The ArGaze toolkit provides [ArUcoMarkersGroup](../../argaze.md/#argaze.ArUcoMarkers.ArUcoMarkersGroup) class to describe where [ArUcoMarkers](../../argaze.md/#argaze.ArUcoMarkers.ArUcoMarker) are placed into a 3D model.
-
-![ArUco scene](../../img/aruco_markers_group.png)
-
-[ArUcoMarkersGroup](../../argaze.md/#argaze.ArUcoMarkers.ArUcoMarkersGroup) is useful to:
-
-* filter markers that belongs to this predefined scene,
-* check the consistency of detected markers according the place where each marker is expected to be,
-* estimate the pose of the scene from the pose of detected markers.
-
-## Scene creation
-
-### from OBJ
-
-ArUco scene description uses common OBJ file format that can be exported from most 3D editors. Notice that plane normals (vn) needs to be exported.
-
-``` obj
-o DICT_APRILTAG_16h5#0_Marker
-v -3.004536 0.022876 2.995370
-v 2.995335 -0.015498 3.004618
-v -2.995335 0.015498 -3.004618
-v 3.004536 -0.022876 -2.995370
-vn 0.0064 1.0000 -0.0012
-s off
-f 1//1 2//1 4//1 3//1
-o DICT_APRILTAG_16h5#1_Marker
-v -33.799068 46.450645 -32.200436
-v -27.852505 47.243549 -32.102116
-v -34.593925 52.396473 -32.076626
-v -28.647360 53.189377 -31.978306
-vn -0.0135 -0.0226 0.9997
-s off
-f 5//2 6//2 8//2 7//2
-...
-```
-
-Here is a sample of code to show the loading of an [ArUcoMarkersGroup](../../argaze.md/#argaze.ArUcoMarkers.ArUcoMarkersGroup) OBJ file description:
-
-``` python
-from argaze.ArUcoMarkers import ArUcoMarkersGroup
-
-# Create an ArUco scene from a OBJ file description
-aruco_markers_group = ArUcoMarkersGroup.ArUcoMarkersGroup.from_obj('./markers.obj')
-
-# Print loaded marker places
-for place_id, place in aruco_markers_group.places.items():
-
-    print(f'place {place_id} for marker: ', place.marker.identifier)
-    print(f'place {place_id} translation: ', place.translation)
-    print(f'place {place_id} rotation: ', place.rotation)
-```
-
-### from JSON
-
-[ArUcoMarkersGroup](../../argaze.md/#argaze.ArUcoMarkers.ArUcoMarkersGroup) description can also be written in a JSON file format.
-
-``` json
-{
-    "dictionary": "DICT_ARUCO_ORIGINAL",
-    "marker_size": 1,
-    "places": {
-        "0": {
-            "translation": [0, 0, 0],
-            "rotation": [0, 0, 0]
-        },
-        "1": {
-            "translation": [10, 10, 0],
-            "rotation": [0, 0, 0]
-        },
-        "2": {
-            "translation": [0, 10, 0],
-            "rotation": [0, 0, 0]
-        }
-    }
-}
-```
-
-### from detected markers
-
-Here is a more advanced usage where ArUco scene is built from markers detected into an image:
-
-``` python
-from argaze.ArUcoMarkers import ArUcoMarkersGroup
-
-# Assuming markers have been detected and their pose estimated thanks to ArUcoDetector
-...
-
-# Build ArUco scene from detected markers
-aruco_markers_group = ArUcoMarkersGroup.ArUcoMarkersGroup(aruco_detector.marker_size, aruco_detector.dictionary, aruco_detector.detected_markers)
-```
-
-## Markers filtering
-
-Considering markers are detected, here is how to filter them to consider only those which belongs to the scene:
-
-``` python
-scene_markers, remaining_markers = aruco_markers_group.filter_markers(aruco_detector.detected_markers)
-```
-
-## Marker poses consistency
-
-Then, scene markers poses can be validated by verifying their spatial consistency considering angle and distance tolerance. This is particularly useful to discard ambiguous marker pose estimations when markers are parallel to camera plane (see [issue on OpenCV Contribution repository](https://github.com/opencv/opencv_contrib/issues/3190#issuecomment-1181970839)).
-
-``` python
-# Check scene markers consistency with 10° angle tolerance and 1 cm distance tolerance
-consistent_markers, unconsistent_markers, unconsistencies = aruco_markers_group.check_markers_consistency(scene_markers, 10, 1)
-```
-
-## Scene pose estimation
-
-Several approaches are available to perform [ArUcoMarkersGroup](../../argaze.md/#argaze.ArUcoMarkers.ArUcoMarkersGroup) pose estimation from markers belonging to the scene.
-
-The first approach considers that scene pose can be estimated **from a single marker pose**:
-
-``` python
-# Let's select one consistent scene marker
-marker_id, marker = consistent_markers.popitem()
-
-# Estimate scene pose from a single marker
-tvec, rmat = self.aruco_markers_group.estimate_pose_from_single_marker(marker)
-```
-
-The second approach considers that scene pose can be estimated by **averaging several marker poses**:
-
-``` python
-# Estimate scene pose from all consistent scene markers
-tvec, rmat = self.aruco_markers_group.estimate_pose_from_markers(consistent_markers)
-```
-
-The third approach is only available when ArUco markers are placed in such a configuration that is possible to **define orthogonal axis**:
-
-``` python
-tvec, rmat = self.aruco_markers_group.estimate_pose_from_axis_markers(origin_marker, horizontal_axis_marker, vertical_axis_marker)
-```
-
-## Scene exportation
-
-As ArUco scene can be exported to OBJ file description to import it into most 3D editors.
-
-``` python
-# Export an ArUco scene as OBJ file description
-aruco_markers_group.to_obj('markers.obj')
-```
diff --git a/docs/user_guide/aruco_markers_pipeline/advanced_topics/aruco_detector_configuration.md b/docs/user_guide/aruco_markers_pipeline/advanced_topics/aruco_detector_configuration.md
new file mode 100644
index 0000000..f5b66c6
--- /dev/null
+++ b/docs/user_guide/aruco_markers_pipeline/advanced_topics/aruco_detector_configuration.md
@@ -0,0 +1,40 @@
+Improve ArUco markers detection
+===============================
+
+As explained in the [OpenCV ArUco documentation](https://docs.opencv.org/4.x/d1/dcd/structcv_1_1aruco_1_1DetectorParameters.html), ArUco marker detection is highly configurable.
+
+## Load ArUcoDetector parameters
+
+[ArUcoCamera.detector.parameters](../../../argaze.md/#argaze.ArUcoMarkers.ArUcoDetector.Parameters) can be loaded via a dedicated JSON entry.
+
+Here is an extract from the JSON [ArUcoCamera](../../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) configuration file with ArUco detector parameters:
+
+```json
+{
+    "name": "My FullHD camera",
+    "size": [1920, 1080],
+    "aruco_detector": {
+        "dictionary": "DICT_APRILTAG_16h5",
+        "marker_size": 5,
+        "parameters": {
+            "cornerRefinementMethod": 3,
+            "aprilTagQuadSigma": 2,
+            "aprilTagDeglitch": 1,
+            "useAruco3Detection": 1
+        }
+    },
+    ...
+```
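+
+A minimal loading sketch (assuming the extract above is completed and saved as a `configuration.json` file; the path and filename are illustrative, and the `from_json` class method follows the loading pattern used across this documentation):
+
+```python
+from argaze.ArUcoMarkers import ArUcoCamera
+
+# Load ArUcoCamera configuration, including the ArUco detector parameters
+aruco_camera = ArUcoCamera.ArUcoCamera.from_json('./configuration.json')
+```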
+
+## Print ArUcoDetector parameters
+
+```python
+# Assuming ArUcoCamera is loaded
+...
+
+# Print all ArUcoDetector parameters
+print(aruco_camera.aruco_detector.parameters)
+
+# Print only modified ArUcoDetector parameters
+print(f'{aruco_camera.aruco_detector.parameters:modified}')
+```
diff --git a/docs/user_guide/aruco_markers_pipeline/advanced_topics/optic_parameters_calibration.md b/docs/user_guide/aruco_markers_pipeline/advanced_topics/optic_parameters_calibration.md
index 455d95a..3277216 100644
--- a/docs/user_guide/aruco_markers_pipeline/advanced_topics/optic_parameters_calibration.md
+++ b/docs/user_guide/aruco_markers_pipeline/advanced_topics/optic_parameters_calibration.md
@@ -3,11 +3,11 @@ Calibrate optic parameters
A camera device have to be calibrated to compensate its optical distorsion.
-![Optic parameters calibration](../../img/optic_calibration.png)
+![Optic parameters calibration](../../../img/optic_calibration.png)
## Print calibration board
-The first step to calibrate a camera is to create an [ArUcoBoard](../../argaze.md/#argaze.ArUcoMarkers.ArUcoBoard) like in the code below:
+The first step to calibrate a camera is to create an [ArUcoBoard](../../../argaze.md/#argaze.ArUcoMarkers.ArUcoBoard) like in the code below:
``` python
from argaze.ArUcoMarkers import ArUcoMarkersDictionary, ArUcoBoard
@@ -29,9 +29,9 @@ Let's print the calibration board before to go further.
## Capture board pictures
-Then, the calibration process needs to make many different captures of an [ArUcoBoard](../../argaze.md/#argaze.ArUcoMarkers.ArUcoBoard) through the camera and then, pass them to an [ArUcoDetector](../../argaze.md/#argaze.ArUcoMarkers.ArUcoDetector.ArUcoDetector) instance to detect board corners and store them as calibration data into an [ArUcoOpticCalibrator](../../argaze.md/#argaze.ArUcoMarkers.ArUcoOpticCalibrator) for final calibration process.
+Then, the calibration process needs to make many different captures of an [ArUcoBoard](../../../argaze.md/#argaze.ArUcoMarkers.ArUcoBoard) through the camera and then, pass them to an [ArUcoDetector](../../../argaze.md/#argaze.ArUcoMarkers.ArUcoDetector.ArUcoDetector) instance to detect board corners and store them as calibration data into an [ArUcoOpticCalibrator](../../../argaze.md/#argaze.ArUcoMarkers.ArUcoOpticCalibrator) for final calibration process.
-![Calibration step](../../img/optic_calibration_step.png)
+![Calibration step](../../../img/optic_calibration_step.png)
The sample of code below illustrates how to:
@@ -131,3 +131,61 @@ Below, an optic_parameters JSON file example:
]
}
```
+
+## Load and display optic parameters
+
+[ArUcoCamera.detector.optic_parameters](../../../argaze.md/#argaze.ArUcoMarkers.ArUcoOpticCalibrator.OpticParameters) can be loaded via a dedicated JSON entry.
+
+Here is an extract from the JSON [ArUcoCamera](../../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) configuration file where optic parameters are loaded and displayed:
+
+```json
+{
+    "name": "My FullHD Camera",
+    "size": [1920, 1080],
+    "aruco_detector": {
+        "dictionary": "DICT_APRILTAG_16h5",
+        "marker_size": 5,
+        "optic_parameters": {
+            "rms": 0.6688921504088245,
+            "dimensions": [
+                1920,
+                1080
+            ],
+            "K": [
+                [
+                    1135.6524381415752,
+                    0.0,
+                    956.0685325355497
+                ],
+                [
+                    0.0,
+                    1135.9272506869524,
+                    560.059099810324
+                ],
+                [
+                    0.0,
+                    0.0,
+                    1.0
+                ]
+            ],
+            "D": [
+                0.01655492265003404,
+                0.1985524264972037,
+                0.002129965902489484,
+                -0.0019528582922179365,
+                -0.5792910353639452
+            ]
+        }
+    },
+    ...
+    "image_parameters": {
+        ...
+        "draw_optic_parameters_grid": {
+            "width": 192,
+            "height": 108,
+            "z": 100,
+            "point_size": 1,
+            "point_color": [0, 0, 255]
+        }
+    }
+``` \ No newline at end of file
diff --git a/docs/user_guide/aruco_markers_pipeline/advanced_topics/scripting.md b/docs/user_guide/aruco_markers_pipeline/advanced_topics/scripting.md
new file mode 100644
index 0000000..892d6dd
--- /dev/null
+++ b/docs/user_guide/aruco_markers_pipeline/advanced_topics/scripting.md
@@ -0,0 +1,136 @@
+Script the pipeline
+===================
+
+All ArUco markers pipeline objects are accessible from a Python script.
+This can be particularly useful for real-time AR interaction applications.
+
+## Load ArUcoCamera configuration from dictionary
+
+First of all, [ArUcoCamera](../../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) configuration can be loaded from a Python dictionary.
+
+```python
+from argaze.ArUcoMarkers import ArUcoCamera
+
+# Edit a dict with ArUcoCamera configuration
+configuration = {
+    "name": "My FullHD camera",
+    "size": (1920, 1080),
+    ...
+    "aruco_detector": {
+        ...
+    },
+    "scenes": {
+        "MyScene" : {
+            "aruco_markers_group": {
+                ...
+            },
+            "layers": {
+                "MyLayer": {
+                    "aoi_scene": {
+                        ...
+                    }
+                },
+                ...
+            }
+        },
+        ...
+    },
+    "layers": {
+        "MyLayer": {
+            ...
+        },
+        ...
+    },
+    "image_parameters": {
+        ...
+    }
+}
+
+# Load ArUcoCamera
+aruco_camera = ArUcoCamera.ArUcoCamera.from_dict(configuration)
+
+# Do something with ArUcoCamera
+...
+```
+
+## Access ArUcoCamera and ArScene attributes
+
+Then, once the configuration is loaded, it is possible to access its attributes: [read ArUcoCamera code reference](../../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) to get a complete list of what is available.
+
+Thus, the [ArUcoCamera.scenes](../../../argaze.md/#argaze.ArFeatures.ArCamera) attribute gives access to each loaded ArUco scene and, in turn, to its attributes: [read ArUcoScene code reference](../../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) to get a complete list of what is available.
+
+```python
+from argaze import ArFeatures
+
+# Assuming the ArUcoCamera is loaded
+...
+
+# Iterate over each ArUcoCamera scene
+for name, aruco_scene in aruco_camera.scenes.items():
+    ...
+```
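+
+Since scenes are stored by name, a specific scene can also be accessed directly (a short sketch reusing the "MyScene" entry from the configuration example above):
+
+```python
+# Access a scene by its configuration name
+my_scene = aruco_camera.scenes['MyScene']
+```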
+
+## Pipeline execution outputs
+
+[ArUcoCamera.watch](../../../argaze.md/#argaze.ArFeatures.ArCamera.watch) method returns data about pipeline execution.
+
+```python
+# Assuming that images are available
+...:
+
+    # Watch image with ArUco camera
+    detection_time, projection_time, exception = aruco_camera.watch(image)
+
+    # Do something with pipeline times
+    ...
+
+    # Do something with pipeline exception
+    if exception:
+        ...
+```
+
+Let's understand the meaning of each returned value.
+
+### *detection_time*
+
+ArUco marker detection time in ms.
+
+### *projection_time*
+
+Scenes projection time in ms.
+
+### *exception*
+
+A [python Exception](https://docs.python.org/3/tutorial/errors.html#exceptions) object raised during pipeline execution.
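+
+For instance, here is a minimal monitoring sketch built only on these returned values, following the looping convention used above:
+
+```python
+# Assuming that images are available
+...:
+
+    # Watch image with ArUco camera
+    detection_time, projection_time, exception = aruco_camera.watch(image)
+
+    # Report pipeline timing
+    print(f'detection: {detection_time} ms, projection: {projection_time} ms')
+
+    # Re-raise any exception caught during pipeline execution
+    if exception:
+        raise exception
+```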
+
+## Setup ArUcoCamera image parameters
+
+Specific [ArUcoCamera.image](../../../argaze.md/#argaze.ArFeatures.ArFrame.image) method parameters can be configured via a Python dictionary.
+
+```python
+# Assuming ArUcoCamera is loaded
+...
+
+# Edit a dict with ArUcoCamera image parameters
+image_parameters = {
+    "draw_detected_markers": {
+        ...
+    },
+    "draw_scenes": {
+        ...
+    },
+    "draw_optic_parameters_grid": {
+        ...
+    },
+    ...
+}
+
+# Pass image parameters to ArUcoCamera
+aruco_camera_image = aruco_camera.image(**image_parameters)
+
+# Do something with ArUcoCamera image
+...
+```
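+
+The resulting image can then be displayed, for example with OpenCV (a sketch assuming a GUI-capable environment where the `cv2` package is available):
+
+```python
+import cv2
+
+# Display ArUcoCamera image in a window named after the camera
+cv2.imshow(aruco_camera.name, aruco_camera_image)
+cv2.waitKey(1)
+```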
+
+!!! note
+ [ArUcoCamera](../../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) inherits from [ArFrame](../../../argaze.md/#argaze.ArFeatures.ArFrame) and so, benefits from all image parameters described in [gaze analysis pipeline visualisation section](../../gaze_analysis_pipeline/visualisation.md). \ No newline at end of file
diff --git a/docs/user_guide/aruco_markers_pipeline/aoi_3d_description.md b/docs/user_guide/aruco_markers_pipeline/aoi_3d_description.md
new file mode 100644
index 0000000..b02bc9e
--- /dev/null
+++ b/docs/user_guide/aruco_markers_pipeline/aoi_3d_description.md
@@ -0,0 +1,53 @@
+Describe 3D AOI
+===============
+
+Now that [scene pose is estimated](aruco_markers_description.md) thanks to the ArUco markers description, [areas of interest (AOI)](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.AreaOfInterest) need to be described in the same 3D referential.
+
+In the example scene, the screen and the sheet are considered as areas of interest.
+
+![3D AOI description](../../img/aoi_3d_description.png)
+
+All AOI need to be described from the same origin as the markers in a [right-handed 3D axis](https://robotacademy.net.au/lesson/right-handed-3d-coordinate-frame/) where:
+
+* +X is pointing to the right,
+* +Y is pointing to the top,
+* +Z is pointing backward.
+
+!!! warning
+ All AOI spatial values must be given in **centimeters**.
+
+### Edit OBJ file description
+
+OBJ files can be exported from most 3D editors.
+
+``` obj
+o Sheet
+v 14.200000 -3.000000 28.350000
+v 35.200000 -3.000000 28.350000
+v 14.200000 -3.000000 -1.35
+v 35.200000 -3.000000 -1.35
+f 1 2 4 3
+o Screen
+v 2.750000 2.900000 -0.500000
+v 49.250000 2.900000 -0.500000
+v 2.750000 29.100000 -0.500000
+v 49.250000 29.100000 -0.500000
+f 5 6 8 7
+```
+
+Here are the common OBJ file features needed to describe AOI:
+
+* Object lines (starting with the *o* key) indicate AOI names.
+* Vertex lines (starting with the *v* key) indicate AOI vertices.
+* Face lines (starting with the *f* key) link vertices together.
+
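+Such a description can be loaded and inspected with the [AOI3DScene](../../argaze.md/#argaze.AreaOfInterest.AOI3DScene) class (a minimal sketch, assuming the OBJ extract above is saved as an `aoi_3d_scene.obj` file and relying on the dict-like iteration used elsewhere in these docs):
+
+``` python
+from argaze.AreaOfInterest import AOI3DScene
+
+# Load 3D AOI description from OBJ file
+aoi_3d_scene = AOI3DScene.AOI3DScene.from_obj('./aoi_3d_scene.obj')
+
+# Print loaded AOI names
+for name, aoi in aoi_3d_scene.items():
+
+    print(name)
+```
+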
+### Edit JSON file description
+
+The JSON file format also allows describing AOI vertices.
+
+``` json
+{
+ "Sheet": [[14.2, -3, 28.35], [35.2, -3, 28.35], [14.2, -3, -1.35], [35.2, -3, -1.35]],
+ "Screen": [[2.75, 2.9, -0.5], [49.25, 2.9, -0.5], [2.75, 29.1, -0.5], [49.25, 29.1, -0.5]]
+}
+```
diff --git a/docs/user_guide/aruco_markers_pipeline/aoi_3d_frame.md b/docs/user_guide/aruco_markers_pipeline/aoi_3d_frame.md
new file mode 100644
index 0000000..f1ae1f6
--- /dev/null
+++ b/docs/user_guide/aruco_markers_pipeline/aoi_3d_frame.md
@@ -0,0 +1,128 @@
+Define a 3D AOI as a frame
+==========================
+
+When a 3D AOI of the scene contains other coplanar 3D AOI, like a screen with GUI elements displayed on it, it is better to describe them as 2D AOI inside a 2D coordinate system related to the containing 3D AOI.
+
+![3D AOI frame](../../img/aruco_camera_aoi_frame.png)
+
+## Add ArFrame to ArUcoScene
+
+The [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) class defines a rectangular area where timestamped gaze positions are projected and inside which they are analyzed.
+
+Here is the previous extract where the "Screen" AOI is defined as a frame in the [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) configuration:
+
+```json
+{
+    "name": "My FullHD camera",
+    "size": [1920, 1080],
+    ...
+    "scenes": {
+        "MyScene" : {
+            "aruco_markers_group": {
+                ...
+            },
+            "layers": {
+                "MyLayer": {
+                    "aoi_scene": {
+                        "Sheet": [[14.2, -3, 28.35], [35.2, -3, 28.35], [14.2, -3, -1.35], [35.2, -3, -1.35]],
+                        "Screen": [[2.75, 2.9, -0.5], [49.25, 2.9, -0.5], [2.75, 29.1, -0.5], [49.25, 29.1, -0.5]]
+                    }
+                }
+            },
+            "frames": {
+                "Screen": {
+                    "size": [1920, 1080],
+                    "layers": {
+                        "MyLayer": {
+                            "aoi_scene": {
+                                "GeoSector": [[860, 160], [1380, 100], [1660, 400], [1380, 740], [1440, 960], [920, 920], [680, 800], [640, 560]],
+                                "LeftPanel": {
+                                    "Rectangle": {
+                                        "x": 0,
+                                        "y": 0,
+                                        "width": 350,
+                                        "height": 1080
+                                    }
+                                },
+                                "CircularWidget": {
+                                    "Circle": {
+                                        "cx": 1800,
+                                        "cy": 120,
+                                        "radius": 80
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+    ...
+}
+```
+Now, let's understand the meaning of each JSON entry.
+
+### *frames*
+
+An [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) instance can contain multiple [ArFrames](../../argaze.md/#argaze.ArFeatures.ArFrame) stored by name.
+
+### Screen
+
+The name of a 3D AOI **and** of an [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame). Mainly useful for visualisation purposes.
+
+!!! warning "AOI / Frame names policy"
+
+ A 3D AOI of an [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) layer is defined as an [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) frame, **provided they have the same name**.
+
+!!! warning "Layer name policy"
+
+ An [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) frame layer is projected into an [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) layer, **provided they have the same name**.
+
+!!! note
+
+ [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) frame layers are projected into their dedicated [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) layers when the JSON configuration file is loaded.
+
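+As the note above says, everything is built at load time. Here is a minimal loading sketch reusing only calls shown elsewhere in this documentation; the configuration file name is an assumption:
+
+```python
+from argaze.ArUcoMarkers import ArUcoCamera
+
+# Load the whole configuration above: scenes, frames and layers are built now
+aruco_camera = ArUcoCamera.ArUcoCamera.from_json('./configuration.json')
+
+# Iterate over all loaded scene frames, "Screen" included
+for frame in aruco_camera.scene_frames:
+
+    ...
+```
+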
+## Pipeline execution
+
+### Map ArUcoCamera image into ArUcoScenes frames
+
+After the camera image is passed to the [ArUcoCamera.watch](../../argaze.md/#argaze.ArFeatures.ArCamera.watch) method, it is possible to apply a perspective transformation in order to project the watched image into each [ArUcoScenes](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) [frames background](../../argaze.md/#argaze.ArFeatures.ArFrame) image.
+
+```python
+# Assuming that Full HD (1920x1080) video stream or file is opened
+...
+
+# Assuming that the video reading is handled in a looping code block
+...:
+
+ # Capture image from video stream or file
+ image = video_capture.read()
+
+ # Detect ArUco markers, estimate scene pose, then project 3D AOI into camera frame
+ aruco_camera.watch(image)
+
+ # Map watched image into ArUcoScenes frames background
+ aruco_camera.map()
+```
+
+### Analyse timestamped gaze positions into ArUcoScenes frames
+
+[ArUcoScenes](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) frames benefit from all the services described in [gaze analysis pipeline section](../gaze_analysis_pipeline/introduction.md).
+
+!!! note
+
+ Timestamped gaze positions passed to [ArUcoCamera.look](../../argaze.md/#argaze.ArFeatures.ArFrame.look) method are projected into [ArUcoScenes](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) frames if applicable.
+
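+Here is a minimal sketch of this step, assuming timestamped gaze positions come from an eye tracker stream and that *timestamp*, *x* and *y* are placeholders filled by it:
+
+```python
+from argaze import GazeFeatures
+
+# Assuming that timestamped gaze positions are provided by an eye tracker stream
+...:
+
+    # Project gaze position into ArUcoCamera frame and, if applicable, into ArUcoScenes frames
+    aruco_camera.look(timestamp, GazeFeatures.GazePosition((x, y)))
+```
+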
+### Display ArUcoScenes frames
+
+All [ArUcoScenes](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) frames images can be displayed like any [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) image.
+
+```python
+ ...
+
+ # Display all ArUcoScenes frames
+ for frame in aruco_camera.scene_frames:
+
+ ... frame.image()
+``` \ No newline at end of file
diff --git a/docs/user_guide/aruco_markers_pipeline/aoi_projection.md b/docs/user_guide/aruco_markers_pipeline/aoi_3d_projection.md
index 027f805..8c7310b 100644
--- a/docs/user_guide/aruco_markers_pipeline/aoi_projection.md
+++ b/docs/user_guide/aruco_markers_pipeline/aoi_3d_projection.md
@@ -1,15 +1,15 @@
-Project AOI into camera frame
-=============================
+Project 3D AOI into camera frame
+================================
-Once [ArUcoScene pose is estimated](pose_estimation.md) and [AOI are described](aoi_description.md), AOI can be projected into [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) frame.
+Once [ArUcoScene pose is estimated](pose_estimation.md) and [3D AOI are described](aoi_3d_description.md), they can be projected into the [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) frame.
-![AOI projection](../../img/aruco_camera_aoi_projection.png)
+![3D AOI projection](../../img/aruco_camera_aoi_projection.png)
-## Add ArLayer to ArUcoScene to load AOI
+## Add ArLayer to ArUcoScene to load 3D AOI
-The [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) class allows to load areas of interest description. An [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) instance can contains multiples [ArLayers](../../argaze.md/#argaze.ArFeatures.ArLayer).
+The [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) class allows loading 3D AOI descriptions.
-Here is the previous extract where one layer is added to the [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) configuration:
+Here is the previous extract where one layer is added to the [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) configuration:
```json
{
@@ -24,9 +24,8 @@ Here is the previous extract where one layer is added to the [ArUcoScene](../../
"layers": {
"MyLayer": {
"aoi_scene": {
- "YellowSquare": [[6.2, -7.275252, 25.246159], [31.2, -7.275252, 25.246159], [31.2, 1.275252, 1.753843], [6.2, 1.275252, 1.753843]],
- "GrayRectangle": [[2.5, 2.5, -0.5], [37.5, 2.5, -0.5], [37.5, 27.5, -0.5], [2.5, 27.5, -0.5]],
- "BlueTriangle": [[12.5, 7.5, -0.5], [27.5, 7.5, -0.5], [20, 22.5, -0.5]]
+ "Sheet": [[14.2, -3, 28.35], [35.2, -3, 28.35], [14.2, -3, -1.35], [35.2, -3, -1.35]],
+ "Screen": [[2.75, 2.9, -0.5], [49.25, 2.9, -0.5], [2.75, 29.1, -0.5], [49.25, 29.1, -0.5]]
}
}
}
@@ -38,17 +37,21 @@ Here is the previous extract where one layer is added to the [ArUcoScene](../../
Now, let's understand the meaning of each JSON entry.
-### "MyLayer"
+### *layers*
-The name of the [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer). Basically useful for visualisation purpose.
+An [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) instance can contain multiple [ArLayers](../../argaze.md/#argaze.ArFeatures.ArLayer) stored by name.
-### AOI Scene
+### MyLayer
-The [AOIScene](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.AOIScene) defines a set of 3D [AreaOfInterest](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.AreaOfInterest) registered by name.
+The name of an [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer). Mainly useful for visualisation purposes.
-## Add ArLayer to ArUcoCamera to project AOI
+### *aoi_scene*
-Here is the previous extract where one layer is added to the [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) and displayed:
+The set of 3D AOI in the layer, as defined in the [3D AOI description chapter](aoi_3d_description.md).
+
+## Add ArLayer to ArUcoCamera to project 3D AOI
+
+Here is the previous extract where one layer is added to the [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) configuration and displayed:
```json
{
@@ -91,21 +94,25 @@ Here is the previous extract where one layer is added to the [ArUcoCamera](../..
Now, let's understand the meaning of each JSON entry.
-### "MyLayer"
+### *layers*
+
+An [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) instance can contain multiple [ArLayers](../../argaze.md/#argaze.ArFeatures.ArLayer) stored by name.
+
+### MyLayer
-The name of the [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer). Basically useful for visualisation purpose.
+The name of an [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer). Mainly useful for visualisation purposes.
!!! warning "Layer name policy"
- An [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) layer is projected into [an ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) layer, **provided they have the same name**.
+ An [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) layer is projected into an [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) layer, **provided they have the same name**.
!!! note
[ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) layers are projected into their dedicated [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) layers when calling the [ArUcoCamera.watch](../../argaze.md/#argaze.ArFeatures.ArCamera.watch) method.
-## Add AOI analysis
+## Add AOI analysis features to ArUcoCamera layer
-When a scene layer is projected into a camera layer, it means that the 3D [ArLayer.aoi_scene](../../argaze.md/#argaze.ArFeatures.ArLayer.aoi_scene) description of the scene becomes the 2D camera's [ArLayer.aoi_scene](../../argaze.md/#argaze.ArFeatures.ArLayer.aoi_scene) description of the camera.
+When a scene layer is projected into a camera layer, the scene's 3D AOI are transformed into the camera's 2D AOI.
Therefore, it means that [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) benefits from all the services described in [AOI analysis pipeline section](../gaze_analysis_pipeline/aoi_analysis.md).
@@ -156,4 +163,4 @@ Here is the previous extract where AOI matcher, AOI scan path and AOI scan path
!!! warning
- Adding scan path and scan path analyzers to an [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) layer doesn't make sense if the camera is moving.
+ Adding a scan path and scan path analyzers to an [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) layer doesn't make sense, as the space viewed through the camera frame doesn't necessarily reflect the space the gaze is covering.
diff --git a/docs/user_guide/aruco_markers_pipeline/aoi_description.md b/docs/user_guide/aruco_markers_pipeline/aoi_description.md
deleted file mode 100644
index 101ec9f..0000000
--- a/docs/user_guide/aruco_markers_pipeline/aoi_description.md
+++ /dev/null
@@ -1,62 +0,0 @@
-Describe AOI
-============
-
-Once [ArUco markers are placed into a scene](aruco_markers_description.md), areas of interest need to be described into the same 3D referential.
-
-In the example scene, each screen is considered as an area of interest more the blue triangle area inside the top screen.
-
-![AOI description](../../img/aoi_description.png)
-
-All AOIs need to be described from same origin than markers in a [right-handed 3D axis](https://robotacademy.net.au/lesson/right-handed-3d-coordinate-frame/) where:
-
-* +X is pointing to the right,
-* +Y is pointing to the top,
-* +Z is pointing to the backward.
-
-!!! warning
- All AOIs spatial values must be given in **centimeters**.
-
-### Edit OBJ file description
-
-OBJ file format could be exported from most 3D editors.
-
-``` obj
-o YellowSquare
-v 6.200003 -7.275252 25.246159
-v 31.200003 -7.275252 25.246159
-v 6.200003 1.275252 1.753843
-v 31.200003 1.275252 1.753843
-s off
-f 1 2 4 3
-o GrayRectangle
-v 2.500000 2.500000 -0.500000
-v 37.500000 2.500000 -0.500000
-v 2.500000 27.500000 -0.500000
-v 37.500000 27.500000 -0.500000
-s off
-f 5 6 8 7
-o BlueTriangle
-v 12.500002 7.500000 -0.500000
-v 27.500002 7.500000 -0.500000
-v 20.000002 22.500000 -0.500000
-s off
-f 9 10 11
-```
-
-Here are common OBJ file features needed to describe AOIs:
-
-* Object lines (starting with *o* key) indicate AOI name.
-* Vertice lines (starting with *v* key) indicate AOI vertices.
-* Face (starting with *f* key) link vertices together.
-
-### Edit JSON file description
-
-JSON file format allows to describe AOIs vertices.
-
-``` json
-{
- "YellowSquare": [[6.2, -7.275252, 25.246159], [31.2, -7.275252, 25.246159], [31.2, 1.275252, 1.753843], [6.2, 1.275252, 1.753843]],
- "GrayRectangle": [[2.5, 2.5, -0.5], [37.5, 2.5, -0.5], [37.5, 27.5, -0.5], [2.5, 27.5, -0.5]],
- "BlueTriangle": [[12.5, 7.5, -0.5], [27.5, 7.5, -0.5], [20, 22.5, -0.5]]
-}
-```
diff --git a/docs/user_guide/aruco_markers_pipeline/aruco_markers_description.md b/docs/user_guide/aruco_markers_pipeline/aruco_markers_description.md
index 1c13013..6380f88 100644
--- a/docs/user_guide/aruco_markers_pipeline/aruco_markers_description.md
+++ b/docs/user_guide/aruco_markers_pipeline/aruco_markers_description.md
@@ -3,11 +3,11 @@ Set up ArUco markers
First of all, ArUco markers needs to be printed and placed into the scene.
-Here is an example scene where markers are surrounding a multi-screen workspace with a triangle area inside one of them.
+Here is an example scene where markers are surrounding a workspace with a screen and a sheet on the table (assuming the sheet stays static for the moment).
![Scene](../../img/scene.png)
-## Print ArUco markers from a ArUco dictionary
+## Print ArUco markers from an ArUco dictionary
ArUco markers always belongs to a set of markers called ArUco markers dictionary.
@@ -65,24 +65,18 @@ v 0.000000 0.000000 0.000000
v 5.000000 0.000000 0.000000
v 0.000000 5.000000 0.000000
v 5.000000 5.000000 0.000000
-vn 0.0000 0.0000 1.0000
-s off
f 1//1 2//1 4//1 3//1
o DICT_APRILTAG_16h5#1_Marker
-v -1.767767 23.000002 3.767767
-v 1.767767 23.000002 0.232233
-v -1.767767 28.000002 3.767767
-v 1.767767 28.000002 0.232233
-vn 0.7071 0.0000 0.7071
-s off
+v -0.855050 24.000002 4.349232
+v 0.855050 24.000002 -0.349231
+v -0.855050 29.000002 4.349232
+v 0.855050 29.000002 -0.349231
f 5//2 6//2 8//2 7//2
o DICT_APRILTAG_16h5#2_Marker
-v 33.000000 -1.767767 4.767767
-v 38.000000 -1.767767 4.767767
-v 33.000000 1.767767 1.232233
-v 38.000000 1.767767 1.232233
-vn 0.0000 0.7071 0.7071
-s off
+v 44.000000 0.000000 9.500000
+v 49.000000 0.000000 9.500000
+v 44.000000 -0.000000 4.500000
+v 49.000000 -0.000000 4.500000
f 9//3 10//3 12//3 11//3
```
@@ -90,7 +84,6 @@ Here are common OBJ file features needed to describe ArUco markers places:
* Object lines (starting with *o* key) indicate markers dictionary and id by following this format: **DICTIONARY**#**ID**\_Marker.
* Vertice lines (starting with *v* key) indicate markers corners. The marker size will be automatically deducted from the geometry.
-* Plane normals (starting with *vn* key) need to be exported for further pose estimation.
* Face (starting with *f* key) link vertices and normals indexes together.
!!! warning
@@ -110,12 +103,12 @@ JSON file format allows to describe markers places using translation and euler a
"rotation": [0, 0, 0]
},
"1": {
- "translation": [0, 25.5, 2],
- "rotation": [0, 45, 0]
+ "translation": [0, 26.5, 2],
+ "rotation": [0, 70, 0]
},
"2": {
- "translation": [35.5, 0, 3],
- "rotation": [-45, 0, 0]
+ "translation": [46.5, 0, 7],
+ "rotation": [-90, 0, 0]
}
}
}
diff --git a/docs/user_guide/aruco_markers_pipeline/configuration_and_execution.md b/docs/user_guide/aruco_markers_pipeline/configuration_and_execution.md
index 81c577f..60a1115 100644
--- a/docs/user_guide/aruco_markers_pipeline/configuration_and_execution.md
+++ b/docs/user_guide/aruco_markers_pipeline/configuration_and_execution.md
@@ -3,7 +3,7 @@ Load and execute pipeline
Once [ArUco markers are placed into a scene](aruco_markers_description.md), they can be detected thanks to [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) class.
-As [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) inherits from [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame), the [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) class also benefits from all the services described in [gaze analysis pipeline section](./user_guide/gaze_analysis_pipeline/introduction.md).
+As [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) inherits from [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame), the [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) class also benefits from all the services described in [gaze analysis pipeline section](../gaze_analysis_pipeline/introduction.md).
![ArUco camera frame](../../img/aruco_camera_frame.png)
@@ -29,6 +29,12 @@ Here is a simple JSON [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCa
},
"image_parameters": {
"background_weight": 1,
+ "draw_detected_markers": {
+ "color": [0, 255, 0],
+ "draw_axes": {
+ "thickness": 3
+ }
+ },
"draw_gaze_positions": {
"color": [0, 255, 255],
"size": 2
@@ -40,12 +46,6 @@ Here is a simple JSON [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCa
},
"draw_saccades": {
"line_color": [255, 0, 255]
- },
- "draw_detected_markers": {
- "color": [0, 255, 0],
- "draw_axes": {
- "thickness": 3
- }
}
}
}
@@ -62,15 +62,15 @@ aruco_camera = ArUcoCamera.ArUcoCamera.from_json('./configuration.json')
Now, let's understand the meaning of each JSON entry.
-### Name - *inherited from [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame)*
+### *name - inherited from [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame)*
The name of the [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) frame. Basically useful for visualisation purpose.
-### Size - *inherited from [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame)*
+### *size - inherited from [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame)*
The size of the [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) frame in pixels. Be aware that gaze positions have to be in the same range of value to be projected in.
-### ArUco Detector
+### *aruco_detector*
The first [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) pipeline step is to detect ArUco markers inside input image and estimate their poses.
@@ -81,21 +81,21 @@ The [ArUcoDetector](../../argaze.md/#argaze.ArUcoMarkers.ArUcoDetector) is in ch
!!! warning "Mandatory"
JSON *aruco_detector* entry is mandatory.
-### Gaze Movement Identifier - *inherited from [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame)*
+### *gaze_movement_identifier - inherited from [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame)*
The first [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) pipeline step dedicated to identify fixations or saccades from consecutive timestamped gaze positions.
![Gaze movement identification](../../img/aruco_camera_gaze_movement_identification.png)
-### Image parameters - *inherited from [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame)*
+### *image_parameters - inherited from [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame)*
-The usual [ArFrame visualisation parameters](./user_guide/gaze_analysis_pipeline/visualisation.md) plus one additional *draw_detected_markers* field.
+The usual [ArFrame visualisation parameters](../gaze_analysis_pipeline/visualisation.md) plus one additional *draw_detected_markers* field.
## Pipeline execution
-### Detect ArUco markers, estimate scene pose and project AOI
+### Detect ArUco markers, estimate scene pose and project 3D AOI
-Pass each camera image to [ArUcoCamera.watch](../../argaze.md/#argaze.ArFeatures.ArCamera.watch) method to execute the whole pipeline dedicated to ArUco markers detection, scene pose estimation and AOI projection.
+Pass each camera image to [ArUcoCamera.watch](../../argaze.md/#argaze.ArFeatures.ArCamera.watch) method to execute the whole pipeline dedicated to ArUco markers detection, scene pose estimation and 3D AOI projection.
```python
# Assuming that Full HD (1920x1080) video stream or file is opened
@@ -107,19 +107,16 @@ Pass each camera image to [ArUcoCamera.watch](../../argaze.md/#argaze.ArFeatures
# Capture image from video stream of file
image = video_capture.read()
- # Detect ArUco markers, estimate scene pose then, project AOI into camera frame
+ # Detect ArUco markers, estimate scene pose, then project 3D AOI into camera frame
aruco_camera.watch(image)
- # Display ArUcoCamera frame image to display detected ArUco markers, scene pose, AOI projection and ArFrame visualisation.
+ # Display ArUcoCamera frame image to show detected ArUco markers, scene pose, 2D AOI projection and ArFrame visualisation.
... aruco_camera.image()
```
-!!! warning "Pose estimation error"
- ArUco markers pose estimation algorithm can lead to errors due to geometric ambiguities as explain in [this article](https://ieeexplore.ieee.org/document/1717461). To discard such ambiguous cases, markers should **as less as possible be parallel to camera plan**.
-
### Analyse timestamped gaze positions into camera frame
-As mentioned above, [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) inherits from [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) and so, benefits from all the services described in [gaze analysis pipeline section](./user_guide/gaze_analysis_pipeline/introduction.md).
+As mentioned above, [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) inherits from [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) and so, benefits from all the services described in [gaze analysis pipeline section](../gaze_analysis_pipeline/introduction.md).
Particularly, timestamped gaze positions can be passed one by one to [ArUcoCamera.look](../../argaze.md/#argaze.ArFeatures.ArFrame.look) method to execute the whole pipeline dedicated to gaze analysis.
@@ -135,4 +132,4 @@ Particularly, timestamped gaze positions can be passed one by one to [ArUcoCamer
At this point, the [ArUcoCamera.watch](../../argaze.md/#argaze.ArFeatures.ArCamera.watch) method only detects ArUco markers and the [ArUcoCamera.look](../../argaze.md/#argaze.ArFeatures.ArCamera.look) method only process gaze movement identification without any AOI support as no scene description is provided into the JSON configuration file.
- Read the next chapters to learn [how to estimate scene pose](pose_estimation.md) and [how to project AOI](aoi_projection.md). \ No newline at end of file
+ Read the next chapters to learn [how to estimate scene pose](pose_estimation.md), [how to describe the scene's 3D AOI](aoi_3d_description.md) and [how to project them into the camera frame](aoi_3d_projection.md). \ No newline at end of file
diff --git a/docs/user_guide/aruco_markers_pipeline/introduction.md b/docs/user_guide/aruco_markers_pipeline/introduction.md
index 836569a..37ab055 100644
--- a/docs/user_guide/aruco_markers_pipeline/introduction.md
+++ b/docs/user_guide/aruco_markers_pipeline/introduction.md
@@ -1,29 +1,29 @@
Overview
========
-This section explains how to build augmented reality pipelines based on ArUco Markers technology for various use cases.
+This section explains how to build augmented reality pipelines based on [ArUco Markers technology](https://www.sciencedirect.com/science/article/abs/pii/S0031320314000235) for various use cases.
-The OpenCV library provides a module to detect fiducial markers into a picture and estimate their poses (cf [OpenCV ArUco tutorial page](https://docs.opencv.org/4.x/d5/dae/tutorial_aruco_detection.html)).
+The OpenCV library provides a module to detect fiducial markers in a picture and estimate their poses.
-![OpenCV ArUco markers](https://pyimagesearch.com/wp-content/uploads/2020/12/aruco_generate_tags_header.png)
+![OpenCV ArUco markers](../../img/opencv_aruco.png)
-The ArGaze [ArUcoMarkers submodule](../../argaze.md/#argaze.ArUcoMarkers) eases markers creation, optic calibration, markers detection and 3D scene pose estimation through a set of high level classes.
+The ArGaze [ArUcoMarkers submodule](../../argaze.md/#argaze.ArUcoMarkers) eases marker creation, marker detection and 3D scene pose estimation through a set of high-level classes.
-First, let's look at the schema below: it gives an overview of the main notions involved in the following chapters.
+<!-- First, let's look at the schema below: it gives an overview of the main notions involved in the following chapters. -->
-![ArUco markers pipeline](../../img/aruco_markers_pipeline.png)
+<!-- ![ArUco markers pipeline](../../img/aruco_markers_pipeline.png) -->
To build your own ArUco markers pipeline, you need to know:
* [How to setup ArUco markers into a scene](aruco_markers_description.md),
-* [How to describe scene's AOI](aoi_description.md),
* [How to load and execute ArUco markers pipeline](configuration_and_execution.md),
* [How to estimate scene pose](pose_estimation.md),
-* [How to project AOI into camera frame](aoi_projection.md),
-* [How to visualize ArUcoCamera and ArUcoScenes](visualisation.md)
+* [How to describe the scene's 3D AOI](aoi_3d_description.md),
+* [How to project 3D AOI into camera frame](aoi_3d_projection.md),
+* [How to define a 3D AOI as a frame](aoi_3d_frame.md).
More advanced features are also explained like:
-* [How to script ArUco markers pipeline](advanced_topics/scripting.md)
-* [How to calibrate optic parameters](optic_parameters_calibration.md)
-* [How to improve ArUco markers detection](advanced_topics/aruco_detector_configuration.md)
+* [How to script ArUco markers pipeline](advanced_topics/scripting.md),
+* [How to calibrate optic parameters](advanced_topics/optic_parameters_calibration.md),
+* [How to improve ArUco markers detection](advanced_topics/aruco_detector_configuration.md).
diff --git a/docs/user_guide/aruco_markers_pipeline/pose_estimation.md b/docs/user_guide/aruco_markers_pipeline/pose_estimation.md
index 6acafee..6b58b24 100644
--- a/docs/user_guide/aruco_markers_pipeline/pose_estimation.md
+++ b/docs/user_guide/aruco_markers_pipeline/pose_estimation.md
@@ -1,13 +1,13 @@
Estimate scene pose
===================
-An [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) class defines a space with [ArUco markers inside](aruco_markers_description.md) helping to estimate scene pose when they are watched by [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera).
+Once [ArUco markers are placed into a scene](aruco_markers_description.md) and [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) is [configured](configuration_and_execution.md), scene pose can be estimated.
![Scene pose estimation](../../img/aruco_camera_pose_estimation.png)
## Add ArUcoScene to ArUcoCamera JSON configuration file
-An [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) instance can contains multiples [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene).
+The [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) class defines a space with [ArUco markers inside](aruco_markers_description.md), helping to estimate the scene pose when they are watched by an [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera).
Here is an extract from the JSON [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) configuration file with a sample where one scene is added and displayed:
@@ -27,17 +27,17 @@ Here is an extract from the JSON [ArUcoCamera](../../argaze.md/#argaze.ArUcoMark
"rotation": [0, 0, 0]
},
"1": {
- "translation": [0, 25.5, 2],
- "rotation": [0, 45, 0]
+ "translation": [0, 26.5, 2],
+ "rotation": [0, 70, 0]
},
"2": {
- "translation": [35.5, 0, 3],
- "rotation": [-45, 0, 0]
+ "translation": [46.5, 0, 7],
+ "rotation": [-90, 0, 0]
}
}
}
}
- }
+ },
...
"image_parameters": {
...
@@ -51,10 +51,6 @@ Here is an extract from the JSON [ArUcoCamera](../../argaze.md/#argaze.ArUcoMark
"draw_places": {
"color": [0, 0, 0],
"border_size": 1
- },
- "draw_places_axes": {
- "thickness": 1,
- "length": 2.5
}
}
}
@@ -65,11 +61,15 @@ Here is an extract from the JSON [ArUcoCamera](../../argaze.md/#argaze.ArUcoMark
Now, let's understand the meaning of each JSON entry.
-### "MyScene"
+### *scenes*
+
+An [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) instance can contain multiple [ArUcoScenes](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) stored by name.
+
+### MyScene
-The name of the [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene). Basically useful for visualisation purpose.
+The name of an [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene). Mainly useful for visualisation purposes.
-### ArUco markers group
+### *aruco_markers_group*
The 3D places of ArUco markers into the scene as defined at [ArUco markers description chapter](aruco_markers_description.md). Thanks to this description, it is possible to estimate the pose of [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) in [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) frame.
@@ -77,6 +77,6 @@ The 3D places of ArUco markers into the scene as defined at [ArUco markers descr
[ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) pose estimation is done when calling the [ArUcoCamera.watch](../../argaze.md/#argaze.ArFeatures.ArCamera.watch) method.
-### Draw scenes
+### *draw_scenes*
The drawing parameters of each loaded [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) in [ArUcoCamera.image](../../argaze.md/#argaze.ArFeatures.ArFrame.image).
diff --git a/docs/user_guide/gaze_analysis/gaze_movement.md b/docs/user_guide/gaze_analysis/gaze_movement.md
deleted file mode 100644
index 83f67e1..0000000
--- a/docs/user_guide/gaze_analysis/gaze_movement.md
+++ /dev/null
@@ -1,163 +0,0 @@
-Gaze movement
-=============
-
-## Definition
-
-!!! note
-
- *"The act of classifying eye movements into distinct events is, on a general level, driven by a desire to isolate different intervals of the data stream strongly correlated with certain oculomotor or cognitive properties."*
-
- Citation from ["One algorithm to rule them all? An evaluation and discussion of ten eye movement event-detection algorithms"](https://link.springer.com/article/10.3758/s13428-016-0738-9) article.
-
-[GazeFeatures](../../argaze.md/#argaze.GazeFeatures) defines abstract [GazeMovement](../../argaze.md/#argaze.GazeFeatures.GazeMovement) class, then abstract [Fixation](../../argaze.md/#argaze.GazeFeatures.Fixation) and [Saccade](../../argaze.md/#argaze.GazeFeatures.Saccade) classes which inherit from [GazeMovement](../../argaze.md/#argaze.GazeFeatures.GazeMovement).
-
-The **positions** [GazeMovement](../../argaze.md/#argaze.GazeFeatures.GazeMovement) attribute contain all [GazePositions](../../argaze.md/#argaze.GazeFeatures.GazePosition) belonging to itself.
-
-![Fixation and Saccade](../../img/fixation_and_saccade.png)
-
-## Identification
-
-[GazeFeatures](../../argaze.md/#argaze.GazeFeatures) defines abstract [GazeMovementIdentifier](../../argaze.md/#argaze.GazeFeatures.GazeMovementIdentifier) classe to let add various identification algorithms.
-
-Some gaze movement identification algorithms are available thanks to [GazeAnalysis](../../argaze.md/#argaze.GazeAnalysis) submodule:
-
-* [Dispersion threshold identification (I-DT)](../../argaze.md/#argaze.GazeAnalysis.DispersionThresholdIdentification)
-* [Velocity threshold identification (I-VT)](../../argaze.md/#argaze.GazeAnalysis.VelocityThresholdIdentification)
-
-### Identify method
-
-[GazeMovementIdentifier.identify](../../argaze.md/#argaze.GazeFeatures.GazeMovementIdentifier.identify) method allows to fed its identification algorithm with successive gaze positions to output Fixation, Saccade or any kind of GazeMovement instances.
-
-Here is a sample of code based on [I-DT](../../argaze.md/#argaze.GazeAnalysis.DispersionThresholdIdentification) algorithm to illustrate how to use it:
-
-``` python
-from argaze import GazeFeatures
-from argaze.GazeAnalysis import DispersionThresholdIdentification
-
-# Create a gaze movement identifier based on dispersion algorithm with 50px max deviation 200 ms max duration thresholds
-gaze_movement_identifier = DispersionThresholdIdentification.GazeMovementIdentifier(50, 200)
-
-# Assuming that timestamped gaze positions are provided through live stream or later data reading
-...:
-
- gaze_movement = gaze_movement_identifier.identify(timestamp, gaze_position)
-
- # Fixation identified
- if GazeFeatures.is_fixation(gaze_movement):
-
- # Access to first gaze position of identified fixation
- start_ts, start_position = gaze_movement.positions.first
-
- # Access to fixation duration
- print('duration: {gaze_movement.duration}')
-
- # Iterate over all gaze positions of identified fixation
- for ts, position in gaze_movement.positions.items():
-
- # Do something with each fixation position
- ...
-
- # Saccade identified
- elif GazeFeatures.is_saccade(gaze_movement):
-
- # Access to first gaze position of identified saccade
- start_ts, start_position = gaze_movement.positions.first
-
- # Access to saccade amplitude
- print('amplitude: {gaze_movement.amplitude}')
-
- # Iterate over all gaze positions of identified saccade
- for ts, position in gaze_movement.positions.items():
-
- # Do something with each saccade position
- ...
-
- # No gaze movement identified
- else:
-
- continue
-
-```
-
-### Browse method
-
-[GazeMovementIdentifier.browse](../../argaze.md/#argaze.GazeFeatures.GazeMovementIdentifier.browse) method allows to pass a [TimeStampedGazePositions](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazePositions) buffer to apply identification algorithm on all gaze positions inside.
-
-Identified gaze movements are returned through:
-
-* [TimeStampedGazeMovements](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazeMovements) instance where all fixations are stored by starting gaze position timestamp.
-* [TimeStampedGazeMovements](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazeMovements) instance where all saccades are stored by starting gaze position timestamp.
-* [TimeStampedGazeStatus](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazeStatus) instance where all gaze positions are linked to a fixation or saccade index.
-
-``` python
-# Assuming that timestamped gaze positions are provided through data reading
-
-ts_fixations, ts_saccades, ts_status = gaze_movement_identifier.browse(ts_gaze_positions)
-
-```
-
-* ts_fixations would look like:
-
-|timestamp|positions |duration|dispersion|focus |
-|:--------|:-------------------------------------------------------------|:-------|:---------|:--------|
-|60034 |{"60034":[846,620], "60044":[837,641], "60054":[835,649], ...}|450 |40 |(840,660)|
-|60504 |{"60504":[838,667], "60514":[838,667], "60524":[837,669], ...}|100 |38 |(834,651)|
-|... |... |... |.. |... |
-
-* ts_saccades would look like:
-
-|timestamp|positions |duration|
-|:--------|:---------------------------------------|:-------|
-|60484 |{"60484":[836, 669], "60494":[837, 669]}|10 |
-|60594 |{"60594":[833, 613], "60614":[927, 601]}|20 |
-|... |... |... |
-
-* ts_status would look like:
-
-|timestamp|position |type |index|
-|:--------|:---------|:-------|:----|
-|60034 |(846, 620)|Fixation|1 |
-|60044 |(837, 641)|Fixation|1 |
-|... |... |... |. |
-|60464 |(836, 668)|Fixation|1 |
-|60474 |(836, 668)|Fixation|1 |
-|60484 |(836, 669)|Saccade |1 |
-|60494 |(837, 669)|Saccade |1 |
-|60504 |(838, 667)|Fixation|2 |
-|60514 |(838, 667)|Fixation|2 |
-|... |... |... |. |
-|60574 |(825, 629)|Fixation|2 |
-|60584 |(829, 615)|Fixation|2 |
-|60594 |(833, 613)|Saccade |2 |
-|60614 |(927, 601)|Saccade |2 |
-|60624 |(933, 599)|Fixation|3 |
-|60634 |(934, 603)|Fixation|3 |
-|... |... |... |. |
-
-
-!!! note
- [TimeStampedGazeMovements](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazeMovements), [TimeStampedGazeMovements](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazeMovements) and [TimeStampedGazeStatus](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazeStatus) classes inherit from [TimeStampedBuffer](../../argaze.md/#argaze.DataStructures.TimeStampedBuffer) class.
-
- Read [Timestamped data](../timestamped_data/introduction.md) section to understand all features it provides.
-
-### Generator method
-
-[GazeMovementIdentifier](../../argaze.md/#argaze.GazeFeatures.GazeMovementIdentifier) can be called with a [TimeStampedGazePositions](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazePositions) buffer in argument to generate gaze movement each time one is identified.
-
-``` python
-# Assuming that timestamped gaze positions are provided through data reading
-
-for ts, gaze_movement in gaze_movement_identifier(ts_gaze_positions):
-
- # Fixation identified
- if GazeFeatures.is_fixation(gaze_movement):
-
- # Do something with each fixation
- ...
-
- # Saccade identified
- elif GazeFeatures.is_saccade(gaze_movement):
-
- # Do something with each saccade
- ...
-``` \ No newline at end of file
diff --git a/docs/user_guide/gaze_analysis/gaze_position.md b/docs/user_guide/gaze_analysis/gaze_position.md
deleted file mode 100644
index 48495b4..0000000
--- a/docs/user_guide/gaze_analysis/gaze_position.md
+++ /dev/null
@@ -1,98 +0,0 @@
-Gaze position
-=============
-
-[GazeFeatures](../../argaze.md/#argaze.GazeFeatures) defines a [GazePosition](../../argaze.md/#argaze.GazeFeatures.GazePosition) class to handle point coordinates with a precision value.
-
-``` python
-from argaze import GazeFeatures
-
-# Define a basic gaze position
-gaze_position = GazeFeatures.GazePosition((123, 456))
-
-# Define a gaze position with a precision value
-gaze_position = GazeFeatures.GazePosition((789, 765), precision=10)
-
-# Access to gaze position value and precision
-print(f'position: {gaze_position.value}')
-print(f'precision: {gaze_position.precision}')
-
-```
-
-## Validity
-
-[GazeFeatures](../../argaze.md/#argaze.GazeFeatures) defines also a [UnvalidGazePosition](../../argaze.md/#argaze.GazeFeatures.UnvalidGazePosition) class that inherits from [GazePosition](../../argaze.md/#argaze.GazeFeatures.GazePosition) to handle case where no gaze position exists because of any specific device reason.
-
-``` python
-from argaze import GazeFeatures
-
-# Define a basic unvalid gaze position
-gaze_position = GazeFeatures.UnvalidGazePosition()
-
-# Define a basic unvalid gaze position with a message value
-gaze_position = GazeFeatures.UnvalidGazePosition("Something bad happened")
-
-# Access to gaze position validity
-print(f'validity: {gaze_position.valid}')
-
-```
-
-## Distance
-
-[GazePosition](../../argaze.md/#argaze.GazeFeatures.GazePosition) class provides a **distance** method to calculate the distance to another gaze position instance.
-
-![Distance](../../img/distance.png)
-
-``` python
-# Distance between A and B positions
-d = gaze_position_A.distance(gaze_position_B)
-```
-
-## Overlapping
-
-[GazePosition](../../argaze.md/#argaze.GazeFeatures.GazePosition) class provides an **overlap** method to test if a gaze position overlaps another one considering their precisions.
-
-![Gaze overlapping](../../img/overlapping.png)
-
-``` python
-# Check that A overlaps B
-if gaze_position_A.overlap(gaze_position_B):
-
- # Do something if A overlaps B
- ...
-
-# Check that A overlaps B and B overlaps A
-if gaze_position_A.overlap(gaze_position_B, both=True):
-
- # Do something if A overlaps B AND B overlaps A
- ...
-```
-
-## Timestamped gaze positions
-
-[TimeStampedGazePositions](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazePositions) inherits from [TimeStampedBuffer](../../argaze.md/#argaze.DataStructures.TimeStampedBuffer) class to handle especially gaze positions.
-
-### Import from dataframe
-
-It is possible to load timestamped gaze positions from a [Pandas DataFrame](https://pandas.pydata.org/docs/getting_started/intro_tutorials/01_table_oriented.html#min-tut-01-tableoriented) object.
-
-```python
-import pandas
-
-# Load gaze positions from a CSV file into Panda Dataframe
-dataframe = pandas.read_csv('gaze_positions.csv', delimiter="\t", low_memory=False)
-
-# Convert Panda dataframe into TimestampedGazePositions buffer precising the use of each specific column labels
-ts_gaze_positions = GazeFeatures.TimeStampedGazePositions.from_dataframe(dataframe, timestamp = 'Recording timestamp [ms]', x = 'Gaze point X [px]', y = 'Gaze point Y [px]')
-
-```
-### Iterator
-
-Like [TimeStampedBuffer](../../argaze.md/#argaze.DataStructures.TimeStampedBuffer), [TimeStampedGazePositions](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazePositions) class provides iterator feature:
-
-```python
-for timestamp, gaze_position in ts_gaze_positions.items():
-
- # Do something with each gaze position
- ...
-
-```
diff --git a/docs/user_guide/gaze_analysis/introduction.md b/docs/user_guide/gaze_analysis/introduction.md
deleted file mode 100644
index bf818ba..0000000
--- a/docs/user_guide/gaze_analysis/introduction.md
+++ /dev/null
@@ -1,7 +0,0 @@
-Gaze analysis
-=============
-
-This section refers to:
-
-* [GazeFeatures](../../argaze.md/#argaze.GazeFeatures)
-* [GazeAnalysis](../../argaze.md/#argaze.GazeAnalysis) \ No newline at end of file
diff --git a/docs/user_guide/gaze_analysis/scan_path.md b/docs/user_guide/gaze_analysis/scan_path.md
deleted file mode 100644
index 46af28b..0000000
--- a/docs/user_guide/gaze_analysis/scan_path.md
+++ /dev/null
@@ -1,169 +0,0 @@
-Scan path
-=========
-
-[GazeFeatures](../../argaze.md/#argaze.GazeFeatures) defines classes to handle successive fixations/saccades and analyse their spatial or temporal properties.
-
-## Fixation based scan path
-
-### Definition
-
-The [ScanPath](../../argaze.md/#argaze.GazeFeatures.ScanPath) class is defined as a list of [ScanSteps](../../argaze.md/#argaze.GazeFeatures.ScanStep) which are defined as a fixation and a consecutive saccade.
-
-![Fixation based scan path](../../img/scan_path.png)
-
-As fixations and saccades are identified, the scan path is built by calling respectively [append_fixation](../../argaze.md/#argaze.GazeFeatures.ScanPath.append_fixation) and [append_saccade](../../argaze.md/#argaze.GazeFeatures.ScanPath.append_saccade) methods.
-
-### Analysis
-
-[GazeFeatures](../../argaze.md/#argaze.GazeFeatures) defines abstract [ScanPathAnalyzer](../../argaze.md/#argaze.GazeFeatures.ScanPathAnalyzer) classe to let add various analysis algorithms.
-
-Some scan path analysis are available thanks to [GazeAnalysis](../../argaze.md/#argaze.GazeAnalysis) submodule:
-
-* [K-Coefficient](../../argaze.md/#argaze.GazeAnalysis.KCoefficient)
-* [Nearest Neighbor Index](../../argaze.md/#argaze.GazeAnalysis.NearestNeighborIndex)
-* [Exploit Explore Ratio](../../argaze.md/#argaze.GazeAnalysis.ExploitExploreRatio)
-
-### Example
-
-Here is a sample of code to illustrate how to built a scan path and analyze it:
-
-``` python
-from argaze import GazeFeatures
-from argaze.GazeAnalysis import KCoefficient
-
-# Create a empty scan path
-scan_path = GazeFeatures.ScanPath()
-
-# Create a K coefficient analyzer
-kc_analyzer = KCoefficient.ScanPathAnalyzer()
-
-# Assuming a gaze movement is identified at ts time
-...:
-
- # Fixation identified
- if GazeFeatures.is_fixation(gaze_movement):
-
- # Append fixation to scan path : no step is created
- scan_path.append_fixation(ts, gaze_movement)
-
- # Saccade identified
- elif GazeFeatures.is_saccade(gaze_movement):
-
- # Append saccade to scan path : a new step should be created
- new_step = scan_path.append_saccade(data_ts, gaze_movement)
-
- # Analyse scan path
- if new_step:
-
- K = kc_analyzer.analyze(scan_path)
-
- # Do something with K metric
- ...
-```
-
-## AOI based scan path
-
-### Definition
-
-The [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath) class is defined as a list of [AOIScanSteps](../../argaze.md/#argaze.GazeFeatures.AOIScanStep) which are defined as set of consecutives fixations looking at a same Area Of Interest (AOI) and a consecutive saccade.
-
-![AOI based scan path](../../img/aoi_scan_path.png)
-
-As fixations and saccades are identified, the scan path is built by calling respectively [append_fixation](../../argaze.md/#argaze.GazeFeatures.AOIScanPath.append_fixation) and [append_saccade](../../argaze.md/#argaze.GazeFeatures.AOIScanPath.append_saccade) methods.
-
-### Analysis
-
-[GazeFeatures](../../argaze.md/#argaze.GazeFeatures) defines abstract [AOIScanPathAnalyzer](../../argaze.md/#argaze.GazeFeatures.AOIScanPathAnalyzer) classe to let add various analysis algorithms.
-
-Some scan path analysis are available thanks to [GazeAnalysis](../../argaze.md/#argaze.GazeAnalysis) submodule:
-
-* [Transition matrix](../../argaze.md/#argaze.GazeAnalysis.TransitionMatrix)
-* [Entropy](../../argaze.md/#argaze.GazeAnalysis.Entropy)
-* [Lempel-Ziv complexity](../../argaze.md/#argaze.GazeAnalysis.LempelZivComplexity)
-* [N-Gram](../../argaze.md/#argaze.GazeAnalysis.NGram)
-* [K-modified coefficient](../../argaze.md/#argaze.GazeAnalysis.KCoefficient)
-
-### Example
-
-Here is a sample of code to illustrate how to built a AOI scan path and analyze it:
-
-``` python
-from argaze import GazeFeatures
-from argaze.GazeAnalysis import LempelZivComplexity
-
-# Assuming all AOI names are listed
-...
-
-# Create a empty AOI scan path
-aoi_scan_path = GazeFeatures.AOIScanPath(aoi_names)
-
-# Create a Lempel-Ziv complexity analyzer
-lzc_analyzer = LempelZivComplexity.AOIScanPathAnalyzer()
-
-# Assuming a gaze movement is identified at ts time
-...:
-
- # Fixation identified
- if GazeFeatures.is_fixation(gaze_movement):
-
- # Assuming fixation is detected as inside an AOI
- ...
-
- # Append fixation to AOI scan path : a new step should be created
- new_step = aoi_scan_path.append_fixation(ts, gaze_movement, looked_aoi_name)
-
- # Analyse AOI scan path
- if new_step:
-
- LZC = kc_analyzer.analyze(aoi_scan_path)
-
- # Do something with LZC metric
- ...
-
- # Saccade identified
- elif GazeFeatures.is_saccade(gaze_movement):
-
- # Append saccade to scan path : no step is created
- aoi_scan_path.append_saccade(data_ts, gaze_movement)
-
-```
-
-### Advanced
-
-The [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath) class provides some advanced features to analyse it.
-
-#### Letter sequence
-
-When a new [AOIScanStep](../../argaze.md/#argaze.GazeFeatures.AOIScanStep) is created, the [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath) internally affects a unique letter index related to its AOI to ease pattern analysis.
-Then, the [AOIScanPath letter_sequence](../../argaze.md/#argaze.GazeFeatures.AOIScanPath.letter_sequence) property returns the concatenation of each [AOIScanStep](../../argaze.md/#argaze.GazeFeatures.AOIScanStep) letter.
-The [AOIScanPath get_letter_aoi](../../argaze.md/#argaze.GazeFeatures.AOIScanPath.get_letter_aoi) method helps to get back the AOI related to a letter index.
-
-``` python
-# Assuming the following AOI scan path is built: Foo > Bar > Shu > Foo
-aoi_scan_path = ...
-
-# Letter sequence representation should be: 'ABCA'
-print(aoi_scan_path.letter_sequence)
-
-# Output should be: 'Bar'
-print(aoi_scan_path.get_letter_aoi('B'))
-
-```
-
-#### Transition matrix
-
-When a new [AOIScanStep](../../argaze.md/#argaze.GazeFeatures.AOIScanStep) is created, the [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath) internally counts the number of transitions from an AOI to another AOI to ease Markov chain analysis.
-Then, the [AOIScanPath transition_matrix](../../argaze.md/#argaze.GazeFeatures.AOIScanPath.transition_matrix) property returns a [Pandas DataFrame](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html) where indexes are transition departures and columns are transition destinations.
-
-Here is an exemple of transition matrix for the following [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath): Foo > Bar > Shu > Foo > Bar
-
-| |Foo|Bar|Shu|
-|:--|:--|:--|:--|
-|Foo|0 |2 |0 |
-|Bar|0 |0 |1 |
-|Shu|1 |0 |0 |
-
-
-#### Fixations count
-
-The [AOIScanPath fixations_count](../../argaze.md/#argaze.GazeFeatures.AOIScanPath.fixations_count) method returns the total number of fixations in the whole scan path and a dictionary to get the fixations count per AOI.
diff --git a/docs/user_guide/gaze_analysis_pipeline/advanced_topics/module_loading.md b/docs/user_guide/gaze_analysis_pipeline/advanced_topics/module_loading.md
index 0b45368..f2e84d6 100644
--- a/docs/user_guide/gaze_analysis_pipeline/advanced_topics/module_loading.md
+++ b/docs/user_guide/gaze_analysis_pipeline/advanced_topics/module_loading.md
@@ -1,7 +1,7 @@
Loading modules from another package
====================================
-It possible to load GazeMovementIdentifier, ScanPathAnalyzer or AOIScanPathAnalyzer modules from another [python package](https://docs.python.org/3/tutorial/modules.html#packages).
+It is possible to load [GazeMovementIdentifier](../../../argaze.md/#argaze.GazeFeatures.GazeMovementIdentifier), [ScanPathAnalyzer](../../../argaze.md/#argaze.GazeFeatures.ScanPathAnalyzer), [AOIMatcher](../../../argaze.md/#argaze.GazeFeatures.AOIMatcher) or [AOIScanPathAnalyzer](../../../argaze.md/#argaze.GazeFeatures.AOIScanPathAnalyzer) modules from another [python package](https://docs.python.org/3/tutorial/modules.html#packages).
To do so, simply prepend the package where to find the module into the JSON configuration file:
@@ -20,6 +20,12 @@ To do so, simply prepend the package where to find the module into the JSON conf
}
}
...
+ "aoi_matcher": {
+ "my_package.MyAOIMatcherAlgorithm": {
+ "specific_plugin_parameter": 0
+ }
+ }
+ ...
"aoi_scan_path_analyzers": {
"my_package.MyAOIScanPathAnalyzerAlgorithm": {
"specific_plugin_parameter": 0
@@ -28,7 +34,7 @@ To do so, simply prepend the package where to find the module into the JSON conf
}
```
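+
+Here is a hypothetical skeleton for such a module. The class and parameter names mirror the JSON entries above; the exact abstract method(s) to implement are those declared by the [AOIMatcher](../../../argaze.md/#argaze.GazeFeatures.AOIMatcher) base class:
+
+```python
+# my_package.py - hypothetical skeleton, not actual ArGaze API usage
+from argaze import GazeFeatures
+
+class MyAOIMatcherAlgorithm(GazeFeatures.AOIMatcher):
+
+    def __init__(self, specific_plugin_parameter: int = 0):
+
+        super().__init__()
+
+        self.specific_plugin_parameter = specific_plugin_parameter
+
+    # The matching method(s) declared as abstract by AOIMatcher remain to be implemented here
+    ...
+```
+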
-Then, load your package from the python script where the ArFrame is created.
+Then, load your package from the python script where the [ArFrame](../../../argaze.md/#argaze.ArFeatures.ArFrame) is created.
```python
from argaze import ArFeatures
diff --git a/docs/user_guide/gaze_analysis_pipeline/advanced_topics/scripting.md b/docs/user_guide/gaze_analysis_pipeline/advanced_topics/scripting.md
index 81efa40..eefeee1 100644
--- a/docs/user_guide/gaze_analysis_pipeline/advanced_topics/scripting.md
+++ b/docs/user_guide/gaze_analysis_pipeline/advanced_topics/scripting.md
@@ -106,7 +106,7 @@ for name, ar_layer in ar_frame.layers.items():
Let's understand the meaning of each returned data.
-### Gaze movement
+### *gaze_movement*
A [GazeMovement](../../../argaze.md/#argaze.GazeFeatures.GazeMovement) once it have been identified by [ArFrame.gaze_movement_identifier](../../../argaze.md/#argaze.ArFeatures.ArFrame) object from incoming consecutive timestamped gaze positions. If no gaze movement have been identified, it returns an [UnvalidGazeMovement](../../../argaze.md/#argaze.GazeFeatures.UnvalidGazeMovement).
@@ -115,25 +115,25 @@ In that case, the returned gaze movement *finished* flag is false.
Then, the returned gaze movement type can be tested thanks to [GazeFeatures.is_fixation](../../../argaze.md/#argaze.GazeFeatures.is_fixation) and [GazeFeatures.is_saccade](../../../argaze.md/#argaze.GazeFeatures.is_saccade) functions.
-### Scan path analysis
+### *scan_path_analysis*
A dictionary with all last scan path analysis if new scan step have been added to the [ArFrame.scan_path](../../../argaze.md/#argaze.ArFeatures.ArFrame) object.
-### Layers analysis
+### *layers_analysis*
A dictionary with all layers AOI scan path analysis if new AOI scan step have been added to an [ArLayer.aoi_scan_path](../../../argaze.md/#argaze.ArFeatures.ArLayer) object.
-### Execution times
+### *execution_times*
A dictionary with each pipeline step execution time.
-### Exception
+### *exception*
A [python Exception](https://docs.python.org/3/tutorial/errors.html#exceptions) object raised during pipeline execution.
## Setup ArFrame image parameters
-[ArFrame.image](../../argaze.md/#argaze.ArFeatures.ArFrame.image) method parameters can be configured thanks to a python dictionary.
+[ArFrame.image](../../../argaze.md/#argaze.ArFeatures.ArFrame.image) method parameters can be configured thanks to a python dictionary.
```python
# Assuming ArFrame is loaded
diff --git a/docs/user_guide/gaze_analysis_pipeline/aoi_2d_description.md b/docs/user_guide/gaze_analysis_pipeline/aoi_2d_description.md
new file mode 100644
index 0000000..4b7ed69
--- /dev/null
+++ b/docs/user_guide/gaze_analysis_pipeline/aoi_2d_description.md
@@ -0,0 +1,57 @@
+Describe 2D AOI
+================
+
+Once [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) is [configured](configuration_and_execution.md), [areas of interest (AOI)](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.AreaOfInterest) need to be described to know what is looked at in the frame.
+
+![2D AOI description](../../img/aoi_2d_description.png)
+
+Following the common computer graphics coordinates convention, all AOI need to be described from a top-left frame corner origin with a coordinate system where:
+
+* +X is pointing to the right,
+* +Y is pointing downward.
+
+!!! warning
+ All AOI spatial values must be given in **pixels**.
+
+### Edit SVG file description
+
+The SVG file format can be exported from most vector graphics editors.
+
+``` xml
+<svg>
+ <path id="GeoSector" d="M860,160L1380,100L1660,400L1380,740L1440,960L920,920L680,800L640,560L860,160Z"/>
+ <rect id="LeftPanel" x="0" y="0" width="350" height="1080"/>
+ <circle id="CircularWidget" cx="1800" cy="120" r="80"/>
+</svg>
+```
+
+Here are common SVG file features needed to describe AOI:
+
+* The *id* attribute indicates the AOI name.
+* The *path* element describes any polygon using only [M, L and Z path instructions](https://www.w3.org/TR/SVG2/paths.html#PathData).
+* The *rect*, *circle* and *ellipse* elements allow describing rectangular, circular and elliptic AOI respectively.
+
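+To illustrate how the *id* attribute carries the AOI name, here is a plain Python sketch relying on the standard library only; it is not ArGaze code and the file name is hypothetical:
+
+``` python
+import xml.etree.ElementTree as ElementTree
+
+# Parse the SVG description above
+svg_root = ElementTree.parse('aoi_2d_description.svg').getroot()
+
+# Print each AOI name with its SVG element type
+for element in svg_root:
+    print(element.get('id'), element.tag)
+```
+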
+### Edit JSON file description
+
+The JSON file format also allows describing AOI.
+
+``` json
+{
+ "GeoSector": [[860, 160], [1380, 100], [1660, 400], [1380, 740], [1440, 960], [920, 920], [680, 800], [640, 560]],
+ "LeftPanel": {
+ "Rectangle": {
+ "x": 0,
+ "y": 0,
+ "width": 350,
+ "height": 1080
+ }
+ },
+ "CircularWidget": {
+ "Circle": {
+ "cx": 1800,
+ "cy": 120,
+ "radius": 80
+ }
+ }
+}
+```
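+
+As a reminder that all values are in pixels from a top-left origin, here is a plain Python sketch testing whether a gaze position falls inside the rectangular AOI above; the file name and coordinates are illustrative:
+
+``` python
+import json
+
+# Load the JSON description above
+with open('aoi_2d_description.json') as json_file:
+    aoi_scene = json.load(json_file)
+
+# An illustrative gaze position in pixels
+x, y = 160, 540
+
+# Axis-aligned rectangle test against the LeftPanel AOI
+rectangle = aoi_scene['LeftPanel']['Rectangle']
+inside = rectangle['x'] <= x <= rectangle['x'] + rectangle['width'] \
+    and rectangle['y'] <= y <= rectangle['y'] + rectangle['height']
+
+# Should print True as (160, 540) falls inside the 350x1080 left panel
+print(inside)
+```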
diff --git a/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md b/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md
index ffc72c7..66fa12f 100644
--- a/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md
+++ b/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md
@@ -1,13 +1,13 @@
-Add AOI analysis
-================
+Enable AOI analysis
+===================
-The [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) class defines a space where to make matching of gaze movements with AOIs and inside which those matchings need to be analyzed.
+Once [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) is [configured](configuration_and_execution.md) and [2D AOI are described](aoi_2d_description.md), fixations can be matched with AOI to build an AOI scan path before analyzing it.
![Layer](../../img/ar_layer.png)
## Add ArLayer to ArFrame JSON configuration file
-An [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) instance can contains multiples [ArLayers](../../argaze.md/#argaze.ArFeatures.ArLayer).
+The [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) class defines a space where fixations are matched with AOI and inside which those matchings are analyzed.
Here is an extract from the JSON [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) configuration file with a sample where one layer is added:
@@ -19,10 +19,22 @@ Here is an extract from the JSON [ArFrame](../../argaze.md/#argaze.ArFeatures.Ar
"layers": {
"MyLayer": {
"aoi_scene" : {
- "upper_left_area": [[0, 0], [960, 0], [960, 540], [0, 540]],
- "upper_right_area": [[960, 0], [1920, 0], [1920, 540], [960, 540]],
- "lower_left_area": [[0, 540], [960, 540], [960, 1080], [0, 1080]],
- "lower_right_area": [[960, 540], [1920, 540], [1920, 1080], [960, 1080]]
+ "GeoSector": [[860, 160], [1380, 100], [1660, 400], [1380, 740], [1440, 960], [920, 920], [680, 800], [640, 560]],
+ "LeftPanel": {
+ "Rectangle": {
+ "x": 0,
+ "y": 0,
+ "width": 350,
+ "height": 1080
+ }
+ },
+ "CircularWidget": {
+ "Circle": {
+ "cx": 1800,
+ "cy": 120,
+ "radius": 80
+ }
+ }
},
"aoi_matcher": {
"DeviationCircleCoverage": {
@@ -51,46 +63,50 @@ Here is an extract from the JSON [ArFrame](../../argaze.md/#argaze.ArFeatures.Ar
Now, let's understand the meaning of each JSON entry.
-### "MyLayer"
+### *layers*
-The name of the [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer). Basically useful for visualisation purpose.
+An [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) instance can contain multiple [ArLayers](../../argaze.md/#argaze.ArFeatures.ArLayer) stored by name.
-### AOI Scene
+### MyLayer
-The [AOIScene](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.AOIScene) defines a set of 2D [AreaOfInterest](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.AreaOfInterest) registered by name.
+The name of an [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer). Mainly useful for visualisation purposes.
-![AOI Scene](../../img/ar_layer_aoi_scene.png)
+### *aoi_scene*
-### AOI Matcher
+The set of 2D AOI in the layer, as defined in the [2D AOI description chapter](aoi_2d_description.md).
-The first [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) pipeline step aims to make match identified gaze movement with an AOI of the scene.
+![AOI scene](../../img/aoi_2d_description.png)
-![AOI Matcher](../../img/ar_layer_aoi_matcher.png)
+### *aoi_matcher*
-The matching algorithm can be selected by instantiating a particular AOIMatcher [from GazeAnalysis submodule](pipeline_modules/aoi_matchers.md) or [from another python package](advanced_topics/module_loading.md).
+The first [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) pipeline step aims to match the identified gaze movement with a layer's AOI.
+
+![AOI matcher](../../img/aoi_matcher.png)
+
+The matching algorithm can be selected by instantiating a particular [AOIMatcher from GazeAnalysis submodule](pipeline_modules/aoi_matchers.md) or [from another python package](advanced_topics/module_loading.md).
In the example file, the choosen matching algorithm is the [Deviation Circle Coverage](../../argaze.md/#argaze.GazeAnalysis.DeviationCircleCoverage) which has one specific *coverage_threshold* attribute.
!!! warning "Mandatory"
- JSON *aoi_matcher* entry is mandatory. Otherwise, the AOIScanPath and AOIScanPathAnalyzers steps are disabled.
+ JSON *aoi_matcher* entry is mandatory. Otherwise, the [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath) and [AOIScanPathAnalyzers](../../argaze.md/#argaze.GazeFeatures.AOIScanPathAnalyzer) steps are disabled.
-### AOI Scan Path
+### *aoi_scan_path*
The second [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) pipeline step aims to build an [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath) defined as a list of [AOIScanSteps](../../argaze.md/#argaze.GazeFeatures.AOIScanStep) made of successive fixations/saccades onto the same AOI.
-![AOI Scan Path](../../img/ar_layer_aoi_scan_path.png)
+![AOI scan path](../../img/aoi_scan_path.png)
-Once identified gaze movements are matched to AOI, they are automatically appended to the AOIScanPath if required.
+Once gaze movements are matched to AOI, they are automatically appended to the AOIScanPath if required.
The [AOIScanPath.duration_max](../../argaze.md/#argaze.GazeFeatures.AOIScanPath.duration_max) attribute is the duration beyond which older AOI scan steps are removed each time new AOI scan steps are added.
!!! note "Optional"
- JSON *aoi_scan_path* entry is not mandatory. If aoi_scan_path_analyzers entry is not empty, the AOIScanPath step is automatically enabled.
+ JSON *aoi_scan_path* entry is not mandatory. If aoi_scan_path_analyzers entry is not empty, the [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath) step is automatically enabled.
-### AOI Scan Path Analyzers
+### *aoi_scan_path_analyzers*
Finally, the last [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) pipeline step consists in passing the previously built [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath) to each loaded [AOIScanPathAnalyzer](../../argaze.md/#argaze.GazeFeatures.AOIScanPathAnalyzer).
-Each analysis algorithm can be selected by instantiating a particular AOIScanPathAnalyzer [from GazeAnalysis submodule](pipeline_modules/aoi_scan_path_analyzers.md) or [from another python package](advanced_topics/module_loading.md).
+Each analysis algorithm can be selected by instantiating a particular [AOIScanPathAnalyzer from GazeAnalysis submodule](pipeline_modules/aoi_scan_path_analyzers.md) or [from another python package](advanced_topics/module_loading.md).
In the example file, the chosen analysis algorithms are the [Basic](../../argaze.md/#argaze.GazeAnalysis.Basic) module, the [TransitionMatrix](../../argaze.md/#argaze.GazeAnalysis.TransitionMatrix) module and the [NGram](../../argaze.md/#argaze.GazeAnalysis.NGram) module which has two specific *n_min* and *n_max* attributes.
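
To make the per-layer analysis concrete, here is a minimal sketch of reading it back once gaze positions are processed. It assumes the configuration above is saved as *./configuration.json* and relies on the six-value [ArFrame.look](../../argaze.md/#argaze.ArFeatures.ArFrame.look) return introduced by this commit; the timestamp and position values are hypothetical.

```python
from argaze import ArFeatures, GazeFeatures

# Load an ArFrame whose configuration declares MyLayer
ar_frame = ArFeatures.ArFrame.from_json('./configuration.json')

# Process one hypothetical timestamped gaze position
gaze_position, gaze_movement, scan_step_analysis, layer_analysis, execution_times, exception = ar_frame.look(0, GazeFeatures.GazePosition((960, 540)))

# AOI scan path analysis comes back grouped by layer name at each new AOI scan step
for layer_name, analysis in layer_analysis.items():

    print(layer_name, analysis)
```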
diff --git a/docs/user_guide/gaze_analysis_pipeline/background.md b/docs/user_guide/gaze_analysis_pipeline/background.md
index a7d59f6..a61abdc 100644
--- a/docs/user_guide/gaze_analysis_pipeline/background.md
+++ b/docs/user_guide/gaze_analysis_pipeline/background.md
@@ -3,7 +3,7 @@ Add a background
Background is an optional [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) attribute to display any image behind pipeline visualisation.
-![Background](../../img/ar_frame_background.png)
+![Background](../../img/background.png)
## Load and display ArFrame background
@@ -16,7 +16,7 @@ Here is an extract from the JSON ArFrame configuration file where a background p
"name": "My FullHD screen",
"size": [1920, 1080],
...
- "background": "./joconde.png",
+ "background": "./bosch.png",
...
"image_parameters": {
...
@@ -30,10 +30,10 @@ Here is an extract from the JSON ArFrame configuration file where a background p
Now, let's understand the meaning of each JSON entry.
-### Background
+### *background*
The path to an image file on disk.
-### Background weight
+### *background_weight*
The weight of background overlay in [ArFrame.image](../../argaze.md/#argaze.ArFeatures.ArFrame.image) between 0 and 1.
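
As a sketch of how the background could also be set from a script (assuming the [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) *background* attribute accepts any OpenCV image matching the frame size, as the ArFeatures code suggests):

```python
import cv2

from argaze import ArFeatures

ar_frame = ArFeatures.ArFrame.from_json('./configuration.json')

# Assuming ./bosch.png exists on disk and matches the 1920x1080 frame size
ar_frame.background = cv2.imread('./bosch.png')
```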
diff --git a/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md b/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md
index 5aca8f3..71d3c33 100644
--- a/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md
+++ b/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md
@@ -26,7 +26,7 @@ Here is a simple JSON [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) conf
},
"scan_path_analyzers": {
"Basic": {},
- "ExploitExploreRatio": {
+ "ExploreExploitRatio": {
"short_fixation_duration_threshold": 0
}
}
@@ -44,24 +44,24 @@ ar_frame = ArFeatures.ArFrame.from_json('./configuration.json')
Now, let's understand the meaning of each JSON entry.
-### Name
+### *name*
The name of the [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame). Basically useful for visualisation purposes.
-### Size
+### *size*
The size of the [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) defines the dimension of the rectangular area where gaze positions are projected. Be aware that gaze positions have to be in the same range of values to be projected into it.
!!! warning "Free spatial unit"
Gaze positions can either be integer or float, pixels, millimeters or whatever you need. The only concern is that all spatial values used in further configurations have to use the same unit.
-### Gaze Movement Identifier
+### *gaze_movement_identifier*
The first [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) pipeline step is to identify fixations or saccades from consecutive timestamped gaze positions.
-![Gaze Movement Identifier](../../img/ar_frame_gaze_movement_identifier.png)
+![Gaze movement identifier](../../img/gaze_movement_identifier.png)
-The identification algorithm can be selected by instantiating a particular GazeMovementIdentifier [from GazeAnalysis submodule](pipeline_modules/gaze_movement_identifiers.md) or [from another python package](advanced_topics/module_loading.md).
+The identification algorithm can be selected by instantiating a particular [GazeMovementIdentifier from GazeAnalysis submodule](pipeline_modules/gaze_movement_identifiers.md) or [from another python package](advanced_topics/module_loading.md).
In the example file, the chosen identification algorithm is the [Dispersion Threshold Identification (I-DT)](../../argaze.md/#argaze.GazeAnalysis.DispersionThresholdIdentification) which has two specific *deviation_max_threshold* and *duration_min_threshold* attributes.
@@ -71,11 +71,11 @@ In the example file, the choosen identification algorithm is the [Dispersion Thr
!!! warning "Mandatory"
JSON *gaze_movement_identifier* entry is mandatory. Otherwise, the ScanPath and ScanPathAnalyzers steps are disabled.
-### Scan Path
+### *scan_path*
The second [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) pipeline step aims to build a [ScanPath](../../argaze.md/#argaze.GazeFeatures.ScanPath) defined as a list of [ScanSteps](../../argaze.md/#argaze.GazeFeatures.ScanStep) made of a fixation and a consecutive saccade.
-![Scan Path](../../img/ar_frame_scan_path.png)
+![Scan path](../../img/scan_path.png)
Once fixations and saccades are identified, they are automatically appended to the ScanPath if required.
@@ -84,13 +84,13 @@ The [ScanPath.duration_max](../../argaze.md/#argaze.GazeFeatures.ScanPath.durati
!!! note "Optional"
JSON *scan_path* entry is not mandatory. If scan_path_analyzers entry is not empty, the ScanPath step is automatically enabled.
-### Scan Path Analyzers
+### *scan_path_analyzers*
Finally, the last [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) pipeline step consists in passing the previously built [ScanPath](../../argaze.md/#argaze.GazeFeatures.ScanPath) to each loaded [ScanPathAnalyzer](../../argaze.md/#argaze.GazeFeatures.ScanPathAnalyzer).
-Each analysis algorithm can be selected by instantiating a particular ScanPathAnalyzer [from GazeAnalysis submodule](pipeline_modules/scan_path_analyzers.md) or [from another python package](advanced_topics/module_loading.md).
+Each analysis algorithm can be selected by instantiating a particular [ScanPathAnalyzer from GazeAnalysis submodule](pipeline_modules/scan_path_analyzers.md) or [from another python package](advanced_topics/module_loading.md).
-In the example file, the choosen analysis algorithms are the [Basic](../../argaze.md/#argaze.GazeAnalysis.Basic) module and the [ExploitExploreRatio](../../argaze.md/#argaze.GazeAnalysis.ExploitExploreRatio) module which has one specific *short_fixation_duration_threshold* attribute.
+In the example file, the chosen analysis algorithms are the [Basic](../../argaze.md/#argaze.GazeAnalysis.Basic) module and the [ExploreExploitRatio](../../argaze.md/#argaze.GazeAnalysis.ExploreExploitRatio) module which has one specific *short_fixation_duration_threshold* attribute.
## Pipeline execution
@@ -107,4 +107,4 @@ Timestamped gaze positions have to be passed one by one to [ArFrame.look](../../
At this point, the [ArFrame.look](../../argaze.md/#argaze.ArFeatures.ArFrame.look) method only processes gaze movement identification and scan path analysis, without any AOI analysis, logging or visualisation support.
- Read the next chapters to learn how to [add AOI analysis](aoi_analysis.md), [log gaze analysis](logging.md) and [visualize pipeline steps](visualisation.md). \ No newline at end of file
+ Read the next chapters to learn how to [describe AOI](aoi_2d_description.md), [add AOI analysis](aoi_analysis.md), [log gaze analysis](logging.md) and [visualize pipeline steps](visualisation.md). \ No newline at end of file
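
As an illustration of the pipeline execution described above, here is a minimal sketch of a look loop. The gaze data source is hypothetical, and the unpacked return values follow the new [ArFrame.look](../../argaze.md/#argaze.ArFeatures.ArFrame.look) signature introduced by this commit:

```python
from argaze import ArFeatures, GazeFeatures

ar_frame = ArFeatures.ArFrame.from_json('./configuration.json')

# Hypothetical (timestamp, x, y) gaze data source
for timestamp, x, y in [(0, 100, 200), (10, 110, 205), (20, 400, 600)]:

    # Identify gaze movements and analyze the scan path at each new gaze position
    gaze_position, gaze_movement, scan_step_analysis, layer_analysis, execution_times, exception = ar_frame.look(timestamp, GazeFeatures.GazePosition((x, y)))

    print(scan_step_analysis)
```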
diff --git a/docs/user_guide/gaze_analysis_pipeline/heatmap.md b/docs/user_guide/gaze_analysis_pipeline/heatmap.md
index fe4246e..6d9ad18 100644
--- a/docs/user_guide/gaze_analysis_pipeline/heatmap.md
+++ b/docs/user_guide/gaze_analysis_pipeline/heatmap.md
@@ -3,7 +3,7 @@ Add a heatmap
Heatmap is an optional [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) pipeline step. It is executed at each new gaze position to update the heatmap image.
-![Heatmap](../../img/ar_frame_heatmap.png)
+![Heatmap](../../img/heatmap.png)
## Enable and display ArFrame heatmap
@@ -33,21 +33,21 @@ Here is an extract from the JSON ArFrame configuration file where heatmap is ena
Now, let's understand the meaning of each JSON entry.
-### Size
+### *size*
The heatmap image size in pixels. A higher size implies a higher CPU load.
-### Sigma
+### *sigma*
The Gaussian point spread to draw at each gaze position.
![Point spread](../../img/point_spread.png)
-### Buffer
+### *buffer*
The size of the point spread image buffer (0 means no buffering) used to visualize only the last N gaze positions.
-### Heatmap weight
+### *heatmap_weight*
The weight of heatmap overlay in [ArFrame.image](../../argaze.md/#argaze.ArFeatures.ArFrame.image) between 0 and 1.
diff --git a/docs/user_guide/gaze_analysis_pipeline/introduction.md b/docs/user_guide/gaze_analysis_pipeline/introduction.md
index 02aa82e..65cc53a 100644
--- a/docs/user_guide/gaze_analysis_pipeline/introduction.md
+++ b/docs/user_guide/gaze_analysis_pipeline/introduction.md
@@ -11,13 +11,14 @@ To build your own gaze analysis pipeline, you need to know:
* [How to edit timestamped gaze positions](timestamped_gaze_positions_edition.md),
* [How to load and execute gaze analysis pipeline](configuration_and_execution.md),
-* [How to add AOI analysis](aoi_analysis.md),
-* [How to visualize ArFrame and ArLayers](visualisation.md),
+* [How to describe AOI](aoi_2d_description.md),
+* [How to enable AOI analysis](aoi_analysis.md),
+* [How to visualize pipeline steps outputs](visualisation.md),
* [How to log resulted gaze analysis](logging.md),
-* [How to make heatmap image](heatmap.md).
+* [How to make heatmap image](heatmap.md),
* [How to add a background image](background.md).
More advanced features are also explained like:
-* [How to script gaze analysis pipeline](advanced_topics/scripting.md)
-* [How to load module from another package](advanced_topics/module_loading.md)
+* [How to script gaze analysis pipeline](advanced_topics/scripting.md),
+* [How to load module from another package](advanced_topics/module_loading.md).
diff --git a/docs/user_guide/gaze_analysis_pipeline/logging.md b/docs/user_guide/gaze_analysis_pipeline/logging.md
index 1dea712..055a535 100644
--- a/docs/user_guide/gaze_analysis_pipeline/logging.md
+++ b/docs/user_guide/gaze_analysis_pipeline/logging.md
@@ -7,7 +7,7 @@ Log gaze analysis
[ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) and [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) have a log attribute to enable analysis logging.
-Here is an extract from the JSON ArFrame configuration file where logging is enabled for the ArFrame and for one ArLayer:
+Here is an extract from the JSON ArFrame configuration file where logging is enabled for the [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) and for one [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer):
```json
{
@@ -91,7 +91,7 @@ Assuming that [ArGaze.GazeAnalysis.NGram](../../argaze.md/#argaze.GazeAnalysis.N
|timestamped|ngrams_count|
|:----------|:-----------|
|5687 |"{3: {}, 4: {}, 5: {}}"|
-|6208 |"{3: {('upper_left_corner', 'lower_left_corner', 'lower_right_corner'): 1}, 4: {}, 5: {}}"|
+|6208 |"{3: {('LeftPanel', 'GeoSector', 'CircularWidget'): 1}, 4: {}, 5: {}}"|
|... |... |
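
As a sketch of how such logged analysis could be exploited afterward (assuming the analysis above was exported to a hypothetical *ngram.csv* file with the columns shown):

```python
import ast

import pandas

# Read the logged NGram analysis back (hypothetical file path)
log = pandas.read_csv('./ngram.csv')

# Each ngrams_count cell is a stringified dict like
# "{3: {('LeftPanel', 'GeoSector', 'CircularWidget'): 1}, 4: {}, 5: {}}"
log['ngrams_count'] = log['ngrams_count'].apply(ast.literal_eval)
```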
diff --git a/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/aoi_matchers.md b/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/aoi_matchers.md
index c8fa63c..61338cc 100644
--- a/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/aoi_matchers.md
+++ b/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/aoi_matchers.md
@@ -3,7 +3,7 @@ AOI matchers
ArGaze provides ready-to-use AOI matching algorithms.
-Here are JSON samples to include the chosen module inside [ArLayer configuration](../ar_layer_configuration_and_execution.md) *aoi_matcher* entry.
+Here are JSON samples to include the chosen module inside [ArLayer configuration](../aoi_analysis.md) *aoi_matcher* entry.
## Deviation circle coverage
diff --git a/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/aoi_scan_path_analyzers.md b/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/aoi_scan_path_analyzers.md
index 8d02967..ad1832d 100644
--- a/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/aoi_scan_path_analyzers.md
+++ b/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/aoi_scan_path_analyzers.md
@@ -3,7 +3,7 @@ AOI scan path analyzers
ArGaze provides ready-to-use AOI scan path analysis algorithms.
-Here are JSON samples to include a chosen module inside [ArLayer configuration](../ar_layer_configuration_and_execution.md) *aoi_scan_path_analyzers* entry.
+Here are JSON samples to include a chosen module inside [ArLayer configuration](../aoi_analysis.md) *aoi_scan_path_analyzers* entry.
## Basic metrics
diff --git a/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/scan_path_analyzers.md b/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/scan_path_analyzers.md
index afba844..f9f757a 100644
--- a/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/scan_path_analyzers.md
+++ b/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/scan_path_analyzers.md
@@ -13,15 +13,15 @@ Here are JSON samples to include a chosen module inside [ArFrame configuration](
[See in code reference](../../../argaze.md/#argaze.GazeAnalysis.Basic.ScanPathAnalyzer)
-## Exploit/Explore ratio
+## Explore/Exploit ratio
```json
-"ExploitExploreRatio": {
+"ExploreExploitRatio": {
"short_fixation_duration_threshold": 0
}
```
-[See in code reference](../../../argaze.md/#argaze.GazeAnalysis.ExploitExploreRatio.ScanPathAnalyzer)
+[See in code reference](../../../argaze.md/#argaze.GazeAnalysis.ExploreExploitRatio.ScanPathAnalyzer)
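
As a usage sketch grounded in the renamed module's test, the analyzer can also be driven from a script (assuming *scan_path* is a [ScanPath](../../../argaze.md/#argaze.GazeFeatures.ScanPath) already filled by the pipeline):

```python
from argaze.GazeAnalysis import ExploreExploitRatio

# Instantiate the renamed analyzer with its specific attribute
xxr_analyzer = ExploreExploitRatio.ScanPathAnalyzer(short_fixation_duration_threshold=0)

# Analyze an existing scan path, then read the renamed result attribute
xxr_analyzer.analyze(scan_path)

print(xxr_analyzer.explore_exploit_ratio)
```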
## K coefficient
diff --git a/docs/user_guide/gaze_analysis_pipeline/timestamped_gaze_positions_edition.md b/docs/user_guide/gaze_analysis_pipeline/timestamped_gaze_positions_edition.md
index 93d2a65..2156f3b 100644
--- a/docs/user_guide/gaze_analysis_pipeline/timestamped_gaze_positions_edition.md
+++ b/docs/user_guide/gaze_analysis_pipeline/timestamped_gaze_positions_edition.md
@@ -3,7 +3,7 @@ Edit timestamped gaze positions
Whether eye data comes from a file on disk or from a live stream, timestamped gaze positions are required before going further.
-![Timestamped Gaze Positions](../../img/timestamped_gaze_positions.png)
+![Timestamped gaze positions](../../img/timestamped_gaze_positions.png)
## Import gaze positions from CSV file
diff --git a/docs/user_guide/gaze_analysis_pipeline/visualisation.md b/docs/user_guide/gaze_analysis_pipeline/visualisation.md
index 99f0259..5f06fac 100644
--- a/docs/user_guide/gaze_analysis_pipeline/visualisation.md
+++ b/docs/user_guide/gaze_analysis_pipeline/visualisation.md
@@ -3,7 +3,7 @@ Visualize pipeline steps
Visualisation is not a pipeline step, but each [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) pipeline step output can be drawn in real time or afterward, depending on the application purpose.
-![ArFrame visualisation](../../img/ar_frame_visualisation.png)
+![ArFrame visualisation](../../img/visualisation.png)
## Add image parameters to ArFrame JSON configuration file
@@ -17,6 +17,22 @@ Here is an extract from the JSON ArFrame configuration file with a sample where
"size": [1920, 1080],
...
"image_parameters": {
+ "draw_gaze_positions": {
+ "color": [0, 255, 255],
+ "size": 2
+ },
+ "draw_fixations": {
+ "deviation_circle_color": [255, 255, 255],
+ "duration_border_color": [127, 0, 127],
+ "duration_factor": 1e-2,
+ "draw_positions": {
+ "position_color": [0, 255, 255],
+ "line_color": [0, 0, 0]
+ }
+ },
+ "draw_saccades": {
+ "line_color": [255, 0, 255]
+ },
"draw_scan_path": {
"draw_fixations": {
"deviation_circle_color": [255, 0, 255],
@@ -25,8 +41,7 @@ Here is an extract from the JSON ArFrame configuration file with a sample where
},
"draw_saccades": {
"line_color": [255, 0, 255]
- },
- "deepness": 0
+ }
},
"draw_layers": {
"MyLayer": {
@@ -38,11 +53,11 @@ Here is an extract from the JSON ArFrame configuration file with a sample where
},
"draw_aoi_matching": {
"draw_matched_fixation": {
- "deviation_circle_color": [255, 255, 255]
- },
- "draw_matched_fixation_positions": {
- "position_color": [0, 255, 255],
- "line_color": [0, 0, 0]
+ "deviation_circle_color": [255, 255, 255],
+ "draw_positions": {
+ "position_color": [0, 255, 0],
+ "line_color": [0, 0, 0]
+ }
},
"draw_matched_region": {
"color": [0, 255, 0],
@@ -56,10 +71,6 @@ Here is an extract from the JSON ArFrame configuration file with a sample where
"looked_aoi_name_offset": [0, -10]
}
}
- },
- "draw_gaze_positions": {
- "color": [0, 255, 255],
- "size": 2
}
}
}
@@ -81,7 +92,7 @@ import cv2
# Assuming that timestamped gaze positions have been processed by ArFrame.look method
...
-# Export heatmap image
+# Export ArFrame image
cv2.imwrite('./ar_frame.png', ar_frame.image())
```
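
For real-time use, a minimal display loop sketch (assuming gaze positions are fed to [ArFrame.look](../../argaze.md/#argaze.ArFeatures.ArFrame.look) elsewhere, e.g. from another thread):

```python
import cv2

# Create a window named after the ArFrame
cv2.namedWindow(ar_frame.name, cv2.WINDOW_AUTOSIZE)

# Refresh the ArFrame image until the Esc key is pressed
while cv2.waitKey(10) != 27:

    cv2.imshow(ar_frame.name, ar_frame.image())

cv2.destroyAllWindows()
```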
diff --git a/docs/user_guide/timestamped_data/data_synchronisation.md b/docs/user_guide/timestamped_data/data_synchronisation.md
deleted file mode 100644
index 5190eab..0000000
--- a/docs/user_guide/timestamped_data/data_synchronisation.md
+++ /dev/null
@@ -1,106 +0,0 @@
-Data synchronisation
-====================
-
-Recorded data needs to be synchronized to link them before further processings.
-
-The [TimeStampedBuffer](../../argaze.md/#argaze.DataStructures.TimeStampedBuffer) class provides various methods to help in such task.
-
-## Pop last before
-
-![Pop last before](../../img/pop_last_before.png)
-
-The code below shows how to use [pop_last_before](../../argaze.md/#argaze.DataStructures.TimeStampedBuffer.pop_last_before) method in order to synchronise two timestamped data buffers with different timestamps:
-
-``` python
-from argaze import DataStructures
-
-# Assuming A_data_record and B_data_record are TimeStampedBuffer instances with different timestamps
-
-for A_ts, A_data in A_data_record.items():
-
- try:
-
- # Get nearest B data before current A data and remove all B data before (including the returned one)
- B_ts, B_data = B_data_record.pop_last_before(A_ts)
-
- # No data stored before A_ts timestamp
- except KeyError:
-
- pass
-
-```
-
-## Pop last until
-
-![Pop last until](../../img/pop_last_until.png)
-
-The code below shows how to use [pop_last_until](../../argaze.md/#argaze.DataStructures.TimeStampedBuffer.pop_last_until) method in order to synchronise two timestamped data buffers with different timestamps:
-
-``` python
-from argaze import DataStructures
-
-# Assuming A_data_record and B_data_record are TimeStampedBuffer instances with different timestamps
-
-for A_ts, A_data in A_data_record.items():
-
- try:
-
- # Get nearest B data after current A data and remove all B data before
- B_ts, B_data = B_data_record.pop_last_until(A_ts)
-
- # No data stored until A_ts timestamp
- except KeyError:
-
- pass
-
-```
-
-## Get last before
-
-![Get last before](../../img/get_last_before.png)
-
-The code below shows how to use [get_last_before](../../argaze.md/#argaze.DataStructures.TimeStampedBuffer.get_last_before) method in order to synchronise two timestamped data buffers with different timestamps:
-
-``` python
-from argaze import DataStructures
-
-# Assuming A_data_record and B_data_record are TimeStampedBuffer instances with different timestamps
-
-for A_ts, A_data in A_data_record.items():
-
- try:
-
- # Get nearest B data before current A data
- B_ts, B_data = B_data_record.get_last_before(A_ts)
-
- # No data stored before A_ts timestamp
- except KeyError:
-
- pass
-
-```
-
-## Get last until
-
-![Get last until](../../img/get_last_until.png)
-
-The code below shows how to use [get_last_until](../../argaze.md/#argaze.DataStructures.TimeStampedBuffer.get_last_until) method in order to synchronise two timestamped data buffers with different timestamps:
-
-``` python
-from argaze import DataStructures
-
-# Assuming A_data_record and B_data_record are TimeStampedBuffer instances with different timestamps
-
-for A_ts, A_data in A_data_record.items():
-
- try:
-
- # Get nearest B data after current A data
- B_ts, B_data = B_data_record.get_last_until(A_ts)
-
- # No data stored until A_ts timestamp
- except KeyError:
-
- pass
-
-```
diff --git a/docs/user_guide/timestamped_data/introduction.md b/docs/user_guide/timestamped_data/introduction.md
deleted file mode 100644
index 974e2be..0000000
--- a/docs/user_guide/timestamped_data/introduction.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Timestamped data
-================
-
-Working with wearable eye tracker devices implies to handle various timestamped data like gaze positions, pupills diameter, fixations, saccades, ...
-
-This section mainly refers to [DataStructures.TimeStampedBuffer](../../argaze.md/#argaze.DataStructures.TimeStampedBuffer) class.
diff --git a/docs/user_guide/timestamped_data/ordered_dictionary.md b/docs/user_guide/timestamped_data/ordered_dictionary.md
deleted file mode 100644
index 64dd899..0000000
--- a/docs/user_guide/timestamped_data/ordered_dictionary.md
+++ /dev/null
@@ -1,19 +0,0 @@
-Ordered dictionary
-==================
-
-[TimeStampedBuffer](../../argaze.md/#argaze.DataStructures.TimeStampedBuffer) class inherits from [OrderedDict](https://docs.python.org/3/library/collections.html#collections.OrderedDict) as data are de facto ordered by time.
-
-Any data type can be stored using int or float keys as timestamp.
-
-```python
-from argaze import DataStructures
-
-# Create a timestamped data buffer
-ts_data = DataStructures.TimeStampedBuffer()
-
-# Store any data type using numeric keys
-ts_data[0] = 123
-ts_data[0.1] = "message"
-ts_data[0.23] = {"key": value}
-...
-```
diff --git a/docs/user_guide/timestamped_data/pandas_dataframe_conversion.md b/docs/user_guide/timestamped_data/pandas_dataframe_conversion.md
deleted file mode 100644
index 7614e73..0000000
--- a/docs/user_guide/timestamped_data/pandas_dataframe_conversion.md
+++ /dev/null
@@ -1,41 +0,0 @@
----
-title: Pandas DataFrame conversion
----
-
-Pandas DataFrame conversion
-===========================
-
-A [Pandas DataFrame](https://pandas.pydata.org/docs/getting_started/intro_tutorials/01_table_oriented.html#min-tut-01-tableoriented) is a python data structure allowing powerful table processings.
-
-## Export as dataframe
-
-[TimeStampedBuffer](../../argaze.md/#argaze.DataStructures.TimeStampedBuffer) instance can be converted into dataframe provided that data values are stored as dictionaries.
-
-```python
-from argaze import DataStructures
-
-# Create a timestamped data buffer
-ts_data = DataStructures.TimeStampedBuffer()
-
-# Store various data as dictionary
-ts_data[10] = {"A_key": 0, "B_key": 0.123}}
-ts_data[20] = {"A_key": 4, "B_key": 0.567}}
-ts_data[30] = {"A_key": 8, "B_key": 0.901}}
-...
-
-# Convert timestamped data buffer into dataframe
-ts_buffer_dataframe = ts_buffer.as_dataframe()
-```
-
-ts_buffer_dataframe would look like:
-
-|timestamp|A_key|B_key|
-|:--------|:----|:----|
-|10 |0 |0.123|
-|20 |4 |0.567|
-|30 |8 |0.901|
-|... |... |... |
-
-## Import from dataframe
-
-Reversely, [TimeStampedBuffer](../../argaze.md/#argaze.DataStructures.TimeStampedBuffer) instance can be created from dataframe, as a result of which each dataframe columns label will become a key of data value dictionary. Notice that the column containing timestamp values have to be called 'timestamp'.
diff --git a/docs/user_guide/timestamped_data/saving_and_loading.md b/docs/user_guide/timestamped_data/saving_and_loading.md
deleted file mode 100644
index 4e6a094..0000000
--- a/docs/user_guide/timestamped_data/saving_and_loading.md
+++ /dev/null
@@ -1,14 +0,0 @@
-Saving and loading
-==================
-
-[TimeStampedBuffer](../../argaze.md/#argaze.DataStructures.TimeStampedBuffer) instance can be saved as and loaded from JSON file format.
-
-```python
-
-# Save
-ts_data.to_json('./data.json')
-
-# Load
-ts_data = DataStructures.TimeStampedBuffer.from_json('./data.json')
-
-```
diff --git a/docs/user_guide/utils/ready-made_scripts.md b/docs/user_guide/utils/ready-made_scripts.md
index bc8b277..55258e9 100644
--- a/docs/user_guide/utils/ready-made_scripts.md
+++ b/docs/user_guide/utils/ready-made_scripts.md
@@ -9,10 +9,10 @@ Collection of command-line scripts to provide useful features.
!!! note
*Use -h option to get command arguments documentation.*
-## ArUco scene exporter
+## ArUco markers group exporter
-Load a MOVIE with ArUco markers inside and select image into it, detect ArUco markers belonging to DICT_APRILTAG_16h5 dictionary with 5cm size into the selected image thanks to given OPTIC_PARAMETERS and DETECTOR_PARAMETERS then, export detected ArUco markers scene as .obj file into an *./src/argaze/utils/_export/scenes* folder.
+Load a MOVIE and an ArUcoCamera CONFIGURATION to detect ArUco markers inside a selected movie frame, then export the detected ArUco markers group as an .obj file into an OUTPUT folder.
```shell
-python ./src/argaze/utils/aruco_markers_scene_export.py MOVIE DICT_APRILTAG_16h5 5 OPTIC_PARAMETERS DETECTOR_PARAMETERS -o ./src/argaze/utils/_export/scenes
+python ./src/argaze/utils/aruco_markers_group_export.py MOVIE CONFIGURATION -o OUTPUT
``` \ No newline at end of file
diff --git a/mkdocs.yml b/mkdocs.yml
index c2f4c53..f988ed0 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -8,6 +8,7 @@ nav:
- user_guide/gaze_analysis_pipeline/introduction.md
- user_guide/gaze_analysis_pipeline/timestamped_gaze_positions_edition.md
- user_guide/gaze_analysis_pipeline/configuration_and_execution.md
+ - user_guide/gaze_analysis_pipeline/aoi_2d_description.md
- user_guide/gaze_analysis_pipeline/aoi_analysis.md
- user_guide/gaze_analysis_pipeline/visualisation.md
- user_guide/gaze_analysis_pipeline/logging.md
@@ -24,39 +25,15 @@ nav:
- ArUco markers pipeline:
- user_guide/aruco_markers_pipeline/introduction.md
- user_guide/aruco_markers_pipeline/aruco_markers_description.md
- - user_guide/aruco_markers_pipeline/aoi_description.md
- user_guide/aruco_markers_pipeline/configuration_and_execution.md
- user_guide/aruco_markers_pipeline/pose_estimation.md
- - user_guide/aruco_markers_pipeline/aoi_projection.md
+ - user_guide/aruco_markers_pipeline/aoi_3d_description.md
+ - user_guide/aruco_markers_pipeline/aoi_3d_projection.md
+ - user_guide/aruco_markers_pipeline/aoi_3d_frame.md
- Advanced Topics:
+ - user_guide/aruco_markers_pipeline/advanced_topics/scripting.md
- user_guide/aruco_markers_pipeline/advanced_topics/optic_parameters_calibration.md
-
-# - ArUco Markers:
-# - user_guide/aruco_markers/introduction.md
-# - user_guide/aruco_markers/dictionary_selection.md
-# - user_guide/aruco_markers/markers_creation.md
-# - user_guide/aruco_markers/camera_calibration.md
-# - user_guide/aruco_markers/markers_detection.md
-# - user_guide/aruco_markers/markers_pose_estimation.md
-# - user_guide/aruco_markers/markers_scene_description.md
-# - Areas Of Interest:
-# - user_guide/areas_of_interest/introduction.md
-# - user_guide/areas_of_interest/aoi_scene_description.md
-# - user_guide/areas_of_interest/aoi_scene_projection.md
-# - user_guide/areas_of_interest/vision_cone_filtering.md
-# - user_guide/areas_of_interest/aoi_matching.md
-# - user_guide/areas_of_interest/heatmap.md
-# - Gaze Analysis:
-# - user_guide/gaze_analysis/introduction.md
-# - user_guide/gaze_analysis/gaze_position.md
-# - user_guide/gaze_analysis/gaze_movement.md
-# - user_guide/gaze_analysis/scan_path.md
-# - Timestamped data:
-# - user_guide/timestamped_data/introduction.md
-# - user_guide/timestamped_data/ordered_dictionary.md
-# - user_guide/timestamped_data/saving_and_loading.md
-# - user_guide/timestamped_data/data_synchronisation.md
-# - user_guide/timestamped_data/pandas_dataframe_conversion.md
+ - user_guide/aruco_markers_pipeline/advanced_topics/aruco_detector_configuration.md
- utils:
- user_guide/utils/ready-made_scripts.md
- user_guide/utils/demonstrations_scripts.md
diff --git a/setup.py b/setup.py
index 358c19e..706f414 100644
--- a/setup.py
+++ b/setup.py
@@ -35,7 +35,7 @@ setup(
packages=find_packages(where='src'),
python_requires='>=3.11',
- install_requires=['opencv-python>=4.7.0', 'opencv-contrib-python>=4.7.0', 'numpy', 'pandas', 'matplotlib', 'shapely', 'lempel_ziv_complexity', 'scipy'],
+ install_requires=['opencv-python>=4.7.0', 'opencv-contrib-python>=4.7.0', 'numpy', 'pandas', 'matplotlib', 'shapely', 'lempel_ziv_complexity', 'scipy', 'scikit-learn'],
project_urls={
'Bug Reports': 'https://git.recherche.enac.fr/projects/argaze/issues',
diff --git a/src/argaze.test/AreaOfInterest/AOI2DScene.py b/src/argaze.test/AreaOfInterest/AOI2DScene.py
index 4e96e98..10ff430 100644
--- a/src/argaze.test/AreaOfInterest/AOI2DScene.py
+++ b/src/argaze.test/AreaOfInterest/AOI2DScene.py
@@ -187,14 +187,14 @@ class TestTimeStampedAOIScenesClass(unittest.TestCase):
aoi_2D_B = AOIFeatures.AreaOfInterest([[1, 1], [1, 2], [2, 2], [2, 1]])
aoi_2d_scene = AOI2DScene.AOI2DScene({"A": aoi_2D_A, "B": aoi_2D_B})
- ts_aois_scenes = AOIFeatures.TimeStampedAOIScenes()
+ ts_aoi_scenes = AOIFeatures.TimeStampedAOIScenes()
- ts_aois_scenes[0] = aoi_2d_scene
+ ts_aoi_scenes[0] = aoi_2d_scene
# Check that only AOIScene can be added
with self.assertRaises(AssertionError):
- ts_aois_scenes[1] = "This string is not an AOI2DScene"
+ ts_aoi_scenes[1] = "This string is not an AOI2DScene"
if __name__ == '__main__':
diff --git a/src/argaze.test/AreaOfInterest/AOI3DScene.py b/src/argaze.test/AreaOfInterest/AOI3DScene.py
index b386432..d09f2a8 100644
--- a/src/argaze.test/AreaOfInterest/AOI3DScene.py
+++ b/src/argaze.test/AreaOfInterest/AOI3DScene.py
@@ -107,14 +107,14 @@ class TestTimeStampedAOIScenesClass(unittest.TestCase):
aoi_3D_B = AOIFeatures.AreaOfInterest([[1, 1, 0], [1, 2, 0], [2, 2, 0], [2, 1, 0]])
aoi_3d_scene = AOI3DScene.AOI3DScene({"A": aoi_3D_A, "B": aoi_3D_B})
- ts_aois_scenes = AOIFeatures.TimeStampedAOIScenes()
+ ts_aoi_scenes = AOIFeatures.TimeStampedAOIScenes()
- ts_aois_scenes[0] = aoi_3d_scene
+ ts_aoi_scenes[0] = aoi_3d_scene
# Check that only AOIScene can be added
with self.assertRaises(AssertionError):
- ts_aois_scenes[1] = "This string is not an AOI3DScene"
+ ts_aoi_scenes[1] = "This string is not an AOI3DScene"
if __name__ == '__main__':
diff --git a/src/argaze.test/AreaOfInterest/AOIFeatures.py b/src/argaze.test/AreaOfInterest/AOIFeatures.py
index cc75ed8..cb8fb52 100644
--- a/src/argaze.test/AreaOfInterest/AOIFeatures.py
+++ b/src/argaze.test/AreaOfInterest/AOIFeatures.py
@@ -118,13 +118,17 @@ class TestAreaOfInterestClass(unittest.TestCase):
aoi_2D = AOIFeatures.AreaOfInterest([[0, 0], [0, 2], [2, 2], [2, 0]])
- self.assertEqual(aoi_2D.inner_axis((1, 1)), (0.5, 0.5))
+ self.assertEqual(aoi_2D.inner_axis(1, 1), (0.5, 0.5))
def test_outter_axis(self):
aoi_2D = AOIFeatures.AreaOfInterest([[0, 0], [0, 2], [2, 2], [2, 0]])
- self.assertEqual(aoi_2D.outter_axis((0.5, 0.5)), (1, 1))
+ self.assertEqual(aoi_2D.outter_axis(0.5, 0.5), (1, 1))
+
+ aoi_3D = AOIFeatures.AreaOfInterest([[1, 0, 0], [1, 0, 2], [1, 2, 2], [1, 2, 0]])
+
+ self.assertEqual(aoi_3D.outter_axis(0.5, 0.5), (1, 1, 1))
def test_circle_intersection(self):
diff --git a/src/argaze.test/GazeAnalysis/ExploitExploreRatio.py b/src/argaze.test/GazeAnalysis/ExploreExploitRatio.py
index 0e6b74a..7b323d4 100644
--- a/src/argaze.test/GazeAnalysis/ExploitExploreRatio.py
+++ b/src/argaze.test/GazeAnalysis/ExploreExploitRatio.py
@@ -10,7 +10,7 @@ __license__ = "BSD"
import unittest
from argaze import GazeFeatures
-from argaze.GazeAnalysis import ExploitExploreRatio
+from argaze.GazeAnalysis import ExploreExploitRatio
from argaze.utils import UtilsFeatures
GazeFeaturesTest = UtilsFeatures.importFromTestPackage('GazeFeatures')
@@ -21,7 +21,7 @@ class TestScanPathAnalyzer(unittest.TestCase):
def test_analyze(self):
"""Test analyze method."""
- xxr_analyzer = ExploitExploreRatio.ScanPathAnalyzer()
+ xxr_analyzer = ExploreExploitRatio.ScanPathAnalyzer()
scan_path = GazeFeaturesTest.build_scan_path(10)
@@ -31,7 +31,7 @@ class TestScanPathAnalyzer(unittest.TestCase):
xxr_analyzer.analyze(scan_path)
# Check exploit explore ratio: it should be greater than 1 because of build_scan_path
- self.assertGreaterEqual(xxr_analyzer.exploit_explore_ratio, 1.)
+ self.assertGreaterEqual(xxr_analyzer.explore_exploit_ratio, 1.)
if __name__ == '__main__':
diff --git a/src/argaze.test/GazeFeatures.py b/src/argaze.test/GazeFeatures.py
index d609dd2..b41c7c7 100644
--- a/src/argaze.test/GazeFeatures.py
+++ b/src/argaze.test/GazeFeatures.py
@@ -497,10 +497,10 @@ class TestAOIScanStepClass(unittest.TestCase):
aoi_scan_step = GazeFeatures.AOIScanStep(movements, 'Test')
-def build_aoi_scan_path(expected_aois, aoi_path):
+def build_aoi_scan_path(expected_aoi, aoi_path):
"""Build AOI scan path"""
- aoi_scan_path = GazeFeatures.AOIScanPath(expected_aois)
+ aoi_scan_path = GazeFeatures.AOIScanPath(expected_aoi)
# Append a hidden last step to allow last given step creation
aoi_path.append(aoi_path[-2])
diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py
index 96976c2..5ec6b7e 100644
--- a/src/argaze/ArFeatures.py
+++ b/src/argaze/ArFeatures.py
@@ -7,7 +7,7 @@ __credits__ = []
__copyright__ = "Copyright 2023, Ecole Nationale de l'Aviation Civile (ENAC)"
__license__ = "BSD"
-from typing import TypeVar, Tuple
+from typing import TypeVar, Tuple, Any
from dataclasses import dataclass, field
import json
import os
@@ -96,7 +96,7 @@ DEFAULT_ARLAYER_DRAW_PARAMETERS = {
@dataclass
class ArLayer():
"""
- Defines a space where to make matching of gaze movements and AOIs and inside which those matchings need to be analyzed.
+ Defines a space where gaze movements are matched with AOI and inside which those matches need to be analyzed.
Parameters:
name: name of the layer
@@ -180,6 +180,11 @@ class ArLayer():
new_aoi_scene = AOIFeatures.AOIScene.from_json(filepath)
+ # SVG file format for 2D dimension only
+ if file_format == 'svg':
+
+ new_aoi_scene = AOI2DScene.AOI2DScene.from_svg(filepath)
+
# OBJ file format for 3D dimension only
elif file_format == 'obj':
@@ -192,14 +197,16 @@ class ArLayer():
except KeyError:
+ pass
+
# Add AOI 2D Scene by default
new_aoi_scene = AOI2DScene.AOI2DScene()
# Edit expected AOI list by removing AOI with name equals to layer name
- expected_aois = list(new_aoi_scene.keys())
+ expected_aoi = list(new_aoi_scene.keys())
- if new_layer_name in expected_aois:
- expected_aois.remove(new_layer_name)
+ if new_layer_name in expected_aoi:
+ expected_aoi.remove(new_layer_name)
# Load aoi matcher
try:
@@ -223,13 +230,13 @@ class ArLayer():
try:
new_aoi_scan_path_data = layer_data.pop('aoi_scan_path')
- new_aoi_scan_path_data['expected_aois'] = expected_aois
+ new_aoi_scan_path_data['expected_aoi'] = expected_aoi
new_aoi_scan_path = GazeFeatures.AOIScanPath(**new_aoi_scan_path_data)
except KeyError:
new_aoi_scan_path_data = {}
- new_aoi_scan_path_data['expected_aois'] = expected_aois
+ new_aoi_scan_path_data['expected_aoi'] = expected_aoi
new_aoi_scan_path = None
# Load AOI scan path analyzers
@@ -367,6 +374,9 @@ class ArLayer():
# Lock layer exploitation
self.__look_lock.acquire()
+ # Store look execution start date
+ look_start = time.perf_counter()
+
# Update current gaze movement
self.__gaze_movement = gaze_movement
@@ -445,23 +455,13 @@ class ArLayer():
looked_aoi = None
aoi_scan_path_analysis = {}
exception = e
+
+ # Assess total execution time in ms
+ execution_times['total'] = (time.perf_counter() - look_start) * 1e3
# Unlock layer exploitation
self.__look_lock.release()
- # Sum all execution times
- total_execution_time = 0
-
- if execution_times['aoi_matcher']:
-
- total_execution_time += execution_times['aoi_matcher']
-
- for _, aoi_scan_path_analysis_time in execution_times['aoi_scan_step_analyzers'].items():
-
- total_execution_time += aoi_scan_path_analysis_time
-
- execution_times['total'] = total_execution_time
-
# Return look data
return looked_aoi, aoi_scan_path_analysis, execution_times, exception
@@ -471,7 +471,7 @@ class ArLayer():
Parameters:
draw_aoi_scene: AreaOfInterest.AOI2DScene.draw parameters (if None, no aoi scene is drawn)
- draw_aoi_matching: AOIMatcher.draw parameters (which depends of the loaded aoi matcher module, if None, no aoi matching is drawn)
+ draw_aoi_matching: AOIMatcher.draw parameters (which depends on the loaded aoi matcher module; if None, no aoi matching is drawn)
"""
# Use draw_parameters attribute if no parameters
@@ -484,7 +484,7 @@ class ArLayer():
# Draw aoi if required
if draw_aoi_scene is not None:
-
+
self.aoi_scene.draw(image, **draw_aoi_scene)
# Draw aoi matching if required
@@ -523,7 +523,8 @@ class ArFrame():
Parameters:
name: name of the frame
- size: defines the dimension of the rectangular area where gaze positions are projected.
+ size: defines the dimension of the rectangular area where gaze positions are projected
+ gaze_position_calibrator: gaze position calibration algorithm
gaze_movement_identifier: gaze movement identification algorithm
filter_in_progress_identification: ignore in progress gaze movement identification
scan_path: scan path object
@@ -537,6 +538,7 @@ class ArFrame():
name: str
size: tuple[int] = field(default=(1, 1))
+ gaze_position_calibrator: GazeFeatures.GazePositionCalibrator = field(default_factory=GazeFeatures.GazePositionCalibrator)
gaze_movement_identifier: GazeFeatures.GazeMovementIdentifier = field(default_factory=GazeFeatures.GazeMovementIdentifier)
filter_in_progress_identification: bool = field(default=True)
scan_path: GazeFeatures.ScanPath = field(default_factory=GazeFeatures.ScanPath)
@@ -600,6 +602,31 @@ class ArFrame():
new_frame_size = (0, 0)
+ # Load gaze position calibrator
+ try:
+
+ gaze_position_calibrator_value = frame_data.pop('gaze_position_calibrator')
+
+ # str: relative path to file
+ if type(gaze_position_calibrator_value) == str:
+
+ filepath = os.path.join(working_directory, gaze_position_calibrator_value)
+ file_format = filepath.split('.')[-1]
+
+ # JSON file format
+ if file_format == 'json':
+
+ new_gaze_position_calibrator = GazeFeatures.GazePositionCalibrator.from_json(filepath)
+
+ # dict:
+ else:
+
+ new_gaze_position_calibrator = GazeFeatures.GazePositionCalibrator.from_dict(gaze_position_calibrator_value)
+
+ except KeyError:
+
+ new_gaze_position_calibrator = None
+
# Load gaze movement identifier
try:
@@ -728,11 +755,6 @@ class ArFrame():
# Create layer
new_layer = ArLayer.from_dict(layer_data, working_directory)
- # Project 3D aoi scene layer to get only 2D aoi scene
- if new_layer.aoi_scene.dimension == 3:
-
- new_layer.aoi_scene = new_layer.aoi_scene.orthogonal_projection * new_frame_size
-
# Append new layer
new_layers[layer_name] = new_layer
@@ -761,6 +783,7 @@ class ArFrame():
# Create frame
return ArFrame(new_frame_name, \
new_frame_size, \
+ new_gaze_position_calibrator, \
new_gaze_movement_identifier, \
filter_in_progress_identification, \
new_scan_path, \
@@ -808,7 +831,7 @@ class ArFrame():
return self.__ts_logs
- def look(self, timestamp: int|float, gaze_position: GazeFeatures.GazePosition = GazeFeatures.UnvalidGazePosition()) -> Tuple[GazeFeatures.GazeMovement, dict, dict, dict, Exception]:
+ def look(self, timestamp: int|float, gaze_position: GazeFeatures.GazePosition = GazeFeatures.UnvalidGazePosition()) -> Tuple[GazeFeatures.GazePosition, GazeFeatures.GazeMovement, dict, dict, dict, Exception]:
"""
Project gaze position into frame.
@@ -820,6 +843,7 @@ class ArFrame():
gaze_position: gaze position to project
Returns:
+ current_gaze_position: calibrated gaze position if gaze_position_calibrator is instantiated; else, the given gaze position.
identified_gaze_movement: identified gaze movement from incoming consecutive timestamped gaze positions if gaze_movement_identifier is instantiated. Current gaze movement if filter_in_progress_identification is False.
scan_path_analysis: scan path analysis at each new scan step if scan_path is instantiated.
layers_analysis: aoi scan path analysis at each new aoi scan step for each instantiated layer's aoi scan path.
@@ -830,8 +854,8 @@ class ArFrame():
# Lock frame exploitation
self.__look_lock.acquire()
- # Update current gaze position
- self.__gaze_position = gaze_position
+ # Store look execution start date
+ look_start = time.perf_counter()
# No gaze movement identified by default
identified_gaze_movement = GazeFeatures.UnvalidGazeMovement()
@@ -855,6 +879,16 @@ class ArFrame():
try:
+ # Apply gaze position calibration
+ if self.gaze_position_calibrator is not None:
+
+ self.__gaze_position = self.gaze_position_calibrator.apply(gaze_position)
+
+ # Or update gaze position at least
+ else:
+
+ self.__gaze_position = gaze_position
+
# Identify gaze movement
if self.gaze_movement_identifier is not None:
@@ -944,45 +978,29 @@ class ArFrame():
print('Warning: the following error occurs in ArFrame.look method:', e)
+ self.__gaze_position = GazeFeatures.UnvalidGazePosition()
identified_gaze_movement = GazeFeatures.UnvalidGazeMovement()
scan_step_analysis = {}
layer_analysis = {}
exception = e
-
- # Unlock frame exploitation
- self.__look_lock.release()
-
- # Sum all execution times
- total_execution_time = 0
-
- if execution_times['gaze_movement_identifier']:
-
- total_execution_time += execution_times['gaze_movement_identifier']
-
- for _, scan_step_analysis_time in execution_times['scan_step_analyzers'].items():
-
- total_execution_time += scan_step_analysis_time
-
- if execution_times['heatmap']:
- total_execution_time += execution_times['heatmap']
+ # Assess total execution time in ms
+ execution_times['total'] = (time.perf_counter() - look_start) * 1e3
- for _, layer_execution_times in execution_times['layers'].items():
-
- total_execution_time += layer_execution_times['total']
-
- execution_times['total'] = total_execution_time
+ # Unlock frame exploitation
+ self.__look_lock.release()
# Return look data
- return identified_gaze_movement, scan_step_analysis, layer_analysis, execution_times, exception
+ return self.__gaze_position, identified_gaze_movement, scan_step_analysis, layer_analysis, execution_times, exception
- def __image(self, background_weight: float = None, heatmap_weight: float = None, draw_scan_path: dict = None, draw_layers: dict = None, draw_gaze_positions: dict = None, draw_fixations: dict = None, draw_saccades: dict = None) -> numpy.array:
+ def __image(self, background_weight: float = None, heatmap_weight: float = None, draw_gaze_position_calibrator: dict = None, draw_scan_path: dict = None, draw_layers: dict = None, draw_gaze_positions: dict = None, draw_fixations: dict = None, draw_saccades: dict = None) -> numpy.array:
"""
Get background image with overlaid visualisations.
Parameters:
background_weight: weight of background overlay
heatmap_weight: weight of heatmap overlay
+ draw_gaze_position_calibrator: [GazeFeatures.GazePositionCalibrator.draw](argaze.md/#argaze.GazeFeatures.GazePositionCalibrator.draw) parameters (if None, nothing is drawn)
draw_scan_path: [GazeFeatures.ScanPath.draw](argaze.md/#argaze.GazeFeatures.ScanPath.draw) parameters (if None, no scan path is drawn)
draw_layers: dictionary of [ArLayer.draw](argaze.md/#argaze.ArFeatures.ArLayer.draw) parameters per layer (if None, no layer is drawn)
draw_gaze_positions: [GazeFeatures.GazePosition.draw](argaze.md/#argaze.GazeFeatures.GazePosition.draw) parameters (if None, no gaze position is drawn)
@@ -1015,18 +1033,16 @@ class ArFrame():
image = numpy.full((self.size[1], self.size[0], 3), 0).astype(numpy.uint8)
+ # Draw gaze position calibrator
+ if draw_gaze_position_calibrator is not None:
+
+ self.gaze_position_calibrator.draw(image, size=self.size, **draw_gaze_position_calibrator)
+
# Draw scan path if required
if draw_scan_path is not None and self.scan_path is not None:
self.scan_path.draw(image, **draw_scan_path)
- # Draw layers if required
- if draw_layers is not None:
-
- for layer_name, draw_layer in draw_layers.items():
-
- self.layers[layer_name].draw(image, **draw_layer)
-
# Draw current fixation if required
if draw_fixations is not None and self.gaze_movement_identifier is not None:
@@ -1037,6 +1053,13 @@ class ArFrame():
self.gaze_movement_identifier.current_saccade.draw(image, **draw_saccades)
+ # Draw layers if required
+ if draw_layers is not None:
+
+ for layer_name, draw_layer in draw_layers.items():
+
+ self.layers[layer_name].draw(image, **draw_layer)
+
# Draw current gaze position if required
if draw_gaze_positions is not None:
@@ -1047,7 +1070,7 @@ class ArFrame():
return image
- def image(self, **kwargs) -> numpy.array:
+ def image(self, **kwargs: dict) -> numpy.array:
"""
Get frame image.
@@ -1067,15 +1090,10 @@ class ArScene():
Define abstract Augmented Reality scene with ArLayers and ArFrames inside.
Parameters:
-
name: name of the scene
-
layers: dictionary of ArLayers to project once the pose is estimated: see [project][argaze.ArFeatures.ArScene.project] function below.
-
frames: dictionary of ArFrames to project once the pose is estimated: see [project][argaze.ArFeatures.ArScene.project] function below.
-
angle_tolerance: Optional angle error tolerance in degrees to validate marker pose, used in the [estimate_pose][argaze.ArFeatures.ArScene.estimate_pose] function.
-
distance_tolerance: Optional distance error tolerance in centimeters to validate marker pose, used in the [estimate_pose][argaze.ArFeatures.ArScene.estimate_pose] function.
"""
name: str
@@ -1099,13 +1117,6 @@ class ArScene():
frame.parent = self
- # Preprocess orthogonal projection to speed up further processings
- self.__orthogonal_projection_cache = {}
-
- for layer_name, layer in self.layers.items():
-
- self.__orthogonal_projection_cache[layer_name] = layer.aoi_scene.orthogonal_projection
-
def __str__(self) -> str:
"""
Returns:
@@ -1184,54 +1195,70 @@ class ArScene():
for frame_name, frame_data in scene_data.pop('frames').items():
- # Append name
- frame_data['name'] = frame_name
+ # str: relative path to file
+ if type(frame_data) == str:
+
+ filepath = os.path.join(working_directory, frame_data)
+ file_format = filepath.split('.')[-1]
+
+ # JSON file format for 2D or 3D dimension
+ if file_format == 'json':
- # Create frame
- new_frame = ArFrame.from_dict(frame_data, working_directory)
+ new_frame = ArFrame.from_json(filepath)
- # Look for AOI with same frame name
- aoi_frame = None
- aoi_frame_found = False
- for layer_name, layer in new_layers.items():
+ # dict:
+ else:
+
+ # Append name
+ frame_data['name'] = frame_name
+
+ new_frame = ArFrame.from_dict(frame_data, working_directory)
+
+ # Look for a scene layer with an AOI named like the frame
+ for scene_layer_name, scene_layer in new_layers.items():
try:
- aoi_frame = layer.aoi_scene[frame_name]
- aoi_frame_found = True
+ frame_3d = scene_layer.aoi_scene[frame_name]
- except KeyError:
+ # Check that the frame have a layer named like this scene layer
+ aoi_2d_scene = new_frame.layers[scene_layer_name].aoi_scene
- # AOI name should be unique
- break
+ # Transform 2D frame layer AOI into 3D scene layer AOI
+ # Then, add them to scene layer
+ scene_layer.aoi_scene |= aoi_2d_scene.dimensionalize(frame_3d, new_frame.size)
- if aoi_frame_found:
+ '''DEPRECATED: but maybe still useful?
+ # Project and reframe each layers into corresponding frame layers
+ for frame_layer_name, frame_layer in new_frame.layers.items():
- # Project and reframe each layers into corresponding frame layers
- for frame_layer_name, frame_layer in new_frame.layers.items():
+ try:
- try:
+ layer = new_layers[frame_layer_name]
+
+ layer_aoi_scene_projection = layer.aoi_scene.orthogonal_projection
+ aoi_frame_projection = layer_aoi_scene_projection[frame_name]
- layer = new_layers[frame_layer_name]
-
- layer_aoi_scene_projection = layer.aoi_scene.orthogonal_projection
- aoi_frame_projection = layer_aoi_scene_projection[frame_name]
+ frame_layer.aoi_scene = layer_aoi_scene_projection.reframe(aoi_frame_projection, new_frame.size)
- frame_layer.aoi_scene = layer_aoi_scene_projection.reframe(aoi_frame_projection, new_frame.size)
+ if frame_layer.aoi_scan_path is not None:
- if frame_layer.aoi_scan_path is not None:
+ # Edit expected AOI list by removing AOI with name equals to frame layer name
+ expected_aoi = list(layer.aoi_scene.keys())
- # Edit expected AOI list by removing AOI with name equals to frame layer name
- expected_aois = list(layer.aoi_scene.keys())
+ if frame_layer_name in expected_aoi:
+ expected_aoi.remove(frame_layer_name)
- if frame_layer_name in expected_aois:
- expected_aois.remove(frame_layer_name)
+ frame_layer.aoi_scan_path.expected_aoi = expected_aoi
- frame_layer.aoi_scan_path.expected_aois = expected_aois
+ except KeyError:
- except KeyError:
+ continue
+ '''
- continue
+ except KeyError as e:
+
+ print(e)
# Append new frame
new_frames[frame_name] = new_frame
@@ -1242,7 +1269,7 @@ class ArScene():
return ArScene(new_scene_name, new_layers, new_frames, **scene_data)
- def estimate_pose(self, detected_features) -> Tuple[numpy.array, numpy.array]:
+ def estimate_pose(self, detected_features: Any) -> Tuple[numpy.array, numpy.array]:
"""Define abstract estimate scene pose method.
Parameters:
@@ -1255,13 +1282,14 @@ class ArScene():
raise NotImplementedError('estimate_pose() method not implemented')
- def project(self, tvec: numpy.array, rvec: numpy.array, visual_hfov: float = 0.) -> Tuple[str, AOI2DScene.AOI2DScene]:
- """Project layers according estimated pose and optional horizontal field of view clipping angle.
+ def project(self, tvec: numpy.array, rvec: numpy.array, visual_hfov: float = 0., visual_vfov: float = 0.) -> Tuple[str, AOI2DScene.AOI2DScene]:
+ """Project layers according estimated pose and optional field of view clipping angles.
Parameters:
tvec: translation vector
rvec: rotation vector
visual_hfov: horizontal field of view clipping angle
+ visual_vfov: vertical field of view clipping angle
Returns:
layer_name: name of projected layer
@@ -1271,6 +1299,7 @@ class ArScene():
for name, layer in self.layers.items():
# Clip AOI out of the visual horizontal field of view (optional)
+ # TODO: use HFOV and VFOV and don't use vision_cone method
if visual_hfov > 0:
# Transform layer aoi scene into camera referential
@@ -1292,7 +1321,7 @@ class ArScene():
# Project layer aoi scene
yield name, aoi_scene_copy.project(tvec, rvec, self.parent.aruco_detector.optic_parameters.K)
- def draw(self, image: numpy.array, **kwargs):
+ def draw(self, image: numpy.array, **kwargs: dict):
"""
Draw scene into image.
@@ -1309,9 +1338,13 @@ class ArCamera(ArFrame):
Parameters:
scenes: all scenes to project into camera frame
+ visual_hfov: Optional angle in degrees to clip scenes projection according to the visual horizontal field of view (HFOV).
+ visual_vfov: Optional angle in degrees to clip scenes projection according to the visual vertical field of view (VFOV).
"""
scenes: dict = field(default_factory=dict)
+ visual_hfov: float = field(default=0.)
+ visual_vfov: float = field(default=0.)
def __post_init__(self):
@@ -1324,31 +1357,45 @@ class ArCamera(ArFrame):
scene.parent = self
# Setup expected aoi of each layer aoi scan path with the aoi of corresponding scene layer
+ # Edit aoi matcher exclude attribute to ignore frame aoi
for layer_name, layer in self.layers.items():
if layer.aoi_scan_path is not None:
- all_aoi_list = []
+ expected_aoi_list = []
+ exclude_aoi_list = []
for scene_name, scene in self.scenes.items():
+ # Append scene layer aoi to corresponding expected camera layer aoi
try:
scene_layer = scene.layers[layer_name]
- all_aoi_list.extend(list(scene_layer.aoi_scene.keys()))
+ expected_aoi_list.extend(list(scene_layer.aoi_scene.keys()))
except KeyError:
continue
- layer.aoi_scan_path.expected_aois = all_aoi_list
+ # Remove scene frame from expected camera layer aoi
+ # Exclude scene frame from camera layer aoi matching
+ for frame_name, frame in scene.frames.items():
+
+ try:
+
+ expected_aoi_list.remove(frame_name)
+ exclude_aoi_list.append(frame_name)
+
+ except ValueError:
+
+ continue
+
+ layer.aoi_scan_path.expected_aoi = expected_aoi_list
+ layer.aoi_matcher.exclude = exclude_aoi_list
# Init a lock to share scene projections into camera frame between multiple threads
self._frame_lock = threading.Lock()
-
- # Define public timestamp buffer to store ignored gaze positions
- self.ignored_gaze_positions = GazeFeatures.TimeStampedGazePositions()
def __str__(self) -> str:
"""
@@ -1399,24 +1446,44 @@ class ArCamera(ArFrame):
yield scene_frame
def watch(self, image: numpy.array) -> Tuple[float, dict]:
- """Detect AR features from image and project scenes into camera frame."""
+ """Detect AR features from image and project scenes into camera frame.
+
+ Returns:
+ detection time: AR features detection time in ms.
+ exception: dictionary with exception raised per scene.
+ """
raise NotImplementedError('watch() method not implemented')
def look(self, timestamp: int|float, gaze_position: GazeFeatures.GazePosition):
"""Project timestamped gaze position into each scene frames.
- !!! warning watch method needs to be called first.
+ Parameters:
+ timestamp: gaze position time stamp (unit doesn't matter)
+ gaze_position: GazePosition object
+
+ !!! warning
+ watch method needs to be called first.
"""
- # Can't use camera frame when it is locked
- if self._frame_lock.locked():
+ # Can't use camera frame while it is locked
+ wait_start = time.perf_counter()
+ waiting_time = 0
+
+ while self._frame_lock.locked():
- # TODO: Store ignored timestamped gaze positions for further projections
- # PB: This would imply to also store frame projections !!!
- self.ignored_gaze_positions[timestamp] = gaze_position
+ time.sleep(1e-6)
+ waiting_time = (time.perf_counter() - wait_start) * 1e3
- return None, None
+ # TODO? return waiting time?
+
+ # TODO? add timeout parameter?
+ #if waiting_time > timeout:
+ # return None, None
+
+ # DEBUG
+ #if waiting_time > 0:
+ # print(f'ArCamera: waiting {waiting_time:.3f} ms before to process gaze position at {timestamp} time.')
# Lock camera frame exploitation
self._frame_lock.acquire()
@@ -1437,7 +1504,7 @@ class ArCamera(ArFrame):
# TODO?: Should we prefer to use camera frame AOIMatcher object?
if aoi_2d.contains_point(gaze_position.value):
- inner_x, inner_y = aoi_2d.clockwise().inner_axis(gaze_position.value)
+ inner_x, inner_y = aoi_2d.clockwise().inner_axis(*gaze_position.value)
# QUESTION: How to project gaze precision?
inner_gaze_position = GazeFeatures.GazePosition((inner_x, inner_y))
@@ -1455,7 +1522,8 @@ class ArCamera(ArFrame):
def map(self):
"""Project camera frame background into scene frames background.
- .. warning:: watch method needs to be called first.
+ !!! warning
+ watch method needs to be called first.
"""
# Can't use camera frame when it is locked
@@ -1477,7 +1545,7 @@ class ArCamera(ArFrame):
# Apply perspective transform algorithm to fill aoi frame background
width, height = frame.size
- destination = numpy.float32([[0, height],[width, height],[width, 0],[0, 0]])
+ destination = numpy.float32([[0, 0], [width, 0], [width, height], [0, height]])
mapping = cv2.getPerspectiveTransform(aoi_2d.astype(numpy.float32), destination)
frame.background = cv2.warpPerspective(self.background, mapping, (width, height))
@@ -1489,7 +1557,7 @@ class ArCamera(ArFrame):
# Unlock camera frame exploitation
self._frame_lock.release()
- def image(self, **kwargs) -> numpy.array:
+ def image(self, **kwargs: dict) -> numpy.array:
"""
Get frame image.
diff --git a/src/argaze/ArUcoMarkers/ArUcoCamera.py b/src/argaze/ArUcoMarkers/ArUcoCamera.py
index 4f555fb..ed6c619 100644
--- a/src/argaze/ArUcoMarkers/ArUcoCamera.py
+++ b/src/argaze/ArUcoMarkers/ArUcoCamera.py
@@ -11,6 +11,7 @@ from typing import TypeVar, Tuple
from dataclasses import dataclass, field
import json
import os
+import time
from argaze import ArFeatures, DataStructures
from argaze.ArUcoMarkers import ArUcoMarkersDictionary, ArUcoDetector, ArUcoOpticCalibrator, ArUcoScene
@@ -37,6 +38,7 @@ class ArUcoCamera(ArFeatures.ArCamera):
"""
Define an ArCamera based on ArUco marker detection.
+ Parameters:
aruco_detector: ArUco marker detector
"""
@@ -73,7 +75,7 @@ class ArUcoCamera(ArFeatures.ArCamera):
return output
@classmethod
- def from_dict(self, aruco_camera_data, working_directory: str = None) -> ArUcoCameraType:
+ def from_dict(self, aruco_camera_data: dict, working_directory: str = None) -> ArUcoCameraType:
"""
Load ArUcoCamera from dictionary.
@@ -140,13 +142,14 @@ class ArUcoCamera(ArFeatures.ArCamera):
return ArUcoCamera.from_dict(aruco_camera_data, working_directory)
- def watch(self, image: numpy.array) -> Tuple[float, dict]:
+ def watch(self, image: numpy.array) -> Tuple[float, float, dict]:
"""Detect environment aruco markers from image and project scenes into camera frame.
Returns:
- - detection_time: aruco marker detection time in ms
- - exceptions: dictionary with exception raised per scene
- """
+ detection time: aruco marker detection time in ms.
+ projection time: scenes projection time in ms.
+ exceptions: dictionary with the exception raised per scene.
+ """
# Detect aruco markers
detection_time = self.aruco_detector.detect_markers(image)
@@ -154,6 +157,9 @@ class ArUcoCamera(ArFeatures.ArCamera):
# Lock camera frame exploitation
self._frame_lock.acquire()
+ # Store projection execution start date
+ projection_start = time.perf_counter()
+
# Fill camera frame background with image
self.background = image
@@ -183,14 +189,11 @@ class ArUcoCamera(ArFeatures.ArCamera):
try:
- # Estimate scene markers poses
- self.aruco_detector.estimate_markers_pose(scene.aruco_markers_group.identifiers)
-
# Estimate scene pose from detected scene markers
- tvec, rmat, _, _ = scene.estimate_pose(self.aruco_detector.detected_markers)
+ tvec, rmat, _ = scene.estimate_pose(self.aruco_detector.detected_markers)
# Project scene into camera frame according estimated pose
- for layer_name, layer_projection in scene.project(tvec, rmat):
+ for layer_name, layer_projection in scene.project(tvec, rmat, self.visual_hfov, self.visual_vfov):
try:
@@ -205,20 +208,23 @@ class ArUcoCamera(ArFeatures.ArCamera):
exceptions[scene_name] = e
+ # Assess projection time in ms
+ projection_time = (time.perf_counter() - projection_start) * 1e3
+
# Unlock camera frame exploitation
self._frame_lock.release()
- # Return dection time and exceptions
- return detection_time, exceptions
+ # Return detection time, projection time and exceptions
+ return detection_time, projection_time, exceptions
- def __image(self, draw_detected_markers: dict = None, draw_scenes: dict = None, draw_optic_parameters_grid: dict = None, **kwargs) -> numpy.array:
+ def __image(self, draw_detected_markers: dict = None, draw_scenes: dict = None, draw_optic_parameters_grid: dict = None, **kwargs: dict) -> numpy.array:
"""Get frame image with ArUco detection visualisation.
Parameters:
- draw_detected_markers: ArucoMarker.draw parameters (if None, no marker drawn)
- draw_scenes: ArUcoScene.draw parameters (if None, no scene drawn)
- draw_optic_parameters_grid: OpticParameter.draw parameters (if None, no grid drawn)
- kwargs: ArCamera.image parameters
+ draw_detected_markers: ArucoMarker.draw parameters (if None, no marker drawn)
+ draw_scenes: ArUcoScene.draw parameters (if None, no scene drawn)
+ draw_optic_parameters_grid: OpticParameter.draw parameters (if None, no grid drawn)
+ kwargs: ArCamera.image parameters
"""
# Can't use camera frame when it is locked
@@ -253,7 +259,7 @@ class ArUcoCamera(ArFeatures.ArCamera):
return image
- def image(self, **kwargs) -> numpy.array:
+ def image(self, **kwargs: dict) -> numpy.array:
"""
Get frame image.
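
Callers of the reworked watch() unpack a 3-tuple and inspect the per-scene exception dictionary instead of catching raised errors. A minimal sketch, where aruco_camera and image are hypothetical:

detection_time, projection_time, exceptions = aruco_camera.watch(image)

print(f'detection: {detection_time:.3f} ms, projection: {projection_time:.3f} ms')

# Scene exceptions are collected rather than raised
for scene_name, exception in exceptions.items():
    print(f'{scene_name} failed: {exception}')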
diff --git a/src/argaze/ArUcoMarkers/ArUcoDetector.py b/src/argaze/ArUcoMarkers/ArUcoDetector.py
index 82c9394..e62a42e 100644
--- a/src/argaze/ArUcoMarkers/ArUcoDetector.py
+++ b/src/argaze/ArUcoMarkers/ArUcoDetector.py
@@ -38,7 +38,8 @@ ArUcoDetectorType = TypeVar('ArUcoDetector', bound="ArUcoDetector")
class DetectorParameters():
"""Wrapper class around ArUco marker detector parameters.
- .. note:: More details on [opencv page](https://docs.opencv.org/4.x/d1/dcd/structcv_1_1aruco_1_1DetectorParameters.html)
+ !!! note
+ More details on [opencv page](https://docs.opencv.org/4.x/d1/dcd/structcv_1_1aruco_1_1DetectorParameters.html)
"""
__parameters = aruco.DetectorParameters()
@@ -71,7 +72,8 @@ class DetectorParameters():
'minOtsuStdDev',
'perspectiveRemoveIgnoredMarginPerCell',
'perspectiveRemovePixelPerCell',
- 'polygonalApproxAccuracyRate'
+ 'polygonalApproxAccuracyRate',
+ 'useAruco3Detection'
]
def __init__(self, **kwargs):
@@ -98,8 +100,17 @@ class DetectorParameters():
return DetectorParameters(**json.load(configuration_file))
- def __str__(self, print_all=False) -> str:
- """Detector paremeters string representation."""
+ def __str__(self) -> str:
+ """Detector parameters string representation."""
+
+ return f'{self}'
+
+ def __format__(self, spec: str) -> str:
+ """Formated detector parameters string representation.
+
+ Parameters:
+ spec: 'modified' to get only modified parameters.
+ """
output = ''
@@ -109,7 +120,7 @@ class DetectorParameters():
output += f'\t*{parameter}: {getattr(self.__parameters, parameter)}\n'
- elif print_all:
+ elif spec == "":
output += f'\t{parameter}: {getattr(self.__parameters, parameter)}\n'
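
The former print_all flag thus becomes a format spec: formatting with 'modified' lists only the modified parameters, while the default spec lists them all, marking modified ones with a star. A short usage sketch, assuming constructor kwargs are what marks parameters as modified:

from argaze.ArUcoMarkers import ArUcoDetector

parameters = ArUcoDetector.DetectorParameters(useAruco3Detection=1)

print(f'{parameters}')           # all parameters, modified ones marked with *
print(f'{parameters:modified}')  # modified parameters only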
@@ -121,26 +132,24 @@ class DetectorParameters():
@dataclass
class ArUcoDetector():
- """ArUco markers detector."""
+ """ArUco markers detector.
- dictionary: ArUcoMarkersDictionary.ArUcoMarkersDictionary = field(default_factory=ArUcoMarkersDictionary.ArUcoMarkersDictionary)
- """ArUco markers dictionary to detect."""
+ Parameters:
+ dictionary: ArUco markers dictionary to detect.
+ marker_size: Size of ArUco markers to detect in centimeter.
+ optic_parameters: Optic parameters to use for ArUco detection into image.
+ parameters: ArUco detector parameters.
+ """
+ dictionary: ArUcoMarkersDictionary.ArUcoMarkersDictionary = field(default_factory=ArUcoMarkersDictionary.ArUcoMarkersDictionary)
marker_size: float = field(default=0.)
- """Size of ArUco markers to detect in centimeter."""
-
optic_parameters: ArUcoOpticCalibrator.OpticParameters = field(default_factory=ArUcoOpticCalibrator.OpticParameters)
- """Optic parameters to use for ArUco detection into image."""
-
parameters: DetectorParameters = field(default_factory=DetectorParameters)
- """ArUco detector parameters."""
def __post_init__(self):
# Init detected markers data
self.__detected_markers = {}
- self.__detected_markers_corners = []
- self.__detected_markers_ids = []
# Init detected board data
self.__board = None
@@ -249,39 +258,41 @@ class ArUcoDetector():
def detect_markers(self, image: numpy.array) -> float:
"""Detect all ArUco markers into an image.
- .. danger:: DON'T MIRROR IMAGE
- It makes the markers detection to fail.
+ !!! danger "DON'T MIRROR IMAGE"
+ It makes markers detection fail.
+
+ !!! danger "DON'T UNDISTORED IMAGE"
+ Camera intrisic parameters and distorsion coefficients are used later during pose estimation.
Returns:
- - detection time: marker detection time in ms
+ detection time: marker detection time in ms.
"""
# Reset detected markers data
- self.__detected_markers, self.__detected_markers_corners, self.__detected_markers_ids = {}, [], []
+ self.__detected_markers, detected_markers_corners, detected_markers_ids = {}, [], []
# Store marker detection start date
detection_start = time.perf_counter()
# Detect markers into gray picture
- self.__detected_markers_corners, self.__detected_markers_ids, _ = aruco.detectMarkers(cv.cvtColor(image, cv.COLOR_BGR2GRAY), self.dictionary.markers, parameters = self.parameters.internal)
+ detected_markers_corners, detected_markers_ids, _ = aruco.detectMarkers(cv.cvtColor(image, cv.COLOR_BGR2GRAY), self.dictionary.markers, parameters = self.parameters.internal)
# Assess marker detection time in ms
detection_time = (time.perf_counter() - detection_start) * 1e3
# Are there detected markers?
- if len(self.__detected_markers_corners) > 0:
+ if len(detected_markers_corners) > 0:
# Transform markers ids array into list
- self.__detected_markers_ids = self.__detected_markers_ids.T[0]
+ detected_markers_ids = detected_markers_ids.T[0]
# Gather detected markers data and update metrics
self.__detection_count += 1
- for i, marker_id in enumerate(self.__detected_markers_ids):
+ for i, marker_id in enumerate(detected_markers_ids):
marker = ArUcoMarker.ArUcoMarker(self.dictionary, marker_id, self.marker_size)
-
- marker.corners = self.__detected_markers_corners[i]
+ marker.corners = detected_markers_corners[i][0]
# No pose estimation: call estimate_markers_pose to get one
marker.translation = numpy.empty([0])
@@ -290,6 +301,7 @@ class ArUcoDetector():
self.__detected_markers[marker_id] = marker
+ # Update metrics
self.__detected_ids.append(marker_id)
return detection_time
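
Detection expects the raw BGR image, neither mirrored nor undistorted, and only returns the detection time; detected markers are read back through the detected_markers property. A minimal sketch, where detector is an already configured ArUcoDetector and 'scene.png' a hypothetical image file:

import cv2

image = cv2.imread('scene.png')

detection_time = detector.detect_markers(image)
print(f'detection time: {detection_time:.3f} ms')

for identifier, marker in detector.detected_markers.items():
    print(f'detected marker: {identifier}')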
@@ -298,31 +310,28 @@ class ArUcoDetector():
"""Estimate pose of current detected markers or of given markers id list."""
# Are there detected markers?
- if len(self.__detected_markers_corners) > 0:
+ if len(self.__detected_markers) > 0:
- # Is there a marker selection ?
- if len(markers_ids) > 0:
+ # Select all markers by default
+ if len(markers_ids) == 0:
- selected_markers_corners = tuple()
- selected_markers_ids = []
+ markers_ids = self.__detected_markers.keys()
- for i, marker_id in enumerate(self.__detected_markers_ids):
+ # Prepare data for aruco.estimatePoseSingleMarkers function
+ selected_markers_corners = tuple()
+ selected_markers_ids = []
- if marker_id in markers_ids:
+ for marker_id, marker in self.__detected_markers.items():
- selected_markers_corners += (self.__detected_markers_corners[i],)
- selected_markers_ids.append(marker_id)
+ if marker_id in markers_ids:
- # Otherwise, estimate pose of all markers
- else:
-
- selected_markers_corners = self.__detected_markers_corners
- selected_markers_ids = self.__detected_markers_ids
+ selected_markers_corners += (marker.corners,)
+ selected_markers_ids.append(marker_id)
# Estimate pose of selected markers
if len(selected_markers_corners) > 0:
- markers_rvecs, markers_tvecs, markers_points = aruco.estimatePoseSingleMarkers(selected_markers_corners, self.marker_size, numpy.array(self.optic_parameters.K), numpy.array(self.optic_parameters.D))
+ markers_rvecs, markers_tvecs, markers_points = aruco.estimatePoseSingleMarkers(selected_markers_corners, self.marker_size, numpy.array(self.optic_parameters.K), numpy.array(self.optic_parameters.D))
for i, marker_id in enumerate(selected_markers_ids):
@@ -330,7 +339,8 @@ class ArUcoDetector():
marker.translation = markers_tvecs[i][0]
marker.rotation, _ = cv.Rodrigues(markers_rvecs[i][0])
- marker.points = markers_points.reshape(4, 3)
+
+ marker.points = markers_points.reshape(4, 3).dot(marker.rotation) + marker.translation
@property
def detected_markers(self) -> dict[ArUcoMarkerType]:
@@ -361,19 +371,19 @@ class ArUcoDetector():
def detect_board(self, image: numpy.array, board, expected_markers_number):
"""Detect ArUco markers board in image setting up the number of detected markers needed to agree detection.
- .. danger:: DON'T MIRROR IMAGE
- It makes the markers detection to fail.
+ !!! danger "DON'T MIRROR IMAGE"
+ It makes markers detection fail.
"""
# detect markers from gray picture
gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
- self.__detected_markers_corners, self.__detected_markers_ids, _ = aruco.detectMarkers(gray, self.dictionary.markers, parameters = self.parameters.internal)
+ detected_markers_corners, detected_markers_ids, _ = aruco.detectMarkers(gray, self.dictionary.markers, parameters = self.parameters.internal)
# if all board markers are detected
- if len(self.__detected_markers_corners) == expected_markers_number:
+ if len(detected_markers_corners) == expected_markers_number:
self.__board = board
- self.__board_corners_number, self.__board_corners, self.__board_corners_ids = aruco.interpolateCornersCharuco(self.__detected_markers_corners, self.__detected_markers_ids, gray, self.__board.model)
+ self.__board_corners_number, self.__board_corners, self.__board_corners_ids = aruco.interpolateCornersCharuco(detected_markers_corners, detected_markers_ids, gray, self.__board.model)
else:
@@ -398,9 +408,11 @@ class ArUcoDetector():
@property
def detection_metrics(self) -> Tuple[int, dict]:
"""Get marker detection metrics.
+
Returns:
- number of detect function call
- dict with number of detection for each marker identifier"""
+ number of detect function calls
+ dict with number of detections for each marker identifier
+ """
return self.__detection_count, Counter(self.__detected_ids)
diff --git a/src/argaze/ArUcoMarkers/ArUcoMarker.py b/src/argaze/ArUcoMarkers/ArUcoMarker.py
index 57bd8bd..0f368f6 100644
--- a/src/argaze/ArUcoMarkers/ArUcoMarker.py
+++ b/src/argaze/ArUcoMarkers/ArUcoMarker.py
@@ -29,7 +29,7 @@ class ArUcoMarker():
"""Size of marker in centimeters."""
corners: numpy.array = field(init=False, repr=False)
- """Estimated 2D corner positions in camera image referential."""
+ """Estimated 2D corners position in camera image referential."""
translation: numpy.array = field(init=False, repr=False)
"""Estimated 3D center position in camera world referential."""
@@ -68,7 +68,7 @@ class ArUcoMarker():
# Draw marker if required
if color is not None:
- aruco.drawDetectedMarkers(image, [self.corners], numpy.array([self.identifier]), color)
+ aruco.drawDetectedMarkers(image, [numpy.array([list(self.corners)])], numpy.array([self.identifier]), color)
# Draw marker axes if pose has been estimated and if required
if self.translation.size == 3 and self.rotation.size == 9 and draw_axes is not None:
diff --git a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py
index 5b6c69d..37bceec 100644
--- a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py
+++ b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py
@@ -17,8 +17,7 @@ import re
from argaze.ArUcoMarkers import ArUcoMarkersDictionary, ArUcoMarker, ArUcoOpticCalibrator
import numpy
-import cv2 as cv
-import cv2.aruco as aruco
+import cv2
T0 = numpy.array([0., 0., 0.])
"""Define no translation vector."""
@@ -58,50 +57,31 @@ def is_rotation_matrix(R):
return n < 1e-3
-def make_euler_rotation_vector(R):
-
- assert(is_rotation_matrix(R))
-
- sy = math.sqrt(R[0,0] * R[0,0] + R[1,0] * R[1,0])
-
- singular = sy < 1e-6
-
- if not singular :
- x = math.atan2(R[2,1] , R[2,2])
- y = math.atan2(-R[2,0], sy)
- z = math.atan2(R[1,0], R[0,0])
- else :
- x = math.atan2(-R[1,2], R[1,1])
- y = math.atan2(-R[2,0], sy)
- z = 0
-
- return numpy.array([numpy.rad2deg(x), numpy.rad2deg(y), numpy.rad2deg(z)])
-
@dataclass(frozen=True)
class Place():
- """Define a place as a pose and a marker."""
-
- translation: numpy.array
- """Position in group referential."""
+ """Define a place as list of corners position and a marker.
- rotation: numpy.array
- """Rotation in group referential."""
+ Parameters:
+ corners: 3D corner positions in group referential.
+ marker: ArUco marker linked to the place.
+ """
+ corners: numpy.array
marker: dict
- """ArUco marker linked to the place."""
@dataclass
class ArUcoMarkersGroup():
- """Handle group of ArUco markers as one unique spatial entity and estimate its pose."""
+ """Handle group of ArUco markers as one unique spatial entity and estimate its pose.
- marker_size: float = field(default=0.)
- """Expected size of all markers in the group."""
+ Parameters:
+ marker_size: expected size of all markers in the group.
+ dictionary: expected dictionary of all markers in the group.
+ places: expected marker places.
+ """
+ marker_size: float = field(default=0.)
dictionary: ArUcoMarkersDictionary.ArUcoMarkersDictionary = field(default_factory=ArUcoMarkersDictionary.ArUcoMarkersDictionary)
- """Expected dictionary of all markers in the group."""
-
places: dict = field(default_factory=dict)
- """Expected markers place"""
def __post_init__(self):
"""Init group pose and places pose."""
@@ -144,12 +124,16 @@ class ArUcoMarkersGroup():
new_marker = ArUcoMarker.ArUcoMarker(self.dictionary, identifier, self.marker_size)
- new_places[identifier] = Place(tvec, rmat, new_marker)
+ # Build marker corners thanks to translation vector and rotation matrix
+ place_corners = numpy.array([[-self.marker_size/2, self.marker_size/2, 0], [self.marker_size/2, self.marker_size/2, 0], [self.marker_size/2, -self.marker_size/2, 0], [-self.marker_size/2, -self.marker_size/2, 0]])
+ place_corners = place_corners.dot(rmat) + tvec
+
+ new_places[identifier] = Place(place_corners, new_marker)
- # else places are configured using detected markers
+ # else places are configured using detected markers estimated points
elif isinstance(data, ArUcoMarker.ArUcoMarker):
- new_places[identifier] = Place(data.translation, data.rotation, data)
+ new_places[identifier] = Place(data.points, data)
# else places are already at expected format
elif (type(identifier) == int) and isinstance(data, Place):
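
A Place now stores the four 3D corner positions of its marker rather than a translation/rotation pair. With the row-vector convention used above, the corners of a marker of size s are the local square corners right-multiplied by the rotation matrix, then translated; a worked sketch with a trivial pose:

import numpy

# Hypothetical pose: 5 cm marker, no rotation, centered at (10, 0, 0)
s = 5.
tvec = numpy.array([10., 0., 0.])
rmat = numpy.identity(3)

local_corners = numpy.array([[-s/2, s/2, 0.], [s/2, s/2, 0.], [s/2, -s/2, 0.], [-s/2, -s/2, 0.]])
place_corners = local_corners.dot(rmat) + tvec
# -> [[7.5, 2.5, 0.], [12.5, 2.5, 0.], [12.5, -2.5, 0.], [7.5, -2.5, 0.]]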
@@ -158,21 +142,15 @@ class ArUcoMarkersGroup():
self.places = new_places
- # Init place consistency
- self.init_places_consistency()
-
@classmethod
def from_obj(self, obj_filepath: str) -> ArUcoMarkersGroupType:
"""Load ArUco markers group from .obj file.
!!! note
- Expected object (o) name format: <DICTIONARY>#<IDENTIFIER>_Marker
+ Expected object (o) name format: <DICTIONARY>#<IDENTIFIER>_Marker
!!! note
- All markers have to belong to the same dictionary.
-
- !!! note
- Marker normal vectors (vn) expected.
+ All markers have to belong to the same dictionary.
"""
@@ -184,8 +162,7 @@ class ArUcoMarkersGroup():
OBJ_RX_DICT = {
'object': re.compile(r'o (.*)#([0-9]+)_(.*)\n'),
'vertice': re.compile(r'v ([+-]?[0-9]*[.]?[0-9]+) ([+-]?[0-9]*[.]?[0-9]+) ([+-]?[0-9]*[.]?[0-9]+)\n'),
- 'normal': re.compile(r'vn ([+-]?[0-9]*[.]?[0-9]+) ([+-]?[0-9]*[.]?[0-9]+) ([+-]?[0-9]*[.]?[0-9]+)\n'),
- 'face': re.compile(r'f ([0-9]+)//([0-9]+) ([0-9]+)//([0-9]+) ([0-9]+)//([0-9]+) ([0-9]+)//([0-9]+)\n'),
+ 'face': re.compile(r'f ([0-9]+) ([0-9]+) ([0-9]+) ([0-9]+)\n'),
'comment': re.compile(r'#(.*)\n') # keep comment regex after object regex because the # is used in object string too
}
@@ -205,7 +182,6 @@ class ArUcoMarkersGroup():
identifier = None
vertices = []
- normals = {}
faces = {}
# Open the file and read through it line by line
@@ -244,15 +220,10 @@ class ArUcoMarkersGroup():
vertices.append(tuple([float(match.group(1)), float(match.group(2)), float(match.group(3))]))
- # Extract normal to calculate rotation matrix
- elif key == 'normal':
-
- normals[identifier] = tuple([float(match.group(1)), float(match.group(2)), float(match.group(3))])
-
# Extract vertice ids
elif key == 'face':
- faces[identifier] = [int(match.group(1)), int(match.group(3)), int(match.group(5)), int(match.group(7))]
+ faces[identifier] = [int(match.group(1)), int(match.group(2)), int(match.group(3)), int(match.group(4))]
# Go to next line
line = file.readline()
@@ -262,32 +233,20 @@ class ArUcoMarkersGroup():
# Retrieve marker vertices thanks to face vertex ids
for identifier, face in faces.items():
- # Gather place corners from counter clockwise ordered face vertices
- corners = numpy.array([ vertices[i-1] for i in face ])
-
- # Edit translation (Tp) allowing to move world axis (W) at place axis (P)
- Tp = corners.mean(axis=0)
+ # Gather place corners in clockwise order
+ cw_corners = numpy.array([ vertices[i-1] for i in reversed(face) ])
# Edit place axis from corners positions
- place_x_axis = corners[1:3].mean(axis=0) - Tp
+ place_x_axis = cw_corners[2] - cw_corners[3]
place_x_axis_norm = numpy.linalg.norm(place_x_axis)
- place_x_axis = place_x_axis / place_x_axis_norm
-
- place_y_axis = corners[2:4].mean(axis=0) - Tp
+
+ place_y_axis = cw_corners[0] - cw_corners[3]
place_y_axis_norm = numpy.linalg.norm(place_y_axis)
- place_y_axis = place_y_axis / place_y_axis_norm
- place_z_axis = normals[identifier]
-
- # Edit rotation (Rp) allowing to transform world axis (W) into place axis (P)
- W = numpy.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
- P = numpy.array([place_x_axis, place_y_axis, place_z_axis])
- Rp = W.dot(P.T)
-
# Check axis size: they should be almost equal
if math.isclose(place_x_axis_norm, place_y_axis_norm, rel_tol=1e-3):
- current_marker_size = place_x_axis_norm*2
+ current_marker_size = place_x_axis_norm
# Check that all markers size are almost equal
if new_marker_size > 0:
@@ -300,7 +259,7 @@ class ArUcoMarkersGroup():
# Create a new place related to a new marker
new_marker = ArUcoMarker.ArUcoMarker(new_dictionary, identifier, new_marker_size)
- new_places[identifier] = Place(Tp, Rp, new_marker)
+ new_places[identifier] = Place(cw_corners, new_marker)
except IOError:
raise IOError(f'File not found: {obj_filepath}')
@@ -335,18 +294,7 @@ class ArUcoMarkersGroup():
output += '\n\n\tPlaces:'
for identifier, place in self.places.items():
output += f'\n\t\t- {identifier}:'
- output += f'\n{place.translation}'
- output += f'\n{place.rotation}'
-
- output += '\n\n\tAngle cache:'
- for A_identifier, A_angle_cache in self.__rotation_cache.items():
- for B_identifier, angle in A_angle_cache.items():
- output += f'\n\t\t- {A_identifier}/{B_identifier}: [{angle[0]:3f} {angle[1]:3f} {angle[2]:3f}]'
-
- output += '\n\n\tDistance cache:'
- for A_identifier, A_distance_cache in self.__translation_cache.items():
- for B_identifier, distance in A_distance_cache.items():
- output += f'\n\t\t- {A_identifier}/{B_identifier}: {distance:3f}'
+ output += f'\n{place.corners}'
return output
@@ -360,8 +308,8 @@ class ArUcoMarkersGroup():
"""Sort markers belonging to the group from given detected markers dict (cf ArUcoDetector.detect_markers()).
Returns:
- dict of markers belonging to this group
- dict of remaining markers not belonging to this group
+ dict of markers belonging to this group
+ dict of remaining markers not belonging to this group
"""
group_markers = {}
@@ -379,148 +327,22 @@ class ArUcoMarkersGroup():
return group_markers, remaining_markers
- def init_places_consistency(self):
- """Initialize places consistency to speed up further markers consistency checking."""
-
- # Process expected rotation between places combinations to speed up further calculations
- self.__rotation_cache = {}
- for (A_identifier, A_place), (B_identifier, B_place) in itertools.combinations(self.places.items(), 2):
-
- A = self.places[A_identifier].rotation
- B = self.places[B_identifier].rotation
-
- if numpy.array_equal(A, B):
-
- AB_rvec = [0., 0., 0.]
- BA_rvec = [0., 0., 0.]
-
- else:
-
- # Calculate euler angle representation of AB and BA rotation matrix
- AB_rvec = make_euler_rotation_vector(B.dot(A.T))
- BA_rvec = make_euler_rotation_vector(A.dot(B.T))
-
- try:
- self.__rotation_cache[A_identifier][B_identifier] = AB_rvec
- except:
- self.__rotation_cache[A_identifier] = {B_identifier: AB_rvec}
-
- try:
- self.__rotation_cache[B_identifier][A_identifier] = BA_rvec
- except:
- self.__rotation_cache[B_identifier] = {A_identifier: BA_rvec}
-
- # Process translation between each places combinations to speed up further calculations
- self.__translation_cache = {}
- for (A_identifier, A_place), (B_identifier, B_place) in itertools.combinations(self.places.items(), 2):
-
- A = self.places[A_identifier].translation
- B = self.places[B_identifier].translation
-
- # Calculate translation between A and B position
- AB_tvec = numpy.linalg.norm(B - A)
-
- try:
- self.__translation_cache[A_identifier][B_identifier] = AB_tvec
- except:
- self.__translation_cache[A_identifier] = {B_identifier: AB_tvec}
+ def estimate_pose_from_markers_corners(self, markers: dict, K: numpy.array, D: numpy.array) -> Tuple[bool, numpy.array, numpy.array]:
+ """Estimate pose from markers corners and places corners.
- try:
- self.__translation_cache[B_identifier][A_identifier] = AB_tvec
- except:
- self.__translation_cache[B_identifier] = {A_identifier: AB_tvec}
-
- def check_markers_consistency(self, group_markers: dict, angle_tolerance: float, distance_tolerance: float) -> Tuple[dict, dict, dict]:
- """Evaluate if given markers configuration match related places configuration.
+ Parameters:
+ markers: detected markers to use for pose estimation.
+ K: intrinsic camera parameters
+ D: camera distortion matrix
Returns:
- dict of consistent markers
- dict of unconsistent markers
- dict of identified distance or angle unconsistencies and out-of-bounds values
+ success: True if the pose estimation succeeded
+ tvec: scene translation vector
+ rvec: scene rotation vector
"""
- consistent_markers = {}
- unconsistencies = {'rotation': {}, 'translation': {}}
-
- for (A_identifier, A_marker), (B_identifier, B_marker) in itertools.combinations(group_markers.items(), 2):
-
- try:
-
- # Rotation matrix from A marker to B marker
- AB = B_marker.rotation.dot(A_marker.rotation.T)
-
- # Calculate euler angle representation of AB rotation matrix
- AB_rvec = make_euler_rotation_vector(AB)
- expected_rvec= self.__rotation_cache[A_identifier][B_identifier]
-
- # Calculate distance between A marker center and B marker center
- AB_tvec = numpy.linalg.norm(A_marker.translation - B_marker.translation)
- expected_tvec = self.__translation_cache[A_identifier][B_identifier]
-
- # Check angle and distance according given tolerance then normalise marker pose
- consistent_rotation = numpy.allclose(AB_rvec, expected_rvec, atol=angle_tolerance)
- consistent_translation = math.isclose(AB_tvec, expected_tvec, abs_tol=distance_tolerance)
-
- if consistent_rotation and consistent_translation:
-
- if A_identifier not in consistent_markers.keys():
-
- # Remember this marker is already validated
- consistent_markers[A_identifier] = A_marker
-
- if B_identifier not in consistent_markers.keys():
-
- # Remember this marker is already validated
- consistent_markers[B_identifier] = B_marker
-
- else:
-
- if not consistent_rotation:
- unconsistencies['rotation'][f'{A_identifier}/{B_identifier}'] = {'current': AB_rvec, 'expected': expected_rvec}
-
- if not consistent_translation:
- unconsistencies['translation'][f'{A_identifier}/{B_identifier}'] = {'current': AB_tvec, 'expected': expected_tvec}
-
- except KeyError:
-
- raise ValueError(f'Marker {A_identifier} or {B_identifier} don\'t belong to the group.')
-
- # Gather unconsistent markers
- unconsistent_markers = {}
-
- for identifier, marker in group_markers.items():
-
- if identifier not in consistent_markers.keys():
-
- unconsistent_markers[identifier] = marker
-
- return consistent_markers, unconsistent_markers, unconsistencies
-
- def estimate_pose_from_single_marker(self, marker: ArUcoMarker.ArUcoMarker) -> Tuple[numpy.array, numpy.array]:
- """Calculate rotation and translation that move a marker to its place."""
-
- # Get the place related to the given marker
- try:
-
- place = self.places[marker.identifier]
-
- # Rotation matrix that transform marker to related place
- self._rotation = marker.rotation.dot(place.rotation.T)
-
- # Translation vector that transform marker to related place
- self._translation = marker.translation - place.translation.dot(place.rotation).dot(marker.rotation.T)
-
- return self._translation, self._rotation
-
- except KeyError:
-
- raise ValueError(f'Marker {marker.identifier} doesn\'t belong to the group.')
-
- def estimate_pose_from_markers(self, markers: dict) -> Tuple[numpy.array, numpy.array]:
- """Calculate average rotation and translation that move markers to their related places."""
-
- rotations = []
- translations = []
+ markers_corners_2d = []
+ places_corners_3d = []
for identifier, marker in markers.items():
@@ -528,72 +350,30 @@ class ArUcoMarkersGroup():
place = self.places[identifier]
- # Rotation matrix that transform marker to related place
- R = marker.rotation.dot(place.rotation.T)
+ for marker_corner in marker.corners:
+ markers_corners_2d.append(list(marker_corner))
- # Translation vector that transform marker to related place
- T = marker.translation - place.translation.dot(place.rotation).dot(marker.rotation.T)
-
- rotations.append(R)
- translations.append(T)
+ for place_corner in place.corners:
+ places_corners_3d.append(list(place_corner))
except KeyError:
raise ValueError(f'Marker {marker.identifier} doesn\'t belong to the group.')
- # Consider ArUcoMarkersGroup rotation as the mean of all marker rotations
- # !!! WARNING !!! This is a bad hack : processing rotations average is a very complex problem that needs to well define the distance calculation method before.
- self._rotation = numpy.mean(numpy.array(rotations), axis=0)
-
- # Consider ArUcoMarkersGroup translation as the mean of all marker translations
- self._translation = numpy.mean(numpy.array(translations), axis=0)
-
- return self._translation, self._rotation
-
- def estimate_pose_from_axis_markers(self, origin_marker: ArUcoMarker.ArUcoMarker, horizontal_axis_marker: ArUcoMarker.ArUcoMarker, vertical_axis_marker: ArUcoMarker.ArUcoMarker) -> Tuple[numpy.array, numpy.array]:
- """Calculate rotation and translation from 3 markers defining an orthogonal axis."""
-
- O_marker = origin_marker
- A_marker = horizontal_axis_marker
- B_marker = vertical_axis_marker
-
- O_place = self.places[O_marker.identifier]
- A_place = self.places[A_marker.identifier]
- B_place = self.places[B_marker.identifier]
+ # SolvePnP using cv2.SOLVEPNP_SQPNP flag
+ # TODO: it also works with cv2.SOLVEPNP_EPNP flag, so we need to test which is faster.
+ # About solvePnP flags: https://docs.opencv.org/4.x/d5/d1f/calib3d_solvePnP.html
+ success, rvec, tvec = cv2.solvePnP(numpy.array(places_corners_3d), numpy.array(markers_corners_2d), numpy.array(K), numpy.array(D), flags=cv2.SOLVEPNP_SQPNP)
- # Place axis
- OA = A_place.translation - O_place.translation
- OA = OA / numpy.linalg.norm(OA)
+ # Refine pose estimation using Gauss-Newton optimisation
+ if success:
- OB = B_place.translation - O_place.translation
- OB = OB / numpy.linalg.norm(OB)
+ rvec, tvec = cv2.solvePnPRefineVVS(numpy.array(places_corners_3d), numpy.array(markers_corners_2d), numpy.array(K), numpy.array(D), rvec, tvec)
- # Detect and correct bad place axis orientation
- X_sign = numpy.sign(OA)[0]
- Y_sign = numpy.sign(OB)[1]
+ self._translation = tvec.T
+ self._rotation = rvec.T
- P = numpy.array([OA*X_sign, OB*Y_sign, numpy.cross(OA*X_sign, OB*Y_sign)])
-
- # Marker axis
- OA = A_marker.translation - O_marker.translation
- OA = OA / numpy.linalg.norm(OA)
-
- OB = B_marker.translation - O_marker.translation
- OB = OB / numpy.linalg.norm(OB)
-
- # Detect and correct bad place axis orientation
- X_sign = numpy.sign(OA)[0]
- Y_sign = -numpy.sign(OB)[1]
-
- M = numpy.array([OA*X_sign, OB*Y_sign, numpy.cross(OA*X_sign, OB*Y_sign)])
-
- # Then estimate ArUcoMarkersGroup rotation
- self._rotation = P.dot(M.T)
-
- # Consider ArUcoMarkersGroup translation as the translation of the marker at axis origin
- self._translation = O_marker.translation - O_place.translation.dot(O_place.rotation).dot(M.T)
-
- return self._translation, self._rotation
+ return success, self._translation, self._rotation
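
The group pose is thus solved as a single PnP problem over all matched corners instead of averaging per-marker poses. A standalone sketch of the same cv2 calls on synthetic data (one place, one detected marker, hypothetical camera matrix):

import cv2
import numpy

places_corners_3d = numpy.array([[-2.5, 2.5, 0.], [2.5, 2.5, 0.], [2.5, -2.5, 0.], [-2.5, -2.5, 0.]])
markers_corners_2d = numpy.array([[300., 200.], [340., 200.], [340., 240.], [300., 240.]])

K = numpy.array([[800., 0., 320.], [0., 800., 240.], [0., 0., 1.]])
D = numpy.zeros(5)

success, rvec, tvec = cv2.solvePnP(places_corners_3d, markers_corners_2d, K, D, flags=cv2.SOLVEPNP_SQPNP)

if success:
    rvec, tvec = cv2.solvePnPRefineVVS(places_corners_3d, markers_corners_2d, K, D, rvec, tvec)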
@property
def translation(self) -> numpy.array:
@@ -622,15 +402,15 @@ class ArUcoMarkersGroup():
try:
axisPoints = numpy.float32([[length, 0, 0], [0, length, 0], [0, 0, length], [0, 0, 0]]).reshape(-1, 3)
- axisPoints, _ = cv.projectPoints(axisPoints, self._rotation, self._translation, numpy.array(K), numpy.array(D))
+ axisPoints, _ = cv2.projectPoints(axisPoints, self._rotation, self._translation, numpy.array(K), numpy.array(D))
axisPoints = axisPoints.astype(int)
- cv.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[0].ravel()), (0, 0, 255), thickness) # X (red)
- cv.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[1].ravel()), (0, 255, 0), thickness) # Y (green)
- cv.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[2].ravel()), (255, 0, 0), thickness) # Z (blue)
+ cv2.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[0].ravel()), (0, 0, 255), thickness) # X (red)
+ cv2.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[1].ravel()), (0, 255, 0), thickness) # Y (green)
+ cv2.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[2].ravel()), (255, 0, 0), thickness) # Z (blue)
# Ignore errors due to out of field axis: their coordinate are larger than int32 limitations.
- except cv.error:
+ except cv2.error:
pass
def draw_places(self, image: numpy.array, K, D, color: tuple = None, border_size: int = 0):
@@ -642,52 +422,24 @@ class ArUcoMarkersGroup():
try:
- T = self.places[identifier].translation
- R = self.places[identifier].rotation
-
- placePoints = (T + numpy.float32([R.dot([-l, -l, 0]), R.dot([l, -l, 0]), R.dot([l, l, 0]), R.dot([-l, l, 0])])).reshape(-1, 3)
- placePoints, _ = cv.projectPoints(placePoints, self._rotation, self._translation, numpy.array(K), numpy.array(D))
+ placePoints, _ = cv2.projectPoints(place.corners, self._rotation, self._translation, numpy.array(K), numpy.array(D))
placePoints = placePoints.astype(int)
- cv.line(image, tuple(placePoints[0].ravel()), tuple(placePoints[1].ravel()), color, border_size)
- cv.line(image, tuple(placePoints[1].ravel()), tuple(placePoints[2].ravel()), color, border_size)
- cv.line(image, tuple(placePoints[2].ravel()), tuple(placePoints[3].ravel()), color, border_size)
- cv.line(image, tuple(placePoints[3].ravel()), tuple(placePoints[0].ravel()), color, border_size)
+ cv2.line(image, tuple(placePoints[0].ravel()), tuple(placePoints[1].ravel()), color, border_size)
+ cv2.line(image, tuple(placePoints[1].ravel()), tuple(placePoints[2].ravel()), color, border_size)
+ cv2.line(image, tuple(placePoints[2].ravel()), tuple(placePoints[3].ravel()), color, border_size)
+ cv2.line(image, tuple(placePoints[3].ravel()), tuple(placePoints[0].ravel()), color, border_size)
# Ignore errors due to out of field places: their coordinate are larger than int32 limitations.
- except cv.error:
+ except cv2.error:
pass
- def draw_places_axes(self, image: numpy.array, K, D, thickness: int = 0, length: float = 0):
- """Draw group place axes."""
-
- for identifier, place in self.places.items():
-
- try:
-
- T = self.places[identifier].translation
- R = self.places[identifier].rotation
-
- axisPoints = (T + numpy.float32([R.dot([length, 0, 0]), R.dot([0, length, 0]), R.dot([0, 0, length]), R.dot([0, 0, 0])])).reshape(-1, 3)
- axisPoints, _ = cv.projectPoints(axisPoints, self._rotation, self._translation, numpy.array(K), numpy.array(D))
- axisPoints = axisPoints.astype(int)
-
- cv.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[0].ravel()), (0, 0, 255), thickness) # X (red)
- cv.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[1].ravel()), (0, 255, 0), thickness) # Y (green)
- cv.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[2].ravel()), (255, 0, 0), thickness) # Z (blue)
-
- # Ignore errors due to out of field places: their coordinate are larger than int32 limitations.
- except cv.error:
- pass
-
- def draw(self, image: numpy.array, K, D, draw_axes: dict = None, draw_places: dict = None, draw_places_axes: dict = None):
+ def draw(self, image: numpy.array, K, D, draw_axes: dict = None, draw_places: dict = None):
"""Draw group axes and places.
Parameters:
-
draw_axes: draw_axes parameters (if None, no axes drawn)
draw_places: draw_places parameters (if None, no places drawn)
- draw_places_axes: draw_places_axes parameters (if None, no places axes drawn)
"""
# Draw axes if required
@@ -700,11 +452,6 @@ class ArUcoMarkersGroup():
self.draw_places(image, K, D, **draw_places)
- # Draw places axes if required
- if draw_places_axes is not None:
-
- self.draw_places_axes(image, K, D, **draw_places_axes)
-
def to_obj(self, obj_filepath):
"""Save group to .obj file."""
@@ -715,26 +462,19 @@ class ArUcoMarkersGroup():
v_count = 0
- for identifier, place in self.places.items():
+ for p, (identifier, place) in enumerate(self.places.items()):
file.write(f'o {self.dictionary.name}#{identifier}_Marker\n')
vertices = ''
- T = place.translation
- R = place.rotation
-
- points = (T + numpy.float32([R.dot(place.marker.points[0]), R.dot(place.marker.points[1]), R.dot(place.marker.points[2]), R.dot(place.marker.points[3])])).reshape(-1, 3)
-
- print(points)
-
# Write vertices in reverse order
- for i in [3, 2, 1, 0]:
+ for v in [3, 2, 1, 0]:
- file.write(f'v {" ".join(map(str, points[i]))}\n')
+ file.write(f'v {" ".join(map(str, place.corners[v]))}\n')
v_count += 1
vertices += f' {v_count}'
- file.write('s off\n')
+ #file.write('s off\n')
file.write(f'f{vertices}\n')
diff --git a/src/argaze/ArUcoMarkers/ArUcoScene.py b/src/argaze/ArUcoMarkers/ArUcoScene.py
index f6b303a..51dd88c 100644
--- a/src/argaze/ArUcoMarkers/ArUcoScene.py
+++ b/src/argaze/ArUcoMarkers/ArUcoScene.py
@@ -96,14 +96,13 @@ class ArUcoScene(ArFeatures.ArScene):
# Create new aruco scene using temporary ar scene values
return ArUcoScene(aruco_markers_group=new_aruco_markers_group, **temp_ar_scene_values)
- def estimate_pose(self, detected_markers) -> Tuple[numpy.array, numpy.array, str, dict]:
+ def estimate_pose(self, detected_markers) -> Tuple[numpy.array, numpy.array, dict]:
"""Estimate scene pose from detected ArUco markers.
Returns:
- scene translation vector
- scene rotation matrix
- pose estimation strategy
- dict of markers used to estimate the pose
+ scene translation vector
+ scene rotation matrix
+ dict of markers used to estimate the pose
"""
# Pose estimation fails when no marker is detected
@@ -118,26 +117,19 @@ class ArUcoScene(ArFeatures.ArScene):
raise ArFeatures.PoseEstimationFailed('No marker belongs to the scene')
- # Estimate scene pose from unique marker transformations
- elif len(scene_markers) == 1:
+ # Pose estimation fails if only one marker belongs to the scene
+ if len(scene_markers) == 1:
- marker_id, marker = scene_markers.popitem()
- tvec, rmat = self.aruco_markers_group.estimate_pose_from_single_marker(marker)
-
- return tvec, rmat, 'estimate_pose_from_single_marker', {marker_id: marker}
+ raise ArFeatures.PoseEstimationFailed('Only one marker belongs to the scene')
- # Otherwise, check markers consistency
- consistent_markers, unconsistent_markers, unconsistencies = self.aruco_markers_group.check_markers_consistency(scene_markers, self.angle_tolerance, self.distance_tolerance)
+ # Estimate pose from detected markers corners
+ success, tvec, rmat = self.aruco_markers_group.estimate_pose_from_markers_corners(scene_markers, self.parent.aruco_detector.optic_parameters.K, self.parent.aruco_detector.optic_parameters.D)
- # Pose estimation fails when no marker passes consistency checking
- if len(consistent_markers) == 0:
+ if not success:
- raise ArFeatures.PoseEstimationFailed('Unconsistent marker poses', unconsistencies)
+ raise ArFeatures.PoseEstimationFailed('Can\'t estimate pose from markers corners positions')
- # Otherwise, estimate scene pose from all consistent markers pose
- tvec, rmat = self.aruco_markers_group.estimate_pose_from_markers(consistent_markers)
-
- return tvec, rmat, 'estimate_pose_from_markers', consistent_markers
+ return tvec, rmat, scene_markers
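
Callers now get back the markers actually used for the estimation and handle every failure case (no scene marker, a single scene marker, PnP failure) through the same exception type. A minimal calling sketch, where scene and detected_markers are hypothetical:

from argaze import ArFeatures

try:
    tvec, rmat, used_markers = scene.estimate_pose(detected_markers)

except ArFeatures.PoseEstimationFailed as exception:
    print(f'pose estimation failed: {exception}')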
def draw(self, image: numpy.array, draw_aruco_markers_group: dict = None):
"""
diff --git a/src/argaze/AreaOfInterest/AOI2DScene.py b/src/argaze/AreaOfInterest/AOI2DScene.py
index 73c977f..062044f 100644
--- a/src/argaze/AreaOfInterest/AOI2DScene.py
+++ b/src/argaze/AreaOfInterest/AOI2DScene.py
@@ -10,21 +10,103 @@ __license__ = "BSD"
from typing import TypeVar, Tuple
from argaze import DataStructures
-from argaze.AreaOfInterest import AOIFeatures
+from argaze.AreaOfInterest import AOIFeatures, AOI3DScene
from argaze import GazeFeatures
import cv2
import numpy
+from xml.dom import minidom
AOI2DSceneType = TypeVar('AOI2DScene', bound="AOI2DScene")
# Type definition for type annotation convenience
+AOI3DSceneType = TypeVar('AOI3DScene', bound="AOI3DScene")
+# Type definition for type annotation convenience
+
class AOI2DScene(AOIFeatures.AOIScene):
"""Define AOI 2D scene."""
- def __init__(self, aois_2d: dict = None):
+ def __init__(self, aoi_2d: dict = None):
+
+ super().__init__(2, aoi_2d)
+
+ @classmethod
+ def from_svg(self, svg_filepath: str) -> AOI2DSceneType:
+ """
+ Load areas from .svg file.
+
+ Parameters:
+ svg_filepath: path to svg file
+
+ !!! note
+ Available SVG elements are: path, rect, circle and ellipse.
+
+ !!! warning
+ Available SVG path d-string commands are: MoveTo (M), LineTo (L) and ClosePath (Z).
+ """
+
+ with minidom.parse(svg_filepath) as description_file:
+
+ new_areas = {}
+
+ # Load SVG path
+ for path in description_file.getElementsByTagName('path'):
+
+ # Convert d-string into array
+ d_string = path.getAttribute('d')
+
+ assert(d_string[0] == 'M')
+ assert(d_string[-1] == 'Z')
+
+ points = [(float(x), float(y)) for x, y in [p.split(',') for p in d_string[1:-1].split('L')]]
+
+ new_areas[path.getAttribute('id')] = AOIFeatures.AreaOfInterest(points)
+
+ # Load SVG rect
+ for rect in description_file.getElementsByTagName('rect'):
+
+ # Convert rect element into dict
+ rect_dict = {
+ "Rectangle": {
+ 'x': float(rect.getAttribute('x')),
+ 'y': float(rect.getAttribute('y')),
+ 'width': float(rect.getAttribute('width')),
+ 'height': float(rect.getAttribute('height'))
+ }
+ }
+
+ new_areas[rect.getAttribute('id')] = AOIFeatures.AreaOfInterest.from_dict(rect_dict)
- super().__init__(2, aois_2d)
+ # Load SVG circle
+ for circle in description_file.getElementsByTagName('circle'):
+
+ # Convert circle element into dict
+ circle_dict = {
+ "Circle": {
+ 'cx': float(circle.getAttribute('cx')),
+ 'cy': float(circle.getAttribute('cy')),
+ 'radius': float(circle.getAttribute('r'))
+ }
+ }
+
+ new_areas[circle.getAttribute('id')] = AOIFeatures.AreaOfInterest.from_dict(circle_dict)
+
+ # Load SVG ellipse
+ for ellipse in description_file.getElementsByTagName('ellipse'):
+
+ # Convert ellipse element into dict
+ ellipse_dict = {
+ "Ellipse": {
+ 'cx': float(ellipse.getAttribute('cx')),
+ 'cy': float(ellipse.getAttribute('cy')),
+ 'rx': float(ellipse.getAttribute('rx')),
+ 'ry': float(ellipse.getAttribute('ry'))
+ }
+ }
+
+ new_areas[ellipse.getAttribute('id')] = AOIFeatures.AreaOfInterest.from_dict(ellipse_dict)
+
+ return AOI2DScene(new_areas)
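
A minimal sketch of loading such a description, assuming a small SVG file whose path d-strings only use the MoveTo, LineTo and ClosePath commands stated above:

from argaze.AreaOfInterest import AOI2DScene

svg = '''<svg>
    <path id="Triangle" d="M10,10L100,10L55,90Z"/>
    <rect id="Screen" x="0" y="0" width="320" height="240"/>
    <circle id="Button" cx="50" cy="50" r="10"/>
</svg>'''

with open('aoi.svg', 'w') as file:  # hypothetical file path
    file.write(svg)

aoi_2d_scene = AOI2DScene.AOI2DScene.from_svg('aoi.svg')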
def draw(self, image: numpy.array, draw_aoi: dict = None, exclude=[]):
"""Draw AOI polygons on image.
@@ -56,7 +138,7 @@ class AOI2DScene(AOIFeatures.AOIScene):
yield name, aoi, matching
def draw_raycast(self, image: numpy.array, pointer:tuple, exclude=[], base_color=(0, 0, 255), matching_color=(0, 255, 0)):
- """Draw AOIs with their matching status."""
+ """Draw AOI with their matching status."""
for name, aoi, matching in self.raycast(pointer):
@@ -89,6 +171,7 @@ class AOI2DScene(AOIFeatures.AOIScene):
yield name, aoi, matched_region, aoi_ratio, circle_ratio
+ '''DEPRECATED: but maybe still useful?
def reframe(self, aoi: AOIFeatures.AreaOfInterest, size: tuple) -> AOI2DSceneType:
"""
Reframe whole scene to a scene bounded by a 4 vertices 2D AOI.
@@ -120,3 +203,31 @@ class AOI2DScene(AOIFeatures.AOIScene):
aoi2D_scene[name] = numpy.matmul(aoi2D - Src_origin, M.T)
return aoi2D_scene
+ '''
+ def dimensionalize(self, rectangle_3d: AOIFeatures.AreaOfInterest, size: tuple) -> AOI3DSceneType:
+ """
+ Convert to 3D scene, considering it is inside a 3D rectangular frame.
+
+ Parameters:
+ rectangle_3d: rectangle 3D AOI to use as referential plane
+ size: size of the frame in pixel
+
+ Returns:
+ AOI 3D scene
+ """
+
+ assert(rectangle_3d.dimension == 3)
+ assert(rectangle_3d.points_number == 4)
+
+ # Vectorize outter_axis function
+ vfunc = numpy.vectorize(rectangle_3d.outter_axis)
+
+ # Prepare new AOI 3D scene
+ aoi3D_scene = AOI3DScene.AOI3DScene()
+
+ for name, aoi2D in self.items():
+
+ X, Y = (aoi2D / size).T
+ aoi3D_scene[name] = numpy.array(vfunc(X, Y)).T.view(AOIFeatures.AreaOfInterest)
+
+ return aoi3D_scene
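
A short sketch of dimensionalize, assuming a 25x15 cm screen rectangle lying in the Z=0 plane and a 640x480 pixel frame:

from argaze.AreaOfInterest import AOIFeatures, AOI2DScene

screen_3d = AOIFeatures.AreaOfInterest([[0., 15., 0.], [25., 15., 0.], [25., 0., 0.], [0., 0., 0.]])

aoi_2d_scene = AOI2DScene.AOI2DScene({
    'Center': AOIFeatures.AreaOfInterest([[310, 230], [330, 230], [330, 250], [310, 250]])
})

aoi_3d_scene = aoi_2d_scene.dimensionalize(screen_3d, size=(640, 480))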
diff --git a/src/argaze/AreaOfInterest/AOI3DScene.py b/src/argaze/AreaOfInterest/AOI3DScene.py
index 8ea6048..33a815c 100644
--- a/src/argaze/AreaOfInterest/AOI3DScene.py
+++ b/src/argaze/AreaOfInterest/AOI3DScene.py
@@ -38,15 +38,15 @@ AOI2DSceneType = TypeVar('AOI2DScene', bound="AOI2DScene")
class AOI3DScene(AOIFeatures.AOIScene):
"""Define AOI 3D scene."""
- def __init__(self, aois_3d: dict = None):
+ def __init__(self, aoi_3d: dict = None):
- super().__init__(3, aois_3d)
+ super().__init__(3, aoi_3d)
@classmethod
def from_obj(self, obj_filepath: str) -> AOI3DSceneType:
"""Load AOI3D scene from .obj file."""
- aois_3d = {}
+ aoi_3d = {}
# regex rules for .obj file parsing
OBJ_RX_DICT = {
@@ -108,15 +108,15 @@ class AOI3DScene(AOIFeatures.AOIScene):
file.close()
- # retreive all aoi3D vertices
+ # retrieve all aoi3D vertices and sort them in clockwise order
for name, face in faces.items():
- aoi3D = AOIFeatures.AreaOfInterest([ vertices[i-1] for i in face ])
- aois_3d[name] = aoi3D
+ aoi3D = AOIFeatures.AreaOfInterest([ vertices[i-1] for i in reversed(face) ])
+ aoi_3d[name] = aoi3D
except IOError:
raise IOError(f'File not found: {obj_filepath}')
- return AOI3DScene(aois_3d)
+ return AOI3DScene(aoi_3d)
def to_obj(self, obj_filepath: str):
"""Save AOI3D scene into .obj file."""
@@ -149,8 +149,9 @@ class AOI3DScene(AOIFeatures.AOIScene):
file.write('s off\n')
file.write(vertices_ids + '\n')
+ '''DEPRECATED: but maybe still useful?
@property
- def orthogonal_projection(self) -> AOI2DScene.AOI2DScene:
+ def orthogonal_projection(self) -> AOI2DSceneType:
"""
Orthogonal projection of whole scene.
@@ -169,7 +170,7 @@ class AOI3DScene(AOIFeatures.AOIScene):
K = numpy.array([[scene_size[1]/scene_size[0], 0.0, 0.5], [0.0, 1., 0.5], [0.0, 0.0, 1.0]])
return self.project(tvec, rvec, K)
-
+ '''
def vision_cone(self, cone_radius, cone_height, cone_tip=[0., 0., 0.], cone_direction=[0., 0., 1.]) -> Tuple[AOI3DSceneType, AOI3DSceneType]:
"""Get AOI which are inside and out a given cone field.
diff --git a/src/argaze/AreaOfInterest/AOIFeatures.py b/src/argaze/AreaOfInterest/AOIFeatures.py
index 8987beb..5637baa 100644
--- a/src/argaze/AreaOfInterest/AOIFeatures.py
+++ b/src/argaze/AreaOfInterest/AOIFeatures.py
@@ -11,6 +11,7 @@ from typing import TypeVar, Tuple
from dataclasses import dataclass, field
import json
import os
+import math
from argaze import DataStructures
@@ -41,6 +42,53 @@ class AreaOfInterest(numpy.ndarray):
return repr(self.tolist())
+ @classmethod
+ def from_dict(self, aoi_data: dict, working_directory: str = None) -> AreaOfInterestType:
+ """Load attributes from dictionary.
+
+ Parameters:
+ aoi_data: dictionary with attributes to load
+ working_directory: folder path where to load files when a dictionary value is a relative filepath.
+ """
+
+ # Get first and unique shape
+ # TODO: allow multiple shapes to describe more complex AOI
+ shape, shape_data = aoi_data.popitem()
+
+ if shape == 'Rectangle':
+
+ x = shape_data.pop('x')
+ y = shape_data.pop('y')
+ width = shape_data.pop('width')
+ height = shape_data.pop('height')
+
+ points = [[x, y], [x+width, y], [x+width, y+height], [x, y+height]]
+
+ return AreaOfInterest(points)
+
+ elif shape == 'Circle':
+
+ cx = shape_data.pop('cx')
+ cy = shape_data.pop('cy')
+ radius = shape_data.pop('radius')
+
+ # TODO: Use pygeos
+ N = 32
+ points = [(math.cos(2*math.pi / N*x) * radius + cx, math.sin(2*math.pi / N*x) * radius + cy) for x in range(0, N+1)]
+
+ return AreaOfInterest(points)
+
+ elif shape == 'Ellipse':
+
+ cx = shape_data.pop('cx')
+ cy = shape_data.pop('cy')
+ rx = shape_data.pop('rx')
+ ry = shape_data.pop('ry')
+
+ # TODO: Use pygeos
+ N = 32
+ points = [(math.cos(2*math.pi / N*x) * rx + cx, math.sin(2*math.pi / N*x) * ry + cy) for x in range(0, N+1)]
+
+ return AreaOfInterest(points)
+
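
Each AOI description holds a single shape for now, and circles and ellipses are discretized into a 32-point polygon. A short from_dict sketch:

from argaze.AreaOfInterest import AOIFeatures

rectangle = AOIFeatures.AreaOfInterest.from_dict({
    'Rectangle': {'x': 0, 'y': 0, 'width': 100, 'height': 50}
})

circle = AOIFeatures.AreaOfInterest.from_dict({
    'Circle': {'cx': 50, 'cy': 50, 'radius': 10}
})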
@property
def dimension(self) -> int:
"""Number of axis coding area points positions."""
@@ -127,8 +175,8 @@ class AreaOfInterest(numpy.ndarray):
return mpath.Path(self).contains_points([point])[0]
- def inner_axis(self, point: tuple) -> tuple:
- """Transform the coordinates from the global axis to the AOI's axis.
+ def inner_axis(self, x: float, y: float) -> tuple:
+ """Transform a point coordinates from global axis to AOI axis.
!!! warning
Available for 2D AOI only.
!!! danger
@@ -143,35 +191,30 @@ class AreaOfInterest(numpy.ndarray):
Dst = numpy.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]]).astype(numpy.float32)
P = cv2.getPerspectiveTransform(Src, Dst)
- X = numpy.append(numpy.array(numpy.array(point) - Src_origin), [1.0]).astype(numpy.float32)
+ X = numpy.append(numpy.array(numpy.array([x, y]) - Src_origin), [1.0]).astype(numpy.float32)
Y = numpy.dot(P, X)
La = (Y/Y[2])[:-1]
return tuple(numpy.around(La, 4))
- def outter_axis(self, point: tuple) -> tuple:
- """Transform the coordinates from the AOI's axis to the global axis.
- !!! warning
- Available for 2D AOI only.
+ def outter_axis(self, x: float, y: float) -> tuple:
+ """Transform a point coordinates from AOI axis to global axis.
!!! danger
- The AOI points must be sorted in clockwise order."""
-
- assert(self.dimension == 2)
-
- Src = numpy.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]]).astype(numpy.float32)
+ The AOI points must be sorted in clockwise order.
+ !!! danger
+ The AOI must be a rectangle."""
- Dst = self.astype(numpy.float32)
- Dst_origin = Dst[0]
- Dst = (Dst - Dst_origin).reshape((len(Dst)), 2)
+ # Origin point
+ O = self[0]
- P = cv2.getPerspectiveTransform(Src, Dst)
- X = numpy.array([point[0], point[1], 1.0]).astype(numpy.float32)
- Y = numpy.dot(P, X)
+ # Horizontal axis vector
+ H = self[1] - self[0]
- Lp = Dst_origin + (Y/Y[2])[:-1]
+ # Vertical axis vector
+ V = self[3] - self[0]
- return tuple(numpy.rint(Lp).astype(int))
+ return tuple(O + x * H + y * V)
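
For rectangular AOI, outter_axis is now the exact inverse of inner_axis: a point is expressed as the origin corner plus fractions of the horizontal and vertical edge vectors. A worked round trip on an axis-aligned rectangle:

from argaze.AreaOfInterest import AOIFeatures

# Clockwise rectangle from (100, 100) to (300, 200)
aoi = AOIFeatures.AreaOfInterest([[100, 100], [300, 100], [300, 200], [100, 200]])

aoi.inner_axis(200, 150)   # -> (0.5, 0.5)
aoi.outter_axis(0.5, 0.5)  # -> (200.0, 150.0)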
def circle_intersection(self, center: tuple, radius: float) -> Tuple[numpy.array, float, float]:
"""Get intersection shape with a circle, intersection area / AOI area ratio and intersection area / circle area ratio.
@@ -254,8 +297,15 @@ class AOIScene():
# Load areas
areas = {}
- for name, area in aoi_scene_data.items():
- areas[name] = AreaOfInterest(area)
+ for area_name, area_data in aoi_scene_data.items():
+
+ if type(area_data) == list:
+
+ areas[area_name] = AreaOfInterest(area_data)
+
+ elif type(area_data) == dict:
+
+ areas[area_name] = AreaOfInterest.from_dict(area_data)
# Default dimension is 0
dimension = 0
@@ -281,7 +331,7 @@ class AOIScene():
aoi_scene_data = json.load(configuration_file)
working_directory = os.path.dirname(json_filepath)
- return AOIScene.from_dict(aoi_scene_data, working_directory)
+ return AOIScene.from_dict(aoi_scene_data, working_directory)
def __getitem__(self, name) -> AreaOfInterest:
"""Get an AOI from the scene."""
@@ -353,6 +403,42 @@ class AOIScene():
return output
+ def __add__(self, add_vector) -> AOISceneType:
+ """Add vector to scene."""
+
+ assert(len(add_vector) == self.__dimension)
+
+ for name, area in self.__areas.items():
+
+ self.__areas[name] = self.__areas[name] + add_vector
+
+ return self
+
+ # Allow n + scene operation
+ __radd__ = __add__
+
+ def __sub__(self, sub_vector) -> AOISceneType:
+ """Sub vector to scene."""
+
+ assert(len(sub_vector) == self.__dimension)
+
+ for name, area in self.__areas.items():
+
+ self.__areas[name] = self.__areas[name] - sub_vector
+
+ return self
+
+ def __rsub__(self, rsub_vector) -> AOISceneType:
+ """RSub vector to scene."""
+
+ assert(len(rsub_vector) == self.__dimension)
+
+ for name, area in self.__areas.items():
+
+ self.__areas[name] = rsub_vector - self.__areas[name]
+
+ return self
+
def __mul__(self, scale_vector) -> AOISceneType:
"""Scale scene by a vector."""
@@ -367,6 +453,16 @@ class AOIScene():
# Allow n * scene operation
__rmul__ = __mul__
+ def __truediv__(self, div_vector) -> AOISceneType:
+
+ assert(len(div_vector) == self.__dimension)
+
+ for name, area in self.__areas.items():
+
+ self.__areas[name] = self.__areas[name] / div_vector
+
+ return self
+
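
These operators translate or scale every area of the scene; note that they modify the scene in place and return it. A short usage sketch:

from argaze.AreaOfInterest import AOIFeatures, AOI2DScene

scene = AOI2DScene.AOI2DScene({
    'Zone': AOIFeatures.AreaOfInterest([[0, 0], [10, 0], [10, 10], [0, 10]])
})

scene += (5, 5)      # translate every area by (5, 5)
scene /= (100, 100)  # divide by a frame size to get normalized coordinates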
def items(self) -> Tuple[str, AreaOfInterest]:
"""Iterate over areas."""
@@ -379,7 +475,7 @@ class AOIScene():
@property
def dimension(self) -> int:
- """Dimension of the AOIs in scene."""
+ """Dimension of the AOI in scene."""
return self.__dimension
diff --git a/src/argaze/DataStructures.py b/src/argaze/DataStructures.py
index 08a7d2c..9e35dea 100644
--- a/src/argaze/DataStructures.py
+++ b/src/argaze/DataStructures.py
@@ -45,6 +45,15 @@ def as_dict(dataclass_object) -> dict:
# Copy fields values
return {name: vars(dataclass_object)[name] for name in fields_names}
+def module_path(obj) -> str:
+ """
+ Get object module path.
+
+ Returns:
+ module path
+ """
+ return obj.__class__.__module__
+
class JsonEncoder(json.JSONEncoder):
"""Specific ArGaze JSON Encoder."""
@@ -55,10 +64,10 @@ class JsonEncoder(json.JSONEncoder):
if isinstance(obj, numpy.integer):
return int(obj)
- if isinstance(obj, numpy.floating):
+ elif isinstance(obj, numpy.floating):
return float(obj)
- if isinstance(obj, numpy.ndarray):
+ elif isinstance(obj, numpy.ndarray):
return obj.tolist()
# default case
@@ -73,7 +82,19 @@ class JsonEncoder(json.JSONEncoder):
public_dict = {}
for k, v in vars(obj).items():
+
if not k.startswith('_'):
+
+ # numpy cases
+ if isinstance(v, numpy.integer):
+ v = int(v)
+
+ elif isinstance(v, numpy.floating):
+ v = float(v)
+
+ elif isinstance(v, numpy.ndarray):
+ v = v.tolist()
+
public_dict[k] = v
return public_dict
diff --git a/src/argaze/GazeAnalysis/Basic.py b/src/argaze/GazeAnalysis/Basic.py
index 7b41731..dc7b4fd 100644
--- a/src/argaze/GazeAnalysis/Basic.py
+++ b/src/argaze/GazeAnalysis/Basic.py
@@ -79,12 +79,27 @@ class AOIScanPathAnalyzer(GazeFeatures.AOIScanPathAnalyzer):
self.__steps_number = len(aoi_scan_path)
sum_fixation_durations = 0
+ self.__sum_aoi_fixation_durations = {}
for aoi_scan_step in aoi_scan_path:
sum_fixation_durations += aoi_scan_step.fixation_duration
+ try:
+
+ self.__sum_aoi_fixation_durations[aoi_scan_step.aoi] = self.__sum_aoi_fixation_durations[aoi_scan_step.aoi] + aoi_scan_step.fixation_duration
+
+ except KeyError:
+
+ self.__sum_aoi_fixation_durations[aoi_scan_step.aoi] = aoi_scan_step.fixation_duration
+
self.__step_fixation_durations_average = sum_fixation_durations / self.__steps_number
+
+ self.__aoi_fixation_distribution = {}
+
+ for aoi_name, sum_aoi_fixation_duration in self.__sum_aoi_fixation_durations.items():
+
+ self.__aoi_fixation_distribution[aoi_name] = sum_aoi_fixation_duration / sum_fixation_durations
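
The new metric is each AOI's share of the total fixation duration along the path. A worked example with hypothetical scan steps, 400 ms of fixation on 'A' and 100 ms on 'B':

sum_aoi_fixation_durations = {'A': 400, 'B': 100}
sum_fixation_durations = 500

aoi_fixation_distribution = {aoi: duration / sum_fixation_durations
                             for aoi, duration in sum_aoi_fixation_durations.items()}
# -> {'A': 0.8, 'B': 0.2}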
@property
def path_duration(self) -> float:
@@ -102,4 +117,10 @@ class AOIScanPathAnalyzer(GazeFeatures.AOIScanPathAnalyzer):
def step_fixation_durations_average(self) -> float:
"""AOI scan path step fixation durations average."""
- return self.__step_fixation_durations_average \ No newline at end of file
+ return self.__step_fixation_durations_average
+
+ @property
+ def aoi_fixation_distribution(self) -> dict:
+ """percentage of time spent on each AOI."""
+
+ return self.__aoi_fixation_distribution \ No newline at end of file
diff --git a/src/argaze/GazeAnalysis/DeviationCircleCoverage.py b/src/argaze/GazeAnalysis/DeviationCircleCoverage.py
index f0decfc..acc0665 100644
--- a/src/argaze/GazeAnalysis/DeviationCircleCoverage.py
+++ b/src/argaze/GazeAnalysis/DeviationCircleCoverage.py
@@ -33,8 +33,8 @@ class AOIMatcher(GazeFeatures.AOIMatcher):
self.__look_count = 0
self.__looked_aoi_data = (None, None)
+ self.__looked_probabilities = {}
self.__circle_ratio_sum = {}
- self.__aois_coverages = {}
self.__matched_gaze_movement = None
self.__matched_region = None
@@ -54,7 +54,7 @@ class AOIMatcher(GazeFeatures.AOIMatcher):
# BAD: we use deviation_max attribute which is an attribute of DispersionThresholdIdentification.Fixation class
region, _, circle_ratio = aoi.circle_intersection(gaze_movement.focus, gaze_movement.deviation_max)
- if name not in self.exclude and circle_ratio > 0:
+ if name not in self.exclude and circle_ratio > self.coverage_threshold:
# Sum circle ratio to update aoi coverage
try:
@@ -78,15 +78,15 @@ class AOIMatcher(GazeFeatures.AOIMatcher):
# Update looked aoi data
self.__looked_aoi_data = most_likely_looked_aoi_data
- # Calculate looked aoi circle ratio means
- self.__aois_coverages = {}
+ # Calculate circle ratio means as looked probabilities
+ self.__looked_probabilities = {}
for aoi_name, circle_ratio_sum in self.__circle_ratio_sum.items():
circle_ratio_mean = circle_ratio_sum / self.__look_count
- # filter circle ration mean greater than 1
- self.__aois_coverages[aoi_name] = circle_ratio_mean if circle_ratio_mean < 1 else 1
+ # Avoid probability greater than 1
+ self.__looked_probabilities[aoi_name] = circle_ratio_mean if circle_ratio_mean < 1 else 1
# Update matched gaze movement
self.__matched_gaze_movement = gaze_movement
@@ -95,9 +95,7 @@ class AOIMatcher(GazeFeatures.AOIMatcher):
self.__matched_region = matched_region
# Return
- if self.__aois_coverages[most_likely_looked_aoi_data[0]] > self.coverage_threshold:
-
- return self.__looked_aoi_data
+ return self.__looked_aoi_data
elif GazeFeatures.is_saccade(gaze_movement):
@@ -109,14 +107,13 @@ class AOIMatcher(GazeFeatures.AOIMatcher):
return (None, None)
- def draw(self, image: numpy.array, aoi_scene: AOIFeatures.AOIScene, draw_matched_fixation: dict = None, draw_matched_fixation_positions: dict = None, draw_matched_region: dict = None, draw_looked_aoi: dict = None, update_looked_aoi: bool = False, looked_aoi_name_color: tuple = None, looked_aoi_name_offset: tuple = (0, 0)):
+ def draw(self, image: numpy.array, aoi_scene: AOIFeatures.AOIScene, draw_matched_fixation: dict = None, draw_matched_region: dict = None, draw_looked_aoi: dict = None, update_looked_aoi: bool = False, looked_aoi_name_color: tuple = None, looked_aoi_name_offset: tuple = (0, 0)):
"""Draw matching into image.
Parameters:
image: where to draw
aoi_scene: to refresh looked aoi if required
draw_matched_fixation: Fixation.draw parameters (which depends on the loaded gaze movement identifier module; if None, no fixation is drawn)
- draw_matched_fixation_positions: GazeMovement.draw_positions parameters (if None, no fixation is drawn)
draw_matched_region: AOIFeatures.AOI.draw parameters (if None, no matched region is drawn)
draw_looked_aoi: AOIFeatures.AOI.draw parameters (if None, no looked aoi is drawn)
looked_aoi_name_color: color of text (if None, no looked aoi name is drawn)
@@ -132,11 +129,6 @@ class AOIMatcher(GazeFeatures.AOIMatcher):
self.__matched_gaze_movement.draw(image, **draw_matched_fixation)
- # Draw matched fixation positions if required
- if draw_matched_fixation_positions is not None:
-
- self.__matched_gaze_movement.draw_positions(image, **draw_matched_fixation_positions)
-
# Draw matched aoi
if self.looked_aoi.all() is not None:
@@ -179,8 +171,11 @@ class AOIMatcher(GazeFeatures.AOIMatcher):
return self.__looked_aoi_data[0]
@property
- def aois_coverages(self) -> dict:
- """Get all aois coverage means for current fixation.
- It represents the ratio of fixation deviation circle surface that used to cover the aoi."""
+ def looked_probabilities(self) -> dict:
+ """Get probabilities to be looked by current fixation for each aoi.
+
+ !!! note
+ aoi where fixation deviation circle never passed the coverage threshold will be missing.
+ """
- return self.__aois_coverages \ No newline at end of file
+ return self.__looked_probabilities \ No newline at end of file
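
A minimal sketch of the renamed property in use, assuming the matcher is fed through its match(aoi_scene, gaze_movement) entry point with an aoi_scene and a fixation built elsewhere:

from argaze.GazeAnalysis import DeviationCircleCoverage

matcher = DeviationCircleCoverage.AOIMatcher(coverage_threshold=0.5)
looked_aoi_name, looked_aoi = matcher.match(aoi_scene, fixation)

# Probabilities are circle ratio means clamped to 1; aoi that never passed
# the coverage threshold are simply absent from the dictionary.
for aoi_name, probability in matcher.looked_probabilities.items():
    print(f'{aoi_name}: {probability:.2f}')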
diff --git a/src/argaze/GazeAnalysis/DispersionThresholdIdentification.py b/src/argaze/GazeAnalysis/DispersionThresholdIdentification.py
index 15fddf4..a7b9900 100644
--- a/src/argaze/GazeAnalysis/DispersionThresholdIdentification.py
+++ b/src/argaze/GazeAnalysis/DispersionThresholdIdentification.py
@@ -73,7 +73,7 @@ class Fixation(GazeFeatures.Fixation):
return self
- def draw(self, image: numpy.array, deviation_circle_color: tuple = None, duration_border_color: tuple = None, duration_factor: float = 1.):
+ def draw(self, image: numpy.array, deviation_circle_color: tuple = None, duration_border_color: tuple = None, duration_factor: float = 1., draw_positions: dict = None):
"""Draw fixation into image.
Parameters:
@@ -82,15 +82,20 @@ class Fixation(GazeFeatures.Fixation):
duration_factor: how many pixels per duration unit
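+ draw_positions: GazeMovement.draw_positions parameters (if None, no positions are drawn)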
"""
+ # Draw duration border if required
+ if duration_border_color is not None:
+
+ cv2.circle(image, (int(self.focus[0]), int(self.focus[1])), int(self.deviation_max), duration_border_color, int(self.duration * duration_factor))
+
# Draw deviation circle if required
if deviation_circle_color is not None:
cv2.circle(image, (int(self.focus[0]), int(self.focus[1])), int(self.deviation_max), deviation_circle_color, -1)
- # Draw duration border if required
- if duration_border_color is not None:
+ # Draw positions if required
+ if draw_positions is not None:
- cv2.circle(image, (int(self.focus[0]), int(self.focus[1])), int(self.deviation_max), duration_border_color, int(self.duration * duration_factor))
+ self.draw_positions(image, **draw_positions)
@dataclass(frozen=True)
class Saccade(GazeFeatures.Saccade):
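
A minimal sketch of the extended draw() signature, assuming a Fixation instance named fixation; the parameter values mirror the demo configuration files further below:

import numpy

image = numpy.zeros((1149, 1920, 3), dtype=numpy.uint8)

# The duration border is now drawn first so the filled deviation circle and
# the raw gaze positions remain visible on top of it.
fixation.draw(image,
              deviation_circle_color=(255, 255, 255),
              duration_border_color=(127, 0, 127),
              duration_factor=1e-2,
              draw_positions={'position_color': (0, 255, 255), 'line_color': (0, 0, 0)})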
diff --git a/src/argaze/GazeAnalysis/ExploitExploreRatio.py b/src/argaze/GazeAnalysis/ExploreExploitRatio.py
index f35561f..b4550e7 100644
--- a/src/argaze/GazeAnalysis/ExploitExploreRatio.py
+++ b/src/argaze/GazeAnalysis/ExploreExploitRatio.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python
-"""Exploit/Explore ratio module.
+"""Explore/Explore ratio module.
"""
__author__ = "Théo de la Hogue"
@@ -16,13 +16,8 @@ import numpy
@dataclass
class ScanPathAnalyzer(GazeFeatures.ScanPathAnalyzer):
- """Implementation of exploit vs explore ratio algorithm as described in:
+ """Implementation of explore vs exploit ratio algorithm as described in:
- **Goldberg J. H., Kotval X. P. (1999).**
- *Computer interface evaluation using eye movements: methods and constructs.*
- International Journal of Industrial Ergonomics (631–645).
- [https://doi.org/10.1016/S0169-8141(98)00068-7](https://doi.org/10.1016/S0169-8141\\(98\\)00068-7)
-
**Dehais F., Peysakhovich V., Scannella S., Fongue J., Gateau T. (2015).**
*Automation surprise in aviation: Real-time solutions.*
Proceedings of the 33rd annual ACM conference on Human Factors in Computing Systems (2525–2534).
@@ -36,7 +31,7 @@ class ScanPathAnalyzer(GazeFeatures.ScanPathAnalyzer):
super().__init__()
- self.__exploit_explore_ratio = 0.
+ self.__explore_exploit_ratio = 0.
def analyze(self, scan_path: GazeFeatures.ScanPathType):
"""Analyze scan path."""
@@ -63,13 +58,13 @@ class ScanPathAnalyzer(GazeFeatures.ScanPathAnalyzer):
long_fixations_duration = numpy.array(long_fixations_durations).sum()
saccades_duration = numpy.array(saccades_durations).sum()
- assert(saccades_duration + short_fixations_duration > 0)
+ assert(long_fixations_duration > 0)
- self.__exploit_explore_ratio = long_fixations_duration / (saccades_duration + short_fixations_duration)
+ self.__explore_exploit_ratio = (saccades_duration + short_fixations_duration) / long_fixations_duration
@property
- def exploit_explore_ratio(self) -> float:
- """Exploit/Explore ratio."""
+ def explore_exploit_ratio(self) -> float:
+ """Explore/Exploit ratio."""
- return self.__exploit_explore_ratio
+ return self.__explore_exploit_ratio
\ No newline at end of file
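
As a worked example of the inverted metric, with hypothetical durations in milliseconds:

saccades_duration = 2000
short_fixations_duration = 1000
long_fixations_duration = 6000

# A ratio above 1 means more time exploring (saccades and short fixations)
# than exploiting (long fixations); here the viewer mostly exploits.
explore_exploit_ratio = (saccades_duration + short_fixations_duration) / long_fixations_duration
print(explore_exploit_ratio)  # 0.5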
diff --git a/src/argaze/GazeAnalysis/FocusPointInside.py b/src/argaze/GazeAnalysis/FocusPointInside.py
index b3651e4..81a9d20 100644
--- a/src/argaze/GazeAnalysis/FocusPointInside.py
+++ b/src/argaze/GazeAnalysis/FocusPointInside.py
@@ -54,14 +54,13 @@ class AOIMatcher(GazeFeatures.AOIMatcher):
return (None, None)
- def draw(self, image: numpy.array, aoi_scene: AOIFeatures.AOIScene, draw_matched_fixation: dict = None, draw_matched_fixation_positions: dict = None, draw_looked_aoi: dict = None, looked_aoi_name_color: tuple = None, looked_aoi_name_offset: tuple = (0, 0)):
+ def draw(self, image: numpy.array, aoi_scene: AOIFeatures.AOIScene, draw_matched_fixation: dict = None, draw_looked_aoi: dict = None, looked_aoi_name_color: tuple = None, looked_aoi_name_offset: tuple = (0, 0)):
"""Draw matching into image.
Parameters:
image: where to draw
aoi_scene: to refresh looked aoi if required
draw_matched_fixation: Fixation.draw parameters (which depends on the loaded gaze movement identifier module; if None, no fixation is drawn)
- draw_matched_fixation_positions: GazeMovement.draw_positions parameters (if None, no fixation is drawn)
draw_looked_aoi: AOIFeatures.AOI.draw parameters (if None, no looked aoi is drawn)
looked_aoi_name_color: color of text (if None, no looked aoi name is drawn)
looked_aoi_name_offset: offset of text from the upper left aoi bounding box corner
@@ -76,11 +75,6 @@ class AOIMatcher(GazeFeatures.AOIMatcher):
self.__matched_gaze_movement.draw(image, **draw_matched_fixation)
- # Draw matched fixation positions if required
- if draw_matched_fixation_positions is not None:
-
- self.__matched_gaze_movement.draw_positions(image, **draw_matched_fixation_positions)
-
# Draw matched aoi
if self.looked_aoi.all() is not None:
diff --git a/src/argaze/GazeAnalysis/KCoefficient.py b/src/argaze/GazeAnalysis/KCoefficient.py
index 80fe1fd..c50bc3a 100644
--- a/src/argaze/GazeAnalysis/KCoefficient.py
+++ b/src/argaze/GazeAnalysis/KCoefficient.py
@@ -52,19 +52,24 @@ class ScanPathAnalyzer(GazeFeatures.ScanPathAnalyzer):
duration_std = numpy.std(durations)
amplitude_std = numpy.std(amplitudes)
- Ks = []
- for scan_step in scan_path:
+ if duration_std > 0. and amplitude_std > 0.:
+
+ Ks = []
+ for scan_step in scan_path:
+
+ Ks.append((abs(scan_step.duration - duration_mean) / duration_std) - (abs(scan_step.last_saccade.amplitude - amplitude_mean) / amplitude_std))
+
+ self.__K = numpy.array(Ks).mean()
- Ks.append(((scan_step.duration - duration_mean) / duration_std) - ((scan_step.last_saccade.amplitude - amplitude_mean) / amplitude_std))
+ else:
- self.__K = numpy.array(Ks).mean()
+ self.__K = 0.
@property
def K(self) -> float:
"""K coefficient."""
return self.__K
-
@dataclass
class AOIScanPathAnalyzer(GazeFeatures.AOIScanPathAnalyzer):
@@ -104,12 +109,18 @@ class AOIScanPathAnalyzer(GazeFeatures.AOIScanPathAnalyzer):
duration_std = numpy.std(durations)
amplitude_std = numpy.std(amplitudes)
- Ks = []
- for aoi_scan_step in aoi_scan_path:
+ if duration_std > 0. and amplitude_std > 0.:
+
+ Ks = []
+ for aoi_scan_step in aoi_scan_path:
+
+ Ks.append((abs(aoi_scan_step.duration - duration_mean) / duration_std) - (abs(aoi_scan_step.last_saccade.amplitude - amplitude_mean) / amplitude_std))
+
+ self.__K = numpy.array(Ks).mean()
- Ks.append(((aoi_scan_step.duration - duration_mean) / duration_std) - ((aoi_scan_step.last_saccade.amplitude - amplitude_mean) / amplitude_std))
+ else:
- self.__K = numpy.array(Ks).mean()
+ self.__K = 0.
@property
def K(self) -> float:
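
The new guard avoids a division by zero when all steps are identical; a minimal sketch of the degenerate case it protects against:

import numpy

durations = numpy.array([400., 400., 400.])  # three identical scan steps

# numpy.std() of identical values is 0, so the previous formula divided by zero;
# the analyzer now reports K = 0 in that case.
print(numpy.std(durations))  # 0.0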
diff --git a/src/argaze/GazeAnalysis/LinearRegression.py b/src/argaze/GazeAnalysis/LinearRegression.py
new file mode 100644
index 0000000..0e10b87
--- /dev/null
+++ b/src/argaze/GazeAnalysis/LinearRegression.py
@@ -0,0 +1,107 @@
+#!/usr/bin/env python
+
+"""Module for gaze position calibration based on linear regression.
+"""
+
+__author__ = "Théo de la Hogue"
+__credits__ = []
+__copyright__ = "Copyright 2023, Ecole Nationale de l'Aviation Civile (ENAC)"
+__license__ = "BSD"
+
+from typing import TypeVar, Tuple
+from dataclasses import dataclass, field
+
+from argaze import GazeFeatures
+
+from sklearn.linear_model import LinearRegression
+import numpy
+import cv2
+
+GazePositionType = TypeVar('GazePositionType', bound="GazePositionType")
+# Type definition for type annotation convenience
+
+@dataclass
+class GazePositionCalibrator(GazeFeatures.GazePositionCalibrator):
+ """Calibration algorithm based on linear regression."""
+
+ coefficients: numpy.array = field(default_factory=lambda : numpy.array([[1., 0.], [0., 1.]]))
+ """Linear regression coefficients"""
+
+ intercept: numpy.array = field(default_factory=lambda : numpy.array([0., 0.]))
+ """Linear regression intercept value"""
+
+ def __post_init__(self):
+ """Init calibration."""
+
+ # Start with empty position buffers so store() can be called before reset()
+ self.__observed_positions = []
+ self.__expected_positions = []
+
+ self.__linear_regression = LinearRegression()
+ self.__linear_regression.coef_ = numpy.array(self.coefficients)
+ self.__linear_regression.intercept_ = numpy.array(self.intercept)
+
+ def store(self, timestamp: int|float, observed_gaze_position: GazeFeatures.GazePosition, expected_gaze_position: GazeFeatures.GazePosition):
+ """Store observed and expected gaze positions."""
+
+ self.__observed_positions.append(observed_gaze_position.value)
+ self.__expected_positions.append(expected_gaze_position.value)
+
+ def reset(self):
+ """Reset observed and expected gaze positions."""
+
+ self.__observed_positions = []
+ self.__expected_positions = []
+ self.__linear_regression = None
+
+ def calibrate(self) -> float:
+ """Process calibration from observed and expected gaze positions.
+
+ Returns:
+ score: the score of linear regression
+ """
+
+ self.__linear_regression = LinearRegression().fit(self.__observed_positions, self.__expected_positions)
+
+ # Update frozen coefficients attribute
+ object.__setattr__(self, 'coefficients', self.__linear_regression.coef_)
+
+ # Update frozen intercept attribute
+ object.__setattr__(self, 'intercept', self.__linear_regression.intercept_)
+
+ # Return calibrated gaze position
+ return self.__linear_regression.score(self.__observed_positions, self.__expected_positions)
+
+ def apply(self, gaze_position: GazeFeatures.GazePosition) -> GazePositionType:
+ """Apply calibration onto observed gaze position."""
+
+ if not self.calibrating:
+
+ return GazeFeatures.GazePosition(self.__linear_regression.predict(numpy.array([gaze_position.value]))[0], precision=gaze_position.precision)
+
+ else:
+
+ return gaze_position
+
+ def draw(self, image: numpy.array, size: tuple, resolution: tuple, line_color: tuple = (0, 0, 0), thickness: int = 1):
+ """Draw calibration field."""
+
+ width, height = size
+
+ if width * height > 0:
+
+ rx, ry = resolution
+ lx = numpy.linspace(0, width, rx)
+ ly = numpy.linspace(0, height, ry)
+ xv, yv = numpy.meshgrid(lx, ly, indexing='ij')
+
+ for i in range(rx):
+
+ for j in range(ry):
+
+ start = (xv[i][j], yv[i][j])
+ end = self.apply(GazeFeatures.GazePosition(start)).value
+
+ cv2.line(image, (int(start[0]), int(start[1])), (int(end[0]), int(end[1])), line_color, thickness)
+
+ @property
+ def calibrating(self) -> bool:
+ """Is the calibration running?"""
+
+ return self.__linear_regression is None \ No newline at end of file
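
A minimal sketch of the intended calibration workflow, with hypothetical gaze positions:

from argaze import GazeFeatures
from argaze.GazeAnalysis import LinearRegression

calibrator = LinearRegression.GazePositionCalibrator()
calibrator.reset()  # enter calibration mode

# Feed (observed, expected) pairs, e.g. while the user looks at known targets.
calibrator.store(0, GazeFeatures.GazePosition((110, 210)), GazeFeatures.GazePosition((100, 200)))
calibrator.store(40, GazeFeatures.GazePosition((310, 420)), GazeFeatures.GazePosition((300, 400)))
calibrator.store(80, GazeFeatures.GazePosition((510, 630)), GazeFeatures.GazePosition((500, 600)))

score = calibrator.calibrate()  # R² score of the fitted regression
corrected = calibrator.apply(GazeFeatures.GazePosition((110, 210)))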
diff --git a/src/argaze/GazeAnalysis/TransitionMatrix.py b/src/argaze/GazeAnalysis/TransitionMatrix.py
index 6f408e4..b346b5a 100644
--- a/src/argaze/GazeAnalysis/TransitionMatrix.py
+++ b/src/argaze/GazeAnalysis/TransitionMatrix.py
@@ -42,7 +42,7 @@ class AOIScanPathAnalyzer(GazeFeatures.AOIScanPathAnalyzer):
row_sum = aoi_scan_path.transition_matrix.apply(lambda row: row.sum(), axis=1)
# Editing transition matrix probabilities
- # Note: when no transiton starts from an aoi, destination probabilites is equal to 1/S where S is the number of aois
+ # Note: when no transition starts from an aoi, destination probabilities are equal to 1/S where S is the number of aoi
self.__transition_matrix_probabilities = aoi_scan_path.transition_matrix.apply(lambda row: row.apply(lambda p: p / row_sum[row.name] if row_sum[row.name] > 0 else 1 / row_sum.size), axis=1)
# Calculate matrix density
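
A minimal self-contained sketch of the normalization performed above, using a hypothetical 2x2 matrix:

import pandas

transition_matrix = pandas.DataFrame([[0, 2], [0, 0]], index=['A', 'B'], columns=['A', 'B'])
row_sum = transition_matrix.apply(lambda row: row.sum(), axis=1)

# Rows with transitions become probabilities; the empty 'B' row falls back to 1/S.
probabilities = transition_matrix.apply(
    lambda row: row.apply(lambda p: p / row_sum[row.name] if row_sum[row.name] > 0 else 1 / row_sum.size),
    axis=1)
print(probabilities)  # A row: [0.0, 1.0]; B row: [0.5, 0.5]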
diff --git a/src/argaze/GazeAnalysis/VelocityThresholdIdentification.py b/src/argaze/GazeAnalysis/VelocityThresholdIdentification.py
index 64931f5..d10f666 100644
--- a/src/argaze/GazeAnalysis/VelocityThresholdIdentification.py
+++ b/src/argaze/GazeAnalysis/VelocityThresholdIdentification.py
@@ -72,7 +72,7 @@ class Fixation(GazeFeatures.Fixation):
return self
- def draw(self, image: numpy.array, deviation_circle_color: tuple = None, duration_border_color: tuple = None, duration_factor: float = 1.):
+ def draw(self, image: numpy.array, deviation_circle_color: tuple = None, duration_border_color: tuple = None, duration_factor: float = 1., draw_positions: dict = None):
"""Draw fixation into image.
Parameters:
@@ -81,15 +81,20 @@ class Fixation(GazeFeatures.Fixation):
duration_factor: how many pixels per duration unit
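+ draw_positions: GazeMovement.draw_positions parameters (if None, no positions are drawn)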
"""
+ # Draw duration border if required
+ if duration_border_color is not None:
+
+ cv2.circle(image, (int(self.focus[0]), int(self.focus[1])), int(self.deviation_max), duration_border_color, int(self.duration * duration_factor))
+
# Draw deviation circle if required
if deviation_circle_color is not None:
cv2.circle(image, (int(self.focus[0]), int(self.focus[1])), int(self.deviation_max), deviation_circle_color, -1)
- # Draw duration border if required
- if duration_border_color is not None:
+ # Draw positions if required
+ if draw_positions is not None:
- cv2.circle(image, (int(self.focus[0]), int(self.focus[1])), int(self.deviation_max), duration_border_color, int(self.duration * duration_factor))
+ self.draw_positions(image, **draw_positions)
@dataclass(frozen=True)
class Saccade(GazeFeatures.Saccade):
diff --git a/src/argaze/GazeAnalysis/__init__.py b/src/argaze/GazeAnalysis/__init__.py
index 164de74..c110eb1 100644
--- a/src/argaze/GazeAnalysis/__init__.py
+++ b/src/argaze/GazeAnalysis/__init__.py
@@ -1,4 +1,4 @@
"""
Various gaze movement identification, AOI matching and scan path analysis algorithms.
"""
-__all__ = ['Basic', 'DispersionThresholdIdentification', 'VelocityThresholdIdentification', 'TransitionMatrix', 'KCoefficient', 'LempelZivComplexity', 'NGram', 'Entropy', 'NearestNeighborIndex', 'ExploitExploreRatio'] \ No newline at end of file
+__all__ = ['Basic', 'DispersionThresholdIdentification', 'VelocityThresholdIdentification', 'TransitionMatrix', 'KCoefficient', 'LempelZivComplexity', 'NGram', 'Entropy', 'NearestNeighborIndex', 'ExploreExploitRatio', 'LinearRegression'] \ No newline at end of file
diff --git a/src/argaze/GazeFeatures.py b/src/argaze/GazeFeatures.py
index 2dd1cab..46e9f17 100644
--- a/src/argaze/GazeFeatures.py
+++ b/src/argaze/GazeFeatures.py
@@ -12,6 +12,7 @@ from dataclasses import dataclass, field
import math
import ast
import json
+import importlib
from inspect import getmembers
from argaze import DataStructures
@@ -201,6 +202,113 @@ class TimeStampedGazePositions(DataStructures.TimeStampedBuffer):
return TimeStampedGazePositions(df.to_dict('index'))
+class GazePositionCalibrationFailed(Exception):
+ """Exception raised by GazePositionCalibrator."""
+
+ def __init__(self, message):
+
+ super().__init__(message)
+
+GazePositionCalibratorType = TypeVar('GazePositionCalibrator', bound="GazePositionCalibrator")
+# Type definition for type annotation convenience
+
+@dataclass
+class GazePositionCalibrator():
+ """Abstract class to define what should provide a gaze position calibrator algorithm."""
+
+ @classmethod
+ def from_dict(cls, calibrator_data: dict) -> GazePositionCalibratorType:
+ """Load gaze position calibrator from dictionary.
+
+ Parameters:
+ calibrator_data: dictionary with class name and attributes to load
+ """
+ gaze_position_calibrator_module_path, gaze_position_calibrator_parameters = calibrator_data.popitem()
+
+ # Prepend argaze.GazeAnalysis path when a single name is provided
+ if len(gaze_position_calibrator_module_path.split('.')) == 1:
+ gaze_position_calibrator_module_path = f'argaze.GazeAnalysis.{gaze_position_calibrator_module_path}'
+
+ gaze_position_calibrator_module = importlib.import_module(gaze_position_calibrator_module_path)
+ return gaze_position_calibrator_module.GazePositionCalibrator(**gaze_position_calibrator_parameters)
+
+ @classmethod
+ def from_json(cls, json_filepath: str) -> GazePositionCalibratorType:
+ """Load calibrator from .json file."""
+
+ # Remember file path to ease rewriting
+ cls.__json_filepath = json_filepath
+
+ # Open file
+ with open(cls.__json_filepath) as calibration_file:
+
+ return GazePositionCalibrator.from_dict(json.load(calibration_file))
+
+ def to_json(self, json_filepath: str = None):
+ """Save calibrator into .json file."""
+
+ # Remember file path to ease rewriting
+ if json_filepath is not None:
+
+ self.__json_filepath = json_filepath
+
+ # Open file
+ with open(self.__json_filepath, 'w', encoding='utf-8') as calibration_file:
+
+ json.dump({DataStructures.module_path(self):DataStructures.JsonEncoder().default(self)}, calibration_file, ensure_ascii=False, indent=4)
+
+ def store(self, timestamp: int|float, observed_gaze_position: GazePosition, expected_gaze_position: GazePosition):
+ """Store observed and expected gaze positions.
+
+ Parameters:
+ timestamp: time of observed gaze position
+ observed_gaze_position: where gaze position actually is
+ expected_gaze_position: where gaze position should be
+ """
+
+ raise NotImplementedError('store() method not implemented')
+
+ def reset(self):
+ """Reset observed and expected gaze positions."""
+
+ raise NotImplementedError('reset() method not implemented')
+
+ def calibrate(self) -> Any:
+ """Process calibration from observed and expected gaze positions.
+
+ Returns:
+ calibration outputs: any data returned to assess calibration
+ """
+
+ raise NotImplementedError('calibrate() method not implemented')
+
+ def apply(self, observed_gaze_position: GazePosition) -> GazePositionType:
+ """Apply calibration onto observed gaze position.
+
+ Parameters:
+ observed_gaze_position: where gaze position actually is
+
+ Returns:
+ expected_gaze_position: where gaze position should be if the calibrator is ready; otherwise, the observed gaze position
+ """
+
+ raise NotImplementedError('apply() method not implemented')
+
+ def draw(self, image: numpy.array):
+ """Draw calibration into image.
+
+ Parameters:
+ image: where to draw
+ """
+
+ raise NotImplementedError('draw() method not implemented')
+
+ @property
+ def calibrating(self) -> bool:
+ """Is the calibration running?"""
+
+ raise NotImplementedError('calibrating getter not implemented')
+
GazeMovementType = TypeVar('GazeMovement', bound="GazeMovement")
# Type definition for type annotation convenience
@@ -292,16 +400,16 @@ class GazeMovement():
ts_start, start_gaze_position = gaze_positions.pop_first()
ts_next, next_gaze_position = gaze_positions.first
- # Draw position if required
- if position_color is not None:
-
- start_gaze_position.draw(image, position_color, draw_precision=False)
-
# Draw line between positions if required
if line_color is not None:
cv2.line(image, (int(start_gaze_position[0]), int(start_gaze_position[1])), (int(next_gaze_position[0]), int(next_gaze_position[1])), line_color, 1)
+ # Draw position if required
+ if position_color is not None:
+
+ start_gaze_position.draw(image, position_color, draw_precision=False)
+
def draw(self, image: numpy.array, **kwargs):
"""Draw gaze movement into image."""
@@ -545,7 +653,7 @@ ScanStepType = TypeVar('ScanStep', bound="ScanStep")
# Type definition for type annotation convenience
class ScanStepError(Exception):
- """Exception raised at ScanStepError creation if a aoi scan step doesn't start by a fixation or doesn't end by a saccade."""
+ """Exception raised at ScanStep creation if a aoi scan step doesn't start by a fixation or doesn't end by a saccade."""
def __init__(self, message):
@@ -755,7 +863,7 @@ AOIScanStepType = TypeVar('AOIScanStep', bound="AOIScanStep")
# Type definition for type annotation convenience
class AOIScanStepError(Exception):
- """Exception raised at AOIScanStepError creation if a aoi scan step doesn't start by a fixation or doesn't end by a saccade."""
+ """Exception raised at AOIScanStep creation if a aoi scan step doesn't start by a fixation or doesn't end by a saccade."""
def __init__(self, message, aoi=''):
@@ -842,13 +950,13 @@ AOIScanPathType = TypeVar('AOIScanPathType', bound="AOIScanPathType")
class AOIScanPath(list):
"""List of aoi scan steps over successive aoi."""
- def __init__(self, expected_aois: list[str] = [], duration_max: int|float = 0):
+ def __init__(self, expected_aoi: list[str] = [], duration_max: int|float = 0):
super().__init__()
self.duration_max = duration_max
-
- self.expected_aois = expected_aois
+ self.expected_aoi = expected_aoi
+
self.__duration = 0
@property
@@ -903,13 +1011,13 @@ class AOIScanPath(list):
return sequence
@property
- def expected_aois(self):
+ def expected_aoi(self):
"""List of all expected aoi."""
- return self.__expected_aois
+ return self.__expected_aoi
- @expected_aois.setter
- def expected_aois(self, expected_aois: list[str] = []):
+ @expected_aoi.setter
+ def expected_aoi(self, expected_aoi: list[str] = []):
"""Edit list of all expected aoi.
!!! warning
@@ -917,15 +1025,15 @@ class AOIScanPath(list):
"""
self.clear()
- self.__expected_aois = expected_aois
+ self.__expected_aoi = expected_aoi
self.__movements = TimeStampedGazeMovements()
self.__current_aoi = ''
self.__index = ord('A')
self.__aoi_letter = {}
self.__letter_aoi = {}
- size = len(self.__expected_aois)
- self.__transition_matrix = pandas.DataFrame(numpy.zeros((size, size)), index=self.__expected_aois, columns=self.__expected_aois)
+ size = len(self.__expected_aoi)
+ self.__transition_matrix = pandas.DataFrame(numpy.zeros((size, size)), index=self.__expected_aoi, columns=self.__expected_aoi)
@property
def current_aoi(self):
@@ -953,7 +1061,7 @@ class AOIScanPath(list):
!!! warning
It could raise AOIScanStepError"""
- if looked_aoi not in self.__expected_aois:
+ if looked_aoi not in self.__expected_aoi:
raise AOIScanStepError('AOI not expected', looked_aoi)
@@ -1013,7 +1121,7 @@ class AOIScanPath(list):
"""Get how many fixations are there in the scan path and how many fixation are there in each aoi."""
scan_fixations_count = 0
- aoi_fixations_count = {aoi: 0 for aoi in self.__expected_aois}
+ aoi_fixations_count = {aoi: 0 for aoi in self.__expected_aoi}
for aoi_scan_step in self:
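
A minimal sketch of how the new abstract calibrator is meant to be loaded, reusing the LinearRegression implementation added above (single names are resolved inside argaze.GazeAnalysis; full module paths also work):

from argaze import GazeFeatures

calibrator = GazeFeatures.GazePositionCalibrator.from_dict({
    'LinearRegression': {
        'coefficients': [[1., 0.], [0., 1.]],
        'intercept': [0., 0.]
    }
})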
diff --git a/src/argaze/utils/aruco_markers_group_export.py b/src/argaze/utils/aruco_markers_group_export.py
new file mode 100644
index 0000000..d948105
--- /dev/null
+++ b/src/argaze/utils/aruco_markers_group_export.py
@@ -0,0 +1,160 @@
+#!/usr/bin/env python
+
+""" """
+
+__author__ = "Théo de la Hogue"
+__credits__ = []
+__copyright__ = "Copyright 2023, Ecole Nationale de l'Aviation Civile (ENAC)"
+__license__ = "BSD"
+
+import argparse
+import time
+import itertools
+
+from argaze.ArUcoMarkers import ArUcoCamera, ArUcoMarkersGroup
+from argaze.utils import UtilsFeatures
+
+import cv2
+import numpy
+
+def main():
+ """
+ Load a MOVIE and an ArUcoCamera CONFIGURATION to detect ArUco markers inside a selected movie frame, then export the detected ArUco markers group as a .obj file into an OUTPUT folder.
+ """
+
+ # Manage arguments
+ parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0])
+ parser.add_argument('movie', metavar='MOVIE', type=str, default=None, help='movie path')
+ parser.add_argument('configuration', metavar='CONFIGURATION', type=str, default=None, help='ArUco camera configuration')
+
+ parser.add_argument('-s','--start', metavar='START', type=float, default=0., help='start time in second')
+ parser.add_argument('-o', '--output', metavar='OUTPUT', type=str, default='.', help='export folder path')
+ args = parser.parse_args()
+
+ # Load movie
+ video_capture = cv2.VideoCapture(args.movie)
+
+ video_fps = video_capture.get(cv2.CAP_PROP_FPS)
+ image_width = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
+ image_height = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+ # Load ArUco camera
+ aruco_camera = ArUcoCamera.ArUcoCamera.from_json(args.configuration)
+
+ # Create empty ArUco scene
+ aruco_markers_group = None
+
+ # Create a window
+ cv2.namedWindow(aruco_camera.name, cv2.WINDOW_AUTOSIZE)
+
+ # Enable exit signal handler
+ exit = UtilsFeatures.ExitSignalHandler()
+
+ # Init image selection
+ current_image_index = -1
+ _, current_image = video_capture.read()
+ next_image_index = int(args.start * video_fps)
+ refresh = False
+
+ while not exit.status():
+
+ # Select a new image and detect markers once
+ if next_image_index != current_image_index or refresh:
+
+ video_capture.set(cv2.CAP_PROP_POS_FRAMES, next_image_index)
+
+ success, video_image = video_capture.read()
+
+ if success:
+
+ # Refresh once
+ refresh = False
+
+ current_image_index = video_capture.get(cv2.CAP_PROP_POS_FRAMES) - 1
+ current_image_time = video_capture.get(cv2.CAP_PROP_POS_MSEC)
+
+ # Detect markers
+ detection_time, projection_time, exceptions = aruco_camera.watch(video_image)
+
+ # Estimate each markers pose
+ aruco_camera.aruco_detector.estimate_markers_pose(aruco_camera.aruco_detector.detected_markers)
+
+ # Build aruco scene from detected markers
+ aruco_markers_group = ArUcoMarkersGroup.ArUcoMarkersGroup(aruco_camera.aruco_detector.marker_size, aruco_camera.aruco_detector.dictionary, aruco_camera.aruco_detector.detected_markers)
+
+ # Get camera image
+ camera_image = aruco_camera.image()
+
+ # Write detected markers
+ cv2.putText(camera_image, f'Detecting markers {list(aruco_camera.aruco_detector.detected_markers.keys())}', (20, aruco_camera.size[1]-40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+
+ # Write timing
+ cv2.putText(camera_image, f'Frame at {int(current_image_time)}ms', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(camera_image, f'Detection {int(detection_time)}ms', (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(camera_image, f'Projection {int(projection_time)}ms', (20, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+
+ # Write documentation
+ cv2.putText(camera_image, f'<- previous image', (aruco_camera.size[0]-500, aruco_camera.size[1]-120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(camera_image, f'-> next image', (aruco_camera.size[0]-500, aruco_camera.size[1]-80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(camera_image, f'Ctrl+s: export ArUco markers', (aruco_camera.size[0]-500, aruco_camera.size[1]-40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+
+ # Copy image
+ current_image = camera_image.copy()
+
+ # Keep last image
+ else:
+
+ video_image = current_image.copy()
+
+ key_pressed = cv2.waitKey(10)
+
+ #if key_pressed != -1:
+ # print(key_pressed)
+
+ # Select previous image with left arrow
+ if key_pressed == 2:
+ next_image_index -= 1
+
+ # Select next image with right arrow
+ if key_pressed == 3:
+ next_image_index += 1
+
+ # Clip image index
+ if next_image_index < 0:
+ next_image_index = 0
+
+ # r: reload configuration
+ if key_pressed == 114:
+
+ aruco_camera = ArUcoCamera.ArUcoCamera.from_json(args.configuration)
+ refresh = True
+ print('Configuration reloaded')
+
+ # Save selected marker edition using 'Ctrl + s'
+ if key_pressed == 19:
+
+ if aruco_markers_group:
+
+ aruco_markers_group.to_obj(f'{args.output}/{int(current_image_time)}-aruco_markers_group.obj')
+ print(f'ArUco markers saved into {args.output}')
+
+ else:
+
+ print(f'No ArUco markers to export')
+
+ # Close window using 'Esc' key
+ if key_pressed == 27:
+ break
+
+ # Display video
+ cv2.imshow(aruco_camera.name, video_image)
+
+ # Close movie capture
+ video_capture.release()
+
+ # Stop image display
+ cv2.destroyAllWindows()
+
+if __name__ == '__main__':
+
+ main() \ No newline at end of file
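
For reference, a hypothetical invocation of the new utility (paths are placeholders):

python -m argaze.utils.aruco_markers_group_export record.mov aruco_camera.json -s 12.5 -o export

Left/right arrows select the movie frame, 'r' reloads the configuration and Ctrl+s writes a <time>-aruco_markers_group.obj file into the export folder.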
diff --git a/src/argaze/utils/aruco_markers_scene_export.py b/src/argaze/utils/aruco_markers_scene_export.py
deleted file mode 100644
index f618342..0000000
--- a/src/argaze/utils/aruco_markers_scene_export.py
+++ /dev/null
@@ -1,176 +0,0 @@
-#!/usr/bin/env python
-
-""" """
-
-__author__ = "Théo de la Hogue"
-__credits__ = []
-__copyright__ = "Copyright 2023, Ecole Nationale de l'Aviation Civile (ENAC)"
-__license__ = "BSD"
-
-import argparse
-import time
-import itertools
-
-from argaze.ArUcoMarkers import ArUcoMarkersDictionary, ArUcoOpticCalibrator, ArUcoDetector, ArUcoMarkersGroup
-from argaze.utils import UtilsFeatures
-
-import cv2
-import numpy
-
-def main():
- """
- Load a movie with ArUco markers inside and select image into it, detect ArUco markers belonging to a given dictionary and size into the selected image thanks to given optic parameters and detector parameters then, export detected ArUco scene as .obj file.
- """
-
- # Manage arguments
- parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0])
- parser.add_argument('movie', metavar='MOVIE', type=str, default=None, help='movie path')
- parser.add_argument('dictionary', metavar='DICTIONARY', type=str, default=None, help='ArUco dictionary to detect')
- parser.add_argument('marker_size', metavar='MARKER_SIZE', type=int, default=3, help='marker size in cm')
- parser.add_argument('optic_parameters', metavar='OPTIC_PARAMETERS', type=str, default=None, help='Optic parameters from camera calibration process')
- parser.add_argument('detector_parameters', metavar='DETECTOR_PARAMETERS', type=str, default=None, help='ArUco detector parameters')
-
- parser.add_argument('-s','--start', metavar='START', type=float, default=0., help='start time in second')
- parser.add_argument('-o', '--output', metavar='OUT', type=str, default='.', help='export scene folder path')
- args = parser.parse_args()
-
- # Load movie
- video_capture = cv2.VideoCapture(args.movie)
-
- video_fps = video_capture.get(cv2.CAP_PROP_FPS)
- image_width = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
- image_height = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
-
- # Load ArUco dictionary
- aruco_dictionary = ArUcoMarkersDictionary.ArUcoMarkersDictionary(args.dictionary)
-
- # Load optic parameters
- optic_parameters = ArUcoOpticCalibrator.OpticParameters.from_json(args.optic_parameters)
-
- # Load detector parameters
- detector_parameters = ArUcoDetector.DetectorParameters.from_json(args.detector_parameters)
-
- # Create ArUco detector
- aruco_detector = ArUcoDetector.ArUcoDetector(dictionary=aruco_dictionary, marker_size=args.marker_size, optic_parameters=optic_parameters, parameters=detector_parameters)
-
- # Create empty ArUco scene
- aruco_markers_group = None
-
- # Create a window to display AR environment
- window_name = "Export ArUco scene"
- cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)
-
- # Enable exit signal handler
- exit = UtilsFeatures.ExitSignalHandler()
-
- # Init image selection
- current_image_index = -1
- _, current_image = video_capture.read()
- next_image_index = int(args.start * video_fps)
- refresh = False
-
- # Hide help
- draw_help = False
-
- while not exit.status():
-
- # Select a new image and detect markers once
- if next_image_index != current_image_index or refresh:
-
- video_capture.set(cv2.CAP_PROP_POS_FRAMES, next_image_index)
-
- success, video_image = video_capture.read()
-
- if success:
-
- # Refresh once
- refresh = False
-
- current_image_index = video_capture.get(cv2.CAP_PROP_POS_FRAMES) - 1
- current_image_time = video_capture.get(cv2.CAP_PROP_POS_MSEC)
-
- # Detect markers
- aruco_detector.detect_markers(video_image)
-
- # Estimate markers pose
- aruco_detector.estimate_markers_pose()
-
- # Build aruco scene from detected markers
- aruco_markers_group = ArUcoMarkersGroup.ArUcoMarkersGroup(args.marker_size, aruco_dictionary, aruco_detector.detected_markers)
-
- # Write scene detected markers
- cv2.putText(video_image, f'{list(aruco_detector.detected_markers.keys())}', (20, image_height-80), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
-
- # Write timing
- cv2.putText(video_image, f'Time: {int(current_image_time)} ms', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
-
- # Copy image
- current_image = video_image.copy()
-
- # Keep last image
- else:
-
- video_image = current_image.copy()
-
- # Draw detected markers
- aruco_detector.draw_detected_markers(video_image, {"color": [0, 255, 0], "draw_axes": {"thickness": 4}})
-
- # Write documentation
- cv2.putText(video_image, f'Press \'h\' for help', (950, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
-
- if draw_help:
-
- cv2.rectangle(video_image, (0, 50), (500, 300), (127, 127, 127), -1)
- cv2.putText(video_image, f'> Left arrow: previous image', (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
- cv2.putText(video_image, f'> Right arrow: next image', (20, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
- cv2.putText(video_image, f'> Ctrl+s: export ArUco scene', (20, 160), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
-
- key_pressed = cv2.waitKey(10)
-
- #if key_pressed != -1:
- # print(key_pressed)
-
- # Select previous image with left arrow
- if key_pressed == 2:
- next_image_index -= 1
-
- # Select next image with right arrow
- if key_pressed == 3:
- next_image_index += 1
-
- # Clip image index
- if next_image_index < 0:
- next_image_index = 0
-
- # Switch help mode with h key
- if key_pressed == 104:
- draw_help = not draw_help
-
- # Save selected marker edition using 'Ctrl + s'
- if key_pressed == 19:
-
- if aruco_markers_group:
-
- aruco_markers_group.to_obj(f'{args.output}/{int(current_image_time)}-aruco_markers_group.obj')
- print(f'ArUco scene saved into {args.output}')
-
- else:
-
- print(f'No ArUco scene to export')
-
- # Close window using 'Esc' key
- if key_pressed == 27:
- break
-
- # Display video
- cv2.imshow(window_name, video_image)
-
- # Close movie capture
- video_capture.release()
-
- # Stop image display
- cv2.destroyAllWindows()
-
-if __name__ == '__main__':
-
- main() \ No newline at end of file
diff --git a/src/argaze/utils/demo_aruco_markers_run.py b/src/argaze/utils/demo_aruco_markers_run.py
index 6dc081d..5e1ac2e 100644
--- a/src/argaze/utils/demo_aruco_markers_run.py
+++ b/src/argaze/utils/demo_aruco_markers_run.py
@@ -14,6 +14,7 @@ import time
from argaze import ArFeatures, GazeFeatures
from argaze.ArUcoMarkers import ArUcoCamera
+from argaze.utils import UtilsFeatures
import cv2
import numpy
@@ -40,9 +41,29 @@ def main():
# Init timestamp
start_time = time.time()
+ # Prepare gaze analysis assessment
+ call_chrono = UtilsFeatures.TimeProbe()
+ call_chrono.start()
+
+ gaze_positions_frequency = 0
+ gaze_analysis_time = 0
+
# Fake gaze position with mouse pointer
def on_mouse_event(event, x, y, flags, param):
+ nonlocal gaze_positions_frequency
+ nonlocal gaze_analysis_time
+
+ # Assess gaze analysis
+ lap_time, nb_laps, elapsed_time = call_chrono.lap()
+
+ if elapsed_time > 1e3:
+
+ gaze_positions_frequency = nb_laps
+ call_chrono.restart()
+
+ gaze_analysis_time = 0
+
# Edit millisecond timestamp
timestamp = int((time.time() - start_time) * 1e3)
@@ -54,12 +75,20 @@ def main():
gaze_movement, scan_step_analysis, layer_analysis, execution_times, exception = look_data
- # Do something with look data
- # ...
+ # Assess gaze analysis
+ gaze_analysis_time += execution_times['total']
# Attach mouse callback to window
cv2.setMouseCallback(aruco_camera.name, on_mouse_event)
+ # Prepare video fps assessment
+ video_fps = 0
+ video_chrono = UtilsFeatures.TimeProbe()
+ video_chrono.start()
+
+ # Prepare visualisation time assessment
+ visualisation_time = 0
+
# Enable camera video capture into separate thread
video_capture = cv2.VideoCapture(int(args.source) if args.source.isdecimal() else args.source)
@@ -69,30 +98,48 @@ def main():
# Capture images
while video_capture.isOpened():
+ # Assess capture time
+ capture_start = time.time()
+
# Read video image
success, video_image = video_capture.read()
+ # Assess capture time
+ capture_time = int((time.time() - capture_start) * 1e3)
+
if success:
+ # Assess video fps
+ lap_time, nb_laps, elapsed_time = video_chrono.lap()
+
+ if elapsed_time > 1e3:
+
+ video_fps = nb_laps
+ video_chrono.restart()
+
# Detect and project AR features
- detection_time, exceptions = aruco_camera.watch(video_image)
+ detection_time, projection_time, exceptions = aruco_camera.watch(video_image)
+
+ # Assess visualisation time
+ visualisation_start = time.time()
# Get ArUcoCamera frame image
aruco_camera_image = aruco_camera.image()
- # Write detection fps
- cv2.rectangle(aruco_camera_image, (0, 0), (420, 50), (63, 63, 63), -1)
- cv2.putText(aruco_camera_image, f'Detection fps: {1e3/detection_time:.1f}', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+ # Write time info
+ cv2.rectangle(aruco_camera_image, (0, 0), (aruco_camera.size[0], 100), (63, 63, 63), -1)
+ cv2.putText(aruco_camera_image, f'{video_fps} FPS | Capture {capture_time}ms | Detection {int(detection_time)}ms | Projection {int(projection_time)}ms | Visualisation {visualisation_time}ms', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(aruco_camera_image, f'{gaze_positions_frequency} gaze positions/s | Gaze analysis {gaze_analysis_time:.2f}ms', (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
# Handle exceptions
for i, (scene_name, e) in enumerate(exceptions.items()):
# Write errors
- cv2.rectangle(aruco_camera_image, (0, (i+1)*50), (720, (i+2)*50), (127, 127, 127), -1)
- cv2.putText(aruco_camera_image, f'{scene_name} error: {e}', (20, (i+1)*90), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.rectangle(aruco_camera_image, (0, (i+1)*100), (aruco_camera.size[0], (i+2)*80), (127, 127, 127), -1)
+ cv2.putText(aruco_camera_image, f'{scene_name} error: {e}', (20, (i+1)*140), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
# Write hint
- cv2.putText(aruco_camera_image, 'Mouve mouse pointer over gray rectangle area', (450, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(aruco_camera_image, 'Move mouse pointer over gray rectangle area', (20, aruco_camera.size[1]-40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
# Display ArUcoCamera frame image
cv2.imshow(aruco_camera.name, aruco_camera_image)
@@ -103,12 +150,21 @@ def main():
# Display scene frame
cv2.imshow(f'{scene_frame.parent.name}:{scene_frame.name}', scene_frame.image())
+ else:
+
+ # Assess visualisation time
+ visualisation_start = time.time()
+
# Stop by pressing 'Esc' key
- if cv2.waitKey(10) == 27:
+ # NOTE: on MacOS, cv2.waitKey(1) waits ~40ms
+ if cv2.waitKey(1) == 27:
# Close camera video capture
video_capture.release()
+ # Assess visualisation time
+ visualisation_time = int((time.time() - visualisation_start) * 1e3)
+
# Stop image display
cv2.destroyAllWindows()
diff --git a/src/argaze/utils/demo_data/aoi_2d_scene.json b/src/argaze/utils/demo_data/aoi_2d_scene.json
new file mode 100644
index 0000000..ac58b63
--- /dev/null
+++ b/src/argaze/utils/demo_data/aoi_2d_scene.json
@@ -0,0 +1,18 @@
+{
+ "BlueTriangle":[[960, 664], [1113, 971], [806, 971]],
+ "RedSquare": {
+ "Rectangle": {
+ "x": 268,
+ "y": 203,
+ "width": 308,
+ "height": 308
+ }
+ },
+ "GreenCircle": {
+ "Circle": {
+ "cx": 1497,
+ "cy": 356,
+ "radius": 153
+ }
+ }
+} \ No newline at end of file
diff --git a/src/argaze/utils/demo_data/aoi_3d_scene.obj b/src/argaze/utils/demo_data/aoi_3d_scene.obj
index d32e235..0ce97de 100644
--- a/src/argaze/utils/demo_data/aoi_3d_scene.obj
+++ b/src/argaze/utils/demo_data/aoi_3d_scene.obj
@@ -1,5 +1,3 @@
-# Blender v3.0.1 OBJ File: 'ar_camera.blend'
-# www.blender.org
o GrayRectangle
v 0.000000 0.000000 0.000000
v 25.000000 0.000000 0.000000
@@ -7,51 +5,3 @@ v 0.000000 14.960000 0.000000
v 25.000000 14.960000 0.000000
s off
f 1 2 4 3
-o RedSquare
-v 3.497026 8.309391 0.000000
-v 7.504756 8.309391 0.000000
-v 3.497026 12.314838 0.001030
-v 7.504756 12.314838 0.001030
-s off
-f 5 6 8 7
-o BlueTriangle
-v 10.500295 2.307687 0.000000
-v 14.503224 2.306344 0.000000
-v 12.502419 6.312207 0.001030
-s off
-f 9 10 11
-o GreenCircle
-v 19.495552 12.311101 0.000000
-v 19.105371 12.272672 0.000000
-v 18.730185 12.158860 0.000000
-v 18.384411 11.974040 0.000000
-v 18.081339 11.725314 0.000000
-v 17.832613 11.422241 0.000000
-v 17.647793 11.076468 0.000000
-v 17.533981 10.701282 0.000000
-v 17.495552 10.311101 0.000000
-v 17.533981 9.920920 0.000000
-v 17.647793 9.545734 0.000000
-v 17.832613 9.199961 0.000000
-v 18.081339 8.896888 0.000000
-v 18.384411 8.648162 0.000000
-v 18.730185 8.463342 0.000000
-v 19.105371 8.349530 0.000000
-v 19.495552 8.311101 0.000000
-v 19.885733 8.349530 0.000000
-v 20.260920 8.463342 0.000000
-v 20.606693 8.648162 0.000000
-v 20.909765 8.896887 0.000000
-v 21.158491 9.199960 0.000000
-v 21.343311 9.545733 0.000000
-v 21.457123 9.920920 0.000000
-v 21.495552 10.311101 0.000000
-v 21.457123 10.701282 0.000000
-v 21.343311 11.076468 0.000000
-v 21.158491 11.422241 0.000000
-v 20.909765 11.725314 0.000000
-v 20.606693 11.974040 0.000000
-v 20.260920 12.158860 0.000000
-v 19.885733 12.272672 0.000000
-s off
-f 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 12
diff --git a/src/argaze/utils/demo_data/demo_aruco_markers_setup.json b/src/argaze/utils/demo_data/demo_aruco_markers_setup.json
index 9a3b79f..7a4f6d1 100644
--- a/src/argaze/utils/demo_data/demo_aruco_markers_setup.json
+++ b/src/argaze/utils/demo_data/demo_aruco_markers_setup.json
@@ -5,9 +5,10 @@
"dictionary": "DICT_APRILTAG_16h5",
"marker_size": 5,
"parameters": {
- "cornerRefinementMethod": 1,
+ "cornerRefinementMethod": 3,
"aprilTagQuadSigma": 2,
- "aprilTagDeglitch": 1
+ "aprilTagDeglitch": 1,
+ "useAruco3Detection": 1
}
},
"layers": {
@@ -40,7 +41,21 @@
"height": 72,
"z": 100,
"point_size": 1,
- "point_color": [0, 0, 255]
+ "point_color": [127, 127, 127]
+ },
+ "draw_scenes": {
+ "ArScene Demo": {
+ "draw_aruco_markers_group": {
+ "draw_axes": {
+ "thickness": 3,
+ "length": 10
+ },
+ "draw_places": {
+ "color": [0, 0, 0],
+ "border_size": 1
+ }
+ }
+ }
}
},
"scenes": {
@@ -53,11 +68,11 @@
},
"frames": {
"GrayRectangle": {
- "size": [640, 383],
+ "size": [1920, 1149],
"background": "frame_background.jpg",
"gaze_movement_identifier": {
"DispersionThresholdIdentification": {
- "deviation_max_threshold": 25,
+ "deviation_max_threshold": 50,
"duration_min_threshold": 200
}
},
@@ -65,12 +80,10 @@
"duration_max": 10000
},
"layers": {
- "GrayRectangle": {
- "aoi_scene": "aoi_3d_scene.obj",
+ "main_layer": {
+ "aoi_scene": "aoi_2d_scene.json",
"aoi_matcher": {
- "FocusPointInside": {
- "exclude": ["GrayRectangle"]
- }
+ "FocusPointInside": {}
}
}
},
@@ -82,16 +95,16 @@
"heatmap_weight": 0.5,
"draw_scan_path": {
"draw_fixations": {
- "deviation_circle_color": [0, 255, 255],
- "duration_border_color": [0, 127, 127],
+ "deviation_circle_color": [255, 0, 255],
+ "duration_border_color": [127, 0, 127],
"duration_factor": 1e-2
},
"draw_saccades": {
- "line_color": [0, 255, 255]
+ "line_color": [255, 0, 255]
}
},
"draw_layers": {
- "GrayRectangle": {
+ "main_layer": {
"draw_aoi_scene": {
"draw_aoi": {
"color": [255, 255, 255],
@@ -102,10 +115,6 @@
"draw_matched_fixation": {
"deviation_circle_color": [255, 255, 255]
},
- "draw_matched_fixation_positions": {
- "position_color": [0, 255, 255],
- "line_color": [0, 0, 0]
- },
"draw_looked_aoi": {
"color": [0, 255, 0],
"border_size": 2
@@ -115,6 +124,15 @@
}
}
},
+ "draw_fixations": {
+ "deviation_circle_color": [255, 255, 255],
+ "duration_border_color": [127, 0, 127],
+ "duration_factor": 1e-2,
+ "draw_positions": {
+ "position_color": [0, 255, 255],
+ "line_color": [0, 0, 0]
+ }
+ },
"draw_gaze_positions": {
"color": [0, 255, 255],
"size": 2
diff --git a/src/argaze/utils/demo_data/demo_gaze_analysis_setup.json b/src/argaze/utils/demo_data/demo_gaze_analysis_setup.json
index 414a6fe..f921662 100644
--- a/src/argaze/utils/demo_data/demo_gaze_analysis_setup.json
+++ b/src/argaze/utils/demo_data/demo_gaze_analysis_setup.json
@@ -8,6 +8,7 @@
"duration_min_threshold": 200
}
},
+ "filter_in_progress_identification": false,
"scan_path": {
"duration_max": 10000
},
@@ -17,7 +18,7 @@
"NearestNeighborIndex": {
"size": [1920, 1149]
},
- "ExploitExploreRatio": {
+ "ExploreExploitRatio": {
"short_fixation_duration_threshold": 0
}
},
@@ -25,11 +26,10 @@
"size": [320, 240]
},
"layers": {
- "GrayRectangle": {
- "aoi_scene": "aoi_3d_scene.obj",
+ "main_layer": {
+ "aoi_scene": "aoi_2d_scene.json",
"aoi_matcher": {
"DeviationCircleCoverage": {
- "exclude": ["GrayRectangle"],
"coverage_threshold": 0.5
}
},
@@ -60,11 +60,10 @@
},
"draw_saccades": {
"line_color": [255, 0, 255]
- },
- "deepness": 0
+ }
},
"draw_layers": {
- "GrayRectangle": {
+ "main_layer": {
"draw_aoi_scene": {
"draw_aoi": {
"color": [255, 255, 255],
@@ -73,11 +72,11 @@
},
"draw_aoi_matching": {
"draw_matched_fixation": {
- "deviation_circle_color": [255, 255, 255]
- },
- "draw_matched_fixation_positions": {
- "position_color": [0, 255, 255],
- "line_color": [0, 0, 0]
+ "deviation_circle_color": [255, 255, 255],
+ "draw_positions": {
+ "position_color": [0, 255, 0],
+ "line_color": [0, 0, 0]
+ }
},
"draw_matched_region": {
"color": [0, 255, 0],
@@ -92,6 +91,18 @@
}
}
},
+ "draw_fixations": {
+ "deviation_circle_color": [255, 255, 255],
+ "duration_border_color": [127, 0, 127],
+ "duration_factor": 1e-2,
+ "draw_positions": {
+ "position_color": [0, 255, 255],
+ "line_color": [0, 0, 0]
+ }
+ },
+ "draw_saccades": {
+ "line_color": [255, 0, 255]
+ },
"draw_gaze_positions": {
"color": [0, 255, 255],
"size": 2
diff --git a/src/argaze/utils/demo_gaze_analysis_run.py b/src/argaze/utils/demo_gaze_analysis_run.py
index 465c5db..9856d90 100644
--- a/src/argaze/utils/demo_gaze_analysis_run.py
+++ b/src/argaze/utils/demo_gaze_analysis_run.py
@@ -74,18 +74,18 @@ def main():
# Write last 5 steps of aoi scan path
path = ''
- for step in ar_frame.layers["GrayRectangle"].aoi_scan_path[-5:]:
+ for step in ar_frame.layers["main_layer"].aoi_scan_path[-5:]:
path += f'> {step.aoi} '
- path += f'> {ar_frame.layers["GrayRectangle"].aoi_scan_path.current_aoi}'
+ path += f'> {ar_frame.layers["main_layer"].aoi_scan_path.current_aoi}'
cv2.putText(frame_image, path, (20, ar_frame.size[1]-40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
# Display Transition matrix analysis if loaded
try:
- transition_matrix_analyzer = ar_frame.layers['GrayRectangle'].aoi_scan_path_analyzers["argaze.GazeAnalysis.TransitionMatrix"]
+ transition_matrix_analyzer = ar_frame.layers["main_layer"].aoi_scan_path_analyzers["argaze.GazeAnalysis.TransitionMatrix"]
cv2.putText(frame_image, f'Transition matrix density: {transition_matrix_analyzer.transition_matrix_density:.2f}', (20, ar_frame.size[1]-160), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
@@ -97,8 +97,8 @@ def main():
if from_aoi != to_aoi and probability > 0.0:
- from_center = ar_frame.layers['GrayRectangle'].aoi_scene[from_aoi].center.astype(int)
- to_center = ar_frame.layers['GrayRectangle'].aoi_scene[to_aoi].center.astype(int)
+ from_center = ar_frame.layers["main_layer"].aoi_scene[from_aoi].center.astype(int)
+ to_center = ar_frame.layers["main_layer"].aoi_scene[to_aoi].center.astype(int)
start_line = (0.5 * from_center + 0.5 * to_center).astype(int)
color = [int(probability*200) + 55, int(probability*200) + 55, int(probability*200) + 55]
@@ -112,7 +112,7 @@ def main():
# Display aoi scan path basic metrics analysis if loaded
try:
- basic_analyzer = ar_frame.layers['GrayRectangle'].aoi_scan_path_analyzers["argaze.GazeAnalysis.Basic"]
+ basic_analyzer = ar_frame.layers["main_layer"].aoi_scan_path_analyzers["argaze.GazeAnalysis.Basic"]
# Write basic analysis
cv2.putText(frame_image, f'Step number: {basic_analyzer.steps_number}', (20, ar_frame.size[1]-440), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
@@ -141,7 +141,7 @@ def main():
# Display aoi scan path K-modified coefficient analysis if loaded
try:
- aoi_kc_analyzer = ar_frame.layers['GrayRectangle'].aoi_scan_path_analyzers["argaze.GazeAnalysis.KCoefficient"]
+ aoi_kc_analyzer = ar_frame.layers["main_layer"].aoi_scan_path_analyzers["argaze.GazeAnalysis.KCoefficient"]
# Write aoi Kc analysis
if aoi_kc_analyzer.K < 0.:
@@ -158,7 +158,7 @@ def main():
# Display Lempel-Ziv complexity analysis if loaded
try:
- lzc_analyzer = ar_frame.layers['GrayRectangle'].aoi_scan_path_analyzers["argaze.GazeAnalysis.LempelZivComplexity"]
+ lzc_analyzer = ar_frame.layers["main_layer"].aoi_scan_path_analyzers["argaze.GazeAnalysis.LempelZivComplexity"]
cv2.putText(frame_image, f'Lempel-Ziv complexity: {lzc_analyzer.lempel_ziv_complexity}', (20, ar_frame.size[1]-200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
@@ -168,7 +168,7 @@ def main():
# Display N-Gram analysis if loaded
try:
- ngram_analyzer = ar_frame.layers['GrayRectangle'].aoi_scan_path_analyzers["argaze.GazeAnalysis.NGram"]
+ ngram_analyzer = ar_frame.layers["main_layer"].aoi_scan_path_analyzers["argaze.GazeAnalysis.NGram"]
# Display only 3-gram analysis
start = ar_frame.size[1] - ((len(ngram_analyzer.ngrams_count[3]) + 1) * 40)
@@ -188,7 +188,7 @@ def main():
# Display Entropy analysis if loaded
try:
- entropy_analyzer = ar_frame.layers['GrayRectangle'].aoi_scan_path_analyzers["argaze.GazeAnalysis.Entropy"]
+ entropy_analyzer = ar_frame.layers["main_layer"].aoi_scan_path_analyzers["argaze.GazeAnalysis.Entropy"]
cv2.putText(frame_image, f'Stationary entropy: {entropy_analyzer.stationary_entropy:.3f},', (20, ar_frame.size[1]-280), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
cv2.putText(frame_image, f'Transition entropy: {entropy_analyzer.transition_entropy:.3f},', (20, ar_frame.size[1]-240), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
@@ -206,12 +206,12 @@ def main():
except KeyError:
pass
- # Display Exploit/Explore ratio analysis if loaded
+ # Display Explore/Exploit ratio analysis if loaded
try:
- xxr_analyser = ar_frame.scan_path_analyzers["argaze.GazeAnalysis.ExploitExploreRatio"]
+ xxr_analyser = ar_frame.scan_path_analyzers["argaze.GazeAnalysis.ExploreExploitRatio"]
- cv2.putText(frame_image, f'Exploit explore ratio: {xxr_analyser.exploit_explore_ratio:.3f}', (20, ar_frame.size[1]-360), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(frame_image, f'Explore/Exploit ratio: {xxr_analyser.explore_exploit_ratio:.3f}', (20, ar_frame.size[1]-360), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
except KeyError: