author    Théo de la Hogue    2023-06-07 14:34:14 +0200
committer Théo de la Hogue    2023-06-07 14:34:14 +0200
commit    c4552e04e1271a9210a934233beae5be1943d034 (patch)
tree      a44041e544bc700976237bfea9058ec06f9a2904
parent    bd9cd27c9d44c072164f564ffffeb22e37106b89 (diff)
Writing User guide and use cases section.
-rw-r--r--  README.md | 18
-rw-r--r--  docs/contributor_guide/build_documentation.md | 2
-rw-r--r--  docs/contributor_guide/unitary_test.md | 5
-rw-r--r--  docs/css/extra.css | 7
-rw-r--r--  docs/img/aoi_projection.png | bin 0 -> 20707 bytes
-rw-r--r--  docs/img/aoi_scan_path.png | bin 0 -> 29067 bytes
-rw-r--r--  docs/img/ar_environment_axis.png (renamed from docs/user_guide/ar_environment_axis.png) | bin 106367 -> 106367 bytes
-rw-r--r--  docs/img/aruco_dictionaries.png | bin 0 -> 89114 bytes
-rw-r--r--  docs/img/aruco_scene.png | bin 0 -> 11903 bytes
-rw-r--r--  docs/img/camera_calibrated.png | bin 0 -> 9431 bytes
-rw-r--r--  docs/img/camera_calibration.png | bin 0 -> 29136 bytes
-rw-r--r--  docs/img/camera_calibration_step.png | bin 0 -> 29174 bytes
-rw-r--r--  docs/img/camera_distorsion.png | bin 0 -> 21784 bytes
-rw-r--r--  docs/img/camera_marker_world_axis.png | bin 0 -> 54236 bytes
-rw-r--r--  docs/img/circle_intersection.png | bin 0 -> 10800 bytes
-rw-r--r--  docs/img/contains_point.png | bin 0 -> 6505 bytes
-rw-r--r--  docs/img/detected_markers.png | bin 0 -> 14941 bytes
-rw-r--r--  docs/img/distance.png | bin 0 -> 9679 bytes
-rw-r--r--  docs/img/favicon.ico | bin 0 -> 15406 bytes
-rw-r--r--  docs/img/fixation_and_saccade.png | bin 0 -> 22230 bytes
-rw-r--r--  docs/img/get_last_before.png | bin 0 -> 9017 bytes
-rw-r--r--  docs/img/get_last_until.png | bin 0 -> 9113 bytes
-rw-r--r--  docs/img/heatmap.png | bin 0 -> 46531 bytes
-rw-r--r--  docs/img/logo-argaze.png | bin 0 -> 5248 bytes
-rw-r--r--  docs/img/overlapping.png | bin 0 -> 15668 bytes
-rw-r--r--  docs/img/point_spread.png | bin 0 -> 8542 bytes
-rw-r--r--  docs/img/pop_last_before.png | bin 0 -> 10447 bytes
-rw-r--r--  docs/img/pop_last_until.png | bin 0 -> 11074 bytes
-rw-r--r--  docs/img/pose_estimation.png | bin 0 -> 15818 bytes
-rw-r--r--  docs/img/scan_path.png | bin 0 -> 20567 bytes
-rw-r--r--  docs/img/simone_aoi_scene.png | bin 0 -> 141110 bytes
-rw-r--r--  docs/img/simone_aruco_scene.png | bin 0 -> 160927 bytes
-rw-r--r--  docs/img/simone_cockpit.png | bin 0 -> 671361 bytes
-rw-r--r--  docs/img/simone_cockpit_3d.png | bin 0 -> 745778 bytes
-rw-r--r--  docs/img/simone_projection.png | bin 0 -> 681649 bytes
-rw-r--r--  docs/img/vision_cone.png | bin 0 -> 14240 bytes
-rw-r--r--  docs/index.md | 20
-rw-r--r--  docs/installation.md (renamed from docs/getting_started/installation.md) | 22
-rw-r--r--  docs/license.md | 5
-rw-r--r--  docs/logo-large.png | bin 31216 -> 0 bytes
-rw-r--r--  docs/use_cases/simone_a320_cockpit_simulator.md | 28
-rw-r--r--  docs/user_guide/ar_environment/environment_exploitation.md | 36
-rw-r--r--  docs/user_guide/ar_environment/environment_setup.md | 77
-rw-r--r--  docs/user_guide/ar_environment/introduction.md | 6
-rw-r--r--  docs/user_guide/areas_of_interest/aoi_frame.md | 43
-rw-r--r--  docs/user_guide/areas_of_interest/aoi_matching.md | 47
-rw-r--r--  docs/user_guide/areas_of_interest/aoi_projection.md | 22
-rw-r--r--  docs/user_guide/areas_of_interest/aoi_scene_description.md | 57
-rw-r--r--  docs/user_guide/areas_of_interest/introduction.md | 8
-rw-r--r--  docs/user_guide/areas_of_interest/vision_cone_filtering.md | 18
-rw-r--r--  docs/user_guide/aruco_markers/camera_calibration.md | 83
-rw-r--r--  docs/user_guide/aruco_markers/dictionary_selection.md | 17
-rw-r--r--  docs/user_guide/aruco_markers/introduction.md | 14
-rw-r--r--  docs/user_guide/aruco_markers/markers_creation.md | 17
-rw-r--r--  docs/user_guide/aruco_markers/markers_detection.md | 47
-rw-r--r--  docs/user_guide/aruco_markers/markers_pose_estimation.md | 20
-rw-r--r--  docs/user_guide/aruco_markers/markers_scene_description.md | 117
-rw-r--r--  docs/user_guide/demos.md (renamed from docs/getting_started/demos.md) | 0
-rw-r--r--  docs/user_guide/gaze_analysis/gaze_movement.md | 141
-rw-r--r--  docs/user_guide/gaze_analysis/gaze_position.md | 68
-rw-r--r--  docs/user_guide/gaze_analysis/introduction.md | 7
-rw-r--r--  docs/user_guide/gaze_analysis/scan_path.md | 168
-rw-r--r--  docs/user_guide/ready-made.md (renamed from docs/getting_started/ready-made.md) | 0
-rw-r--r--  docs/user_guide/timestamped_data/data_synchronisation.md | 106
-rw-r--r--  docs/user_guide/timestamped_data/introduction.md | 6
-rw-r--r--  docs/user_guide/timestamped_data/ordered_dictionary.md | 19
-rw-r--r--  docs/user_guide/timestamped_data/pandas_dataframe_conversion.md | 31
-rw-r--r--  docs/user_guide/timestamped_data/saving_and_loading.md | 14
-rw-r--r--  mkdocs.yml | 44
69 files changed, 1309 insertions, 31 deletions
diff --git a/README.md b/README.md
index 31a5b63..f33dd18 100644
--- a/README.md
+++ b/README.md
@@ -1,17 +1,3 @@
-# ArGaze documentation
+# Welcome to the ArGaze package
-**Useful links**: [Installation](getting_started#installation) | [Source Repository](https://git.recherche.enac.fr/projects/argaze/repository) | [Issue Tracker](https://git.recherche.enac.fr/projects/argaze/issues) | [Contact](mailto:achil-contact@recherche.enac.fr)
-
-![Logo](logo-large.png){ width=640px }
-
-**ArGaze** is a python toolkit to deal with gaze tracking in **Augmented Reality (AR) environment**.
-
-The ArGaze toolkit provides solutions to build 3D modeled AR environment defining **Areas Of Interest (AOI)** mapped on <a href="https://docs.opencv.org/4.x/d5/dae/tutorial_aruco_detection.html" target="_blank">OpenCV ArUco markers</a> and so ease experimentation design with wearable eye tracker device.
-
-Further, tracked gaze can be projected onto AR environment for live or post **gaze analysis** thanks to **timestamped data** features.
-
-ArGaze can be combined with any wearable eye tracking device python library like Tobii or Pupil glasses.
-
-!!! note
-
- *This work is greatly inspired by [Andrew T. Duchowski, Vsevolod Peysakhovich and Krzysztof Krejtz article](https://git.recherche.enac.fr/attachments/download/1942/Using_Pose_Estimation_to_Map_Gaze_to_Detected_Fidu.pdf) about using pose estimation to map gaze to detected fiducial markers.*
+Please visit the [ArGaze documentation website](http://achil.recherche.enac.fr/features/eye/argaze/index.html) to get started. \ No newline at end of file
diff --git a/docs/contributor_guide/build_documentation.md b/docs/contributor_guide/build_documentation.md
index 20df1df..b8fe7e3 100644
--- a/docs/contributor_guide/build_documentation.md
+++ b/docs/contributor_guide/build_documentation.md
@@ -5,7 +5,7 @@ ArGaze has a standard MkDocs layout which is built by Read the Docs. You can bui
!!! note
- *Consider that all inline commands below have to be executed into ArGaze root folder.*
+ *Consider that all inline commands below have to be executed from the root of the ArGaze package folder.*
Install required Python dependencies (MkDocs etc.):
```console
diff --git a/docs/contributor_guide/unitary_test.md b/docs/contributor_guide/unitary_test.md
index 42cf24c..36010bf 100644
--- a/docs/contributor_guide/unitary_test.md
+++ b/docs/contributor_guide/unitary_test.md
@@ -1,12 +1,11 @@
Unitary test
============
-ArGaze package unitary tests are based on [unittest](https://docs.python.org/fr/3.10/library/unittest.html) module.
-Test files tree structure is mirroring the file tree structure of src/argaze folder.
+ArGaze package unitary tests are based on the [unittest](https://docs.python.org/fr/3.10/library/unittest.html) module. The test file tree mirrors the file tree of the src/argaze folder.
!!! note
- *Consider that all inline commands below have to be executed into ArGaze root folder.*
+ *Consider that all inline commands below have to be executed from the root of the ArGaze package folder.*
To run all unitary tests:
diff --git a/docs/css/extra.css b/docs/css/extra.css
index 32bb71d..b0c7aef 100644
--- a/docs/css/extra.css
+++ b/docs/css/extra.css
@@ -10,14 +10,19 @@
max-width: 100%;
}
+a {color: #0299D2;}
+
.doc > code {color: #0299D2; background: none; border: 0px; font-size: 100%; padding: 0;}
.doc-contents {margin: 0px 0px 0px 1%;}
.doc-module > code {color: #404040;}
.doc-class > code {color: #0299D2;}
.doc-function > code {color: #0299D2;}
-.docutils code, a {color: dimgray;}
+
+.docutils code {color: dimgray;}
+.docutils a {color: dimgray;}
.docutils th, p {color: dimgray;}
+.docutils p {color: dimgray;}
.doc-label > code {border: 0px; border-radius: 15px; padding: 2px 8px; font-weight: bold; color: white;}
diff --git a/docs/img/aoi_projection.png b/docs/img/aoi_projection.png
new file mode 100644
index 0000000..a83b9cd
--- /dev/null
+++ b/docs/img/aoi_projection.png
Binary files differ
diff --git a/docs/img/aoi_scan_path.png b/docs/img/aoi_scan_path.png
new file mode 100644
index 0000000..7cac491
--- /dev/null
+++ b/docs/img/aoi_scan_path.png
Binary files differ
diff --git a/docs/user_guide/ar_environment_axis.png b/docs/img/ar_environment_axis.png
index 01c1791..01c1791 100644
--- a/docs/user_guide/ar_environment_axis.png
+++ b/docs/img/ar_environment_axis.png
Binary files differ
diff --git a/docs/img/aruco_dictionaries.png b/docs/img/aruco_dictionaries.png
new file mode 100644
index 0000000..ed5f287
--- /dev/null
+++ b/docs/img/aruco_dictionaries.png
Binary files differ
diff --git a/docs/img/aruco_scene.png b/docs/img/aruco_scene.png
new file mode 100644
index 0000000..611676e
--- /dev/null
+++ b/docs/img/aruco_scene.png
Binary files differ
diff --git a/docs/img/camera_calibrated.png b/docs/img/camera_calibrated.png
new file mode 100644
index 0000000..5f648a8
--- /dev/null
+++ b/docs/img/camera_calibrated.png
Binary files differ
diff --git a/docs/img/camera_calibration.png b/docs/img/camera_calibration.png
new file mode 100644
index 0000000..39b5f9c
--- /dev/null
+++ b/docs/img/camera_calibration.png
Binary files differ
diff --git a/docs/img/camera_calibration_step.png b/docs/img/camera_calibration_step.png
new file mode 100644
index 0000000..8d696a8
--- /dev/null
+++ b/docs/img/camera_calibration_step.png
Binary files differ
diff --git a/docs/img/camera_distorsion.png b/docs/img/camera_distorsion.png
new file mode 100644
index 0000000..314f04a
--- /dev/null
+++ b/docs/img/camera_distorsion.png
Binary files differ
diff --git a/docs/img/camera_marker_world_axis.png b/docs/img/camera_marker_world_axis.png
new file mode 100644
index 0000000..916f06f
--- /dev/null
+++ b/docs/img/camera_marker_world_axis.png
Binary files differ
diff --git a/docs/img/circle_intersection.png b/docs/img/circle_intersection.png
new file mode 100644
index 0000000..6893d32
--- /dev/null
+++ b/docs/img/circle_intersection.png
Binary files differ
diff --git a/docs/img/contains_point.png b/docs/img/contains_point.png
new file mode 100644
index 0000000..71a1050
--- /dev/null
+++ b/docs/img/contains_point.png
Binary files differ
diff --git a/docs/img/detected_markers.png b/docs/img/detected_markers.png
new file mode 100644
index 0000000..588364d
--- /dev/null
+++ b/docs/img/detected_markers.png
Binary files differ
diff --git a/docs/img/distance.png b/docs/img/distance.png
new file mode 100644
index 0000000..31cd249
--- /dev/null
+++ b/docs/img/distance.png
Binary files differ
diff --git a/docs/img/favicon.ico b/docs/img/favicon.ico
new file mode 100644
index 0000000..ddfdcc4
--- /dev/null
+++ b/docs/img/favicon.ico
Binary files differ
diff --git a/docs/img/fixation_and_saccade.png b/docs/img/fixation_and_saccade.png
new file mode 100644
index 0000000..1bd91b9
--- /dev/null
+++ b/docs/img/fixation_and_saccade.png
Binary files differ
diff --git a/docs/img/get_last_before.png b/docs/img/get_last_before.png
new file mode 100644
index 0000000..97d4170
--- /dev/null
+++ b/docs/img/get_last_before.png
Binary files differ
diff --git a/docs/img/get_last_until.png b/docs/img/get_last_until.png
new file mode 100644
index 0000000..4af2c26
--- /dev/null
+++ b/docs/img/get_last_until.png
Binary files differ
diff --git a/docs/img/heatmap.png b/docs/img/heatmap.png
new file mode 100644
index 0000000..5f07d77
--- /dev/null
+++ b/docs/img/heatmap.png
Binary files differ
diff --git a/docs/img/logo-argaze.png b/docs/img/logo-argaze.png
new file mode 100644
index 0000000..bbf4bdc
--- /dev/null
+++ b/docs/img/logo-argaze.png
Binary files differ
diff --git a/docs/img/overlapping.png b/docs/img/overlapping.png
new file mode 100644
index 0000000..0fc1b72
--- /dev/null
+++ b/docs/img/overlapping.png
Binary files differ
diff --git a/docs/img/point_spread.png b/docs/img/point_spread.png
new file mode 100644
index 0000000..7ee39bc
--- /dev/null
+++ b/docs/img/point_spread.png
Binary files differ
diff --git a/docs/img/pop_last_before.png b/docs/img/pop_last_before.png
new file mode 100644
index 0000000..15d02a0
--- /dev/null
+++ b/docs/img/pop_last_before.png
Binary files differ
diff --git a/docs/img/pop_last_until.png b/docs/img/pop_last_until.png
new file mode 100644
index 0000000..94b0c37
--- /dev/null
+++ b/docs/img/pop_last_until.png
Binary files differ
diff --git a/docs/img/pose_estimation.png b/docs/img/pose_estimation.png
new file mode 100644
index 0000000..d814575
--- /dev/null
+++ b/docs/img/pose_estimation.png
Binary files differ
diff --git a/docs/img/scan_path.png b/docs/img/scan_path.png
new file mode 100644
index 0000000..1c77598
--- /dev/null
+++ b/docs/img/scan_path.png
Binary files differ
diff --git a/docs/img/simone_aoi_scene.png b/docs/img/simone_aoi_scene.png
new file mode 100644
index 0000000..0273b79
--- /dev/null
+++ b/docs/img/simone_aoi_scene.png
Binary files differ
diff --git a/docs/img/simone_aruco_scene.png b/docs/img/simone_aruco_scene.png
new file mode 100644
index 0000000..ec165cc
--- /dev/null
+++ b/docs/img/simone_aruco_scene.png
Binary files differ
diff --git a/docs/img/simone_cockpit.png b/docs/img/simone_cockpit.png
new file mode 100644
index 0000000..4ffb2ad
--- /dev/null
+++ b/docs/img/simone_cockpit.png
Binary files differ
diff --git a/docs/img/simone_cockpit_3d.png b/docs/img/simone_cockpit_3d.png
new file mode 100644
index 0000000..92ded51
--- /dev/null
+++ b/docs/img/simone_cockpit_3d.png
Binary files differ
diff --git a/docs/img/simone_projection.png b/docs/img/simone_projection.png
new file mode 100644
index 0000000..bcbe4c0
--- /dev/null
+++ b/docs/img/simone_projection.png
Binary files differ
diff --git a/docs/img/vision_cone.png b/docs/img/vision_cone.png
new file mode 100644
index 0000000..19c5583
--- /dev/null
+++ b/docs/img/vision_cone.png
Binary files differ
diff --git a/docs/index.md b/docs/index.md
index 563ed56..7e679e3 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -1 +1,19 @@
-{!README.md!}
+---
+title: What is ArGaze?
+---
+
+# Enable gaze tracking in AR environment
+
+**Useful links**: [Installation](installation) | [Source Repository](https://git.recherche.enac.fr/projects/argaze/repository) | [Issue Tracker](https://git.recherche.enac.fr/projects/argaze/issues) | [Contact](mailto:achil-contact@recherche.enac.fr)
+
+The **ArGaze** Python toolkit provides solutions to build a 3D modeled **Augmented Reality (AR)** environment defining **Areas Of Interest (AOI)** mapped on <a href="https://docs.opencv.org/4.x/d5/dae/tutorial_aruco_detection.html" target="_blank">OpenCV ArUco markers</a>, easing experimentation design with wearable eye tracker devices.
+
+Further, tracked gaze can be projected onto the AR environment for live or post-hoc **gaze analysis** thanks to **timestamped data** features.
+
+![AR environment axis](img/ar_environment_axis.png)
+
+ArGaze can be combined with any wearable eye tracking device Python library, such as those for Tobii or Pupil glasses.
+
+!!! note
+
+ *This work is greatly inspired by [Andrew T. Duchowski, Vsevolod Peysakhovich and Krzysztof Krejtz article](https://git.recherche.enac.fr/attachments/download/1990/Using_Pose_Estimation_to_Map_Gaze_to_Detected_Fidu.pdf) about using pose estimation to map gaze to detected fiducial markers.*
diff --git a/docs/getting_started/installation.md b/docs/installation.md
index 327e79a..4b84a19 100644
--- a/docs/getting_started/installation.md
+++ b/docs/installation.md
@@ -1,9 +1,23 @@
-Install ArGaze
-==============
+---
+title: Package installation
+---
+
+How to install ArGaze
+=====================
+
+!!! warning
+
+ *ArGaze is not yet in public repository: [contact ACHIL laboratory](mailto:achil-contact@recherche.enac.fr) to get in touch.*
+
+Clone ArGaze repository:
+
+```console
+git clone ssh://git@git.recherche.enac.fr/interne-ihm-aero/eye-tracking/argaze.git
+```
!!! note
- *Consider that all inline commands below have to be executed into ArGaze root folder.*
+ *Consider that all inline commands below have to be executed from the root of the ArGaze package folder.*
Install build tool package:
@@ -23,7 +37,7 @@ pip install ./dist/argaze-VERSION.whl
!!! note
- **As Argaze library developper**
+ **As ArGaze library contributor**
*You should prefer to install the package in developer mode to test live code changes:*
```
diff --git a/docs/license.md b/docs/license.md
new file mode 100644
index 0000000..6a51b7d
--- /dev/null
+++ b/docs/license.md
@@ -0,0 +1,5 @@
+---
+title: License
+---
+
+{!LICENSE!}
diff --git a/docs/logo-large.png b/docs/logo-large.png
deleted file mode 100644
index cf0a214..0000000
--- a/docs/logo-large.png
+++ /dev/null
Binary files differ
diff --git a/docs/use_cases/simone_a320_cockpit_simulator.md b/docs/use_cases/simone_a320_cockpit_simulator.md
new file mode 100644
index 0000000..eceed10
--- /dev/null
+++ b/docs/use_cases/simone_a320_cockpit_simulator.md
@@ -0,0 +1,28 @@
+---
+title: SimOne A320 cockpit simulator
+---
+
+SimOne A320 cockpit simulator
+=============================
+
+The [ACHIL platform](http://achil.recherche.enac.fr) has an A320 glass cockpit simulator usually operated by ENAC’s Air Transportation department for student system engineering courses. It is also used during MCTA training to give trainees an overview of the pilot’s counterpart. As this cockpit is no longer certified, it can be modified for research and prototyping purposes. It can also be connected to any simulation run on the platform and integrate the rest of the traffic.
+
+In order to track the pilots' gaze during experimentation, a set of ArUco markers has been positioned to cover most of the cockpit workspace.
+
+![Cockpit](../../img/simone_cockpit.png)
+
+Then, in order to build an AR environment from such a complex-geometry workspace, a 3D LIDAR scanner was used to get a 3D scan of the cockpit.
+
+![Cockpit 3D](../../img/simone_cockpit_3d.png)
+
+The 3D scan was then loaded into a 3D editor to help report ArUco marker and AOI poses.
+
+![ArUco scene](../../img/simone_aruco_scene.png) ![AOI scene](../../img/simone_aoi_scene.png)
+
+Finally, a Python script connects the Tobii eye tracker glasses to the ArGaze toolkit. The 3D AR environment is loaded, then ArUco markers are detected from the Tobii eye tracker field camera stream, allowing estimation of the pilot's head pose. The AOI are projected into the camera frame, then gaze positions are analyzed to identify fixations and saccades, and finally fixations are checked against the projected AOI. A sketch of this processing loop is given after the picture below.
+
+![AOI and gaze projection](../../img/simone_projection.png)
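+
+The overall processing loop of that script can be illustrated with ArGaze features presented elsewhere in this guide. This is a minimal sketch only: the environment file name, the scene name and the `video_stream`/`gaze_stream` objects are hypothetical placeholders, not the actual SimOne script.
+
+``` python
+from argaze import ArFeatures, GazeFeatures
+from argaze.GazeAnalysis import DispersionThresholdIdentification
+
+# Load the 3D AR environment and get the cockpit scene (hypothetical names)
+ar_environment = ArFeatures.ArEnvironment.from_json('./simone_environment.json')
+ar_scene = ar_environment.scenes['SimOne cockpit']
+
+# Create a fixation/saccade identifier based on the dispersion threshold algorithm
+gaze_movement_identifier = DispersionThresholdIdentification.GazeMovementIdentifier(50, 200)
+
+# Assuming the Tobii glasses streams provide field camera frames and timestamped GazePosition instances
+for frame, (timestamp, gaze_position) in zip(video_stream, gaze_stream):
+
+    # Detect ArUco markers into the field camera frame
+    ar_environment.aruco_detector.detect_markers(frame)
+
+    try:
+
+        # Estimate the head pose from detected markers then project AOI into the camera frame
+        tvec, rmat, consistent_markers = ar_scene.estimate_pose(ar_environment.aruco_detector.detected_markers)
+        aoi2D_scene = ar_scene.project(tvec, rmat)
+
+        # Identify gaze movement and check which projected AOI a fixation falls into
+        gaze_movement = gaze_movement_identifier.identify(timestamp, gaze_position)
+
+        if GazeFeatures.is_fixation(gaze_movement):
+
+            for name, aoi in aoi2D_scene.items():
+
+                if aoi.contains_point(gaze_position.value):
+
+                    print(f'fixation matches {name} AOI')
+
+    except (ArFeatures.PoseEstimationFailed, ArFeatures.SceneProjectionFailed) as e:
+
+        print(e)
+```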
+
+A demonstration movie shows ArUco detection and AOI projection in real time:
+
+![type:video](https://git.recherche.enac.fr/attachments/download/1991/marker_detection_and_aoi_projection.mp4) \ No newline at end of file
diff --git a/docs/user_guide/ar_environment/environment_exploitation.md b/docs/user_guide/ar_environment/environment_exploitation.md
new file mode 100644
index 0000000..db40385
--- /dev/null
+++ b/docs/user_guide/ar_environment/environment_exploitation.md
@@ -0,0 +1,36 @@
+Environment exploitation
+========================
+
+Once loaded, AR environment assets can be exploited as illustrated below:
+
+```python
+# Access the AR environment ArUco detector, passing it a frame in which to detect ArUco markers
+ar_environment.aruco_detector.detect_markers(frame)
+
+# Access to an AR environment scene
+my_first_scene = ar_environment.scenes['my first AR scene']
+
+try:
+
+ # Try to estimate AR scene pose from detected markers
+ tvec, rmat, consistent_markers = my_first_scene.estimate_pose(ar_environment.aruco_detector.detected_markers)
+
+ # Project AR scene into camera frame according to the estimated pose
+ # The optional visual_hfov argument is set to 160° to clip the AOI scene according to a vision cone
+ aoi2D_scene = my_first_scene.project(tvec, rmat, visual_hfov=160)
+
+ # Draw estimated AR scene axis
+ my_first_scene.draw_axis(frame)
+
+ # Draw AOI2D scene projection
+ aoi2D_scene.draw(frame)
+
+ # Do something with AOI2D scene projection
+ ...
+
+# Catch exceptions raised by estimate_pose and project methods
+except (ArFeatures.PoseEstimationFailed, ArFeatures.SceneProjectionFailed) as e:
+
+ print(e)
+
+```
diff --git a/docs/user_guide/ar_environment/environment_setup.md b/docs/user_guide/ar_environment/environment_setup.md
new file mode 100644
index 0000000..b63f64a
--- /dev/null
+++ b/docs/user_guide/ar_environment/environment_setup.md
@@ -0,0 +1,77 @@
+Environment Setup
+=================
+
+AR environment setup is loaded from JSON file format.
+
+Each AR environment defines a unique ArUco detector dedicated to the detection of markers from a specific ArUco dictionary and with a given size. However, it is possible to load multiple AR scenes into the same AR environment.
+
+Here is a JSON environment file example, where it is assumed that the mentioned .obj files are located relative to the environment file on disk.
+
+```
+{
+ "name": "my AR environment",
+ "aruco_detector": {
+ "dictionary": {
+ "name": "DICT_APRILTAG_16h5"
+ },
+ "marker_size": 5,
+ "camera": {
+ "rms": 0.6,
+ "dimensions": [
+ 1920,
+ 1080
+ ],
+ "K": [
+ [
+ 1135,
+ 0.0,
+ 956
+ ],
+ [
+ 0.0,
+ 1135,
+ 560
+ ],
+ [
+ 0.0,
+ 0.0,
+ 1.0
+ ]
+ ],
+ "D": [
+ 0.01655492265003404,
+ 0.1985524264972037,
+ 0.002129965902489484,
+ -0.0019528582922179365,
+ -0.5792910353639452
+ ]
+ },
+ "parameters": {
+ "cornerRefinementMethod": 3,
+ "aprilTagQuadSigma": 2,
+ "aprilTagDeglitch": 1
+ }
+ },
+ "scenes": {
+ "my first AR scene" : {
+ "aruco_scene": "./first_scene/markers.obj",
+ "aoi_scene": "./first_scene/aoi.obj",
+ "angle_tolerance": 15.0,
+ "distance_tolerance": 2.54
+ },
+ "my second AR scene" : {
+ "aruco_scene": "./second_scene/markers.obj",
+ "aoi_scene": "./second_scene/aoi.obj",
+ "angle_tolerance": 15.0,
+ "distance_tolerance": 2.54
+ }
+ }
+}
+```
+
+```python
+from argaze import ArFeatures
+
+# Load AR environment
+ar_environment = ArFeatures.ArEnvironment.from_json('./environment.json')
+```
diff --git a/docs/user_guide/ar_environment/introduction.md b/docs/user_guide/ar_environment/introduction.md
new file mode 100644
index 0000000..608cdb4
--- /dev/null
+++ b/docs/user_guide/ar_environment/introduction.md
@@ -0,0 +1,6 @@
+AR environment setup
+====================
+
+ArGaze toolkit eases ArUco and AOI management in a single AR environment setup.
+
+This section refers to [ArFeatures](/argaze/#argaze.ArFeatures).
diff --git a/docs/user_guide/areas_of_interest/aoi_frame.md b/docs/user_guide/areas_of_interest/aoi_frame.md
new file mode 100644
index 0000000..855e302
--- /dev/null
+++ b/docs/user_guide/areas_of_interest/aoi_frame.md
@@ -0,0 +1,43 @@
+---
+title: AOI frame
+---
+
+AOI Frame
+=========
+
+[AOIFeatures](/argaze/#argaze.AreaOfInterest.AOIFeatures) provides an [AOIFrame](/argaze/#argaze.AreaOfInterest.AOIFeatures.AOIFrame) class to draw into a 2D AOI.
+
+## Point spread
+
+The **point_spread** method draws a gaussian point spread into the frame at a given pointer position (see the sketch after the heatmap example below).
+
+![Point spread](../../img/point_spread.png)
+
+## Heatmap
+
+Heatmap visualisation shows where a pointer spends most of its time.
+
+![Heatmap](../../img/heatmap.png)
+
+```python
+from argaze.AreaOfInterest import AOIFeatures
+
+# Create an AOI
+aoi = AOIFeatures.AreaOfInterest([[0, 0], [1, 0], [1, 1], [0, 1]])
+
+# Create AOIFrame related to this AOI with 800px * 600px resolution
+aoi_frame = AOIFeatures.AOIFrame(aoi, (800, 600))
+
+# Initialize heatmap
+aoi_frame.heatmap_init()
+
+# Assuming a pointer position (x, y) is moving inside frame
+...:
+
+ # Update heatmap at pointer position
+ aoi_frame.heatmap_update((x, y), sigma=0.05)
+
+ # Do something with heatmap picture
+ ... aoi_frame.heatmap
+
+``` \ No newline at end of file
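+
+The **point_spread** method introduced above can be used in a similar way. A minimal sketch, assuming it only needs the pointer position (check the API reference for its exact signature):
+
+``` python
+# Draw a gaussian point spread at the pointer position into the AOI frame picture
+aoi_frame.point_spread((x, y))
+```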
diff --git a/docs/user_guide/areas_of_interest/aoi_matching.md b/docs/user_guide/areas_of_interest/aoi_matching.md
new file mode 100644
index 0000000..3bffeb9
--- /dev/null
+++ b/docs/user_guide/areas_of_interest/aoi_matching.md
@@ -0,0 +1,47 @@
+---
+title: AOI matching
+---
+
+AOI matching
+============
+
+Once an AOI3D scene is projected into a frame as an AOI2D scene, it may be needed to know which AOI2D is being looked at.
+The AOI class in [AOIFeatures](/argaze/#argaze.AreaOfInterest.AOIFeatures) provides two ways to accomplish such a task.
+
+## Pointer-based matching
+
+Test whether a 2D pointer is inside an AOI using the contains_point() method, as illustrated below.
+
+![Contains point](../../img/contains_point.png)
+
+``` python
+pointer = (x, y)
+
+for name, aoi in aoi2D_scene.items():
+
+ if aoi.contains_point(pointer):
+
+ # Do something with looked aoi
+ ...
+
+```
+
+It is also possible to get where a pointer is located inside an AOI, provided that the AOI is a rectangular plane:
+
+``` python
+
+inner_x, inner_y = aoi.inner_axis(pointer)
+
+```
+
+## Circle-based matching
+
+As positions have limited accuracy, it is possible to define a radius around a pointer and test the circle's intersection with an AOI.
+
+![Circle intersection](../../img/circle_intersection.png)
+
+``` python
+
+intersection_shape, intersection_aoi_ratio, intersection_circle_ratio = aoi.circle_intersection(pointer, radius)
+
+```
diff --git a/docs/user_guide/areas_of_interest/aoi_projection.md b/docs/user_guide/areas_of_interest/aoi_projection.md
new file mode 100644
index 0000000..7ffc42d
--- /dev/null
+++ b/docs/user_guide/areas_of_interest/aoi_projection.md
@@ -0,0 +1,22 @@
+---
+title: AOI projection
+---
+
+AOI projection
+==============
+
+An AOI3D scene can be rotated and translated according to a pose estimation before being projected onto the camera frame as an AOI2D scene.
+
+![AOI projection](../../img/aoi_projection.png)
+
+``` python
+...
+
+# Assuming pose estimation is done (tvec and rmat)
+
+# Project AOI 3D scene according to pose estimation and camera intrinsic parameters
+aoi2D_scene = aoi3D_scene.project(tvec, rmat, aruco_camera.K)
+
+# Draw AOI 2D scene
+aoi2D_scene.draw(frame)
+```
diff --git a/docs/user_guide/areas_of_interest/aoi_scene_description.md b/docs/user_guide/areas_of_interest/aoi_scene_description.md
new file mode 100644
index 0000000..dcda74b
--- /dev/null
+++ b/docs/user_guide/areas_of_interest/aoi_scene_description.md
@@ -0,0 +1,57 @@
+---
+title: AOI scene description
+---
+
+AOI scene description
+=====================
+
+An AOI3D scene is built from a 3D model with all AOI as 3D planes and is loaded through the OBJ file format.
+Notice that plane normals are not needed and that planes are not necessarily 4-vertex shapes.
+
+``` obj
+o PIC_ND
+v 6.513238 -27.113548 -25.163900
+v 22.994461 -27.310783 -24.552130
+v 6.718690 -6.467261 -26.482569
+v 23.252594 -6.592890 -25.873484
+f 1 2 4 3
+o PIC_ND_Aircraft
+v 6.994747 -21.286463 -24.727146
+v 22.740919 -21.406120 -24.147078
+v 7.086208 -12.096219 -25.314123
+v 22.832380 -12.215876 -24.734055
+f 5 6 8 7
+o PIC_ND_Wind
+v 7.086199 -11.769333 -25.335127
+v 12.081032 -11.807289 -25.151123
+v 7.115211 -8.854101 -25.521320
+v 12.110044 -8.892057 -25.337317
+f 9 10 12 11
+o PIC_ND_Waypoint
+v 17.774197 -11.819057 -24.943428
+v 22.769030 -11.857013 -24.759424
+v 17.803209 -8.903825 -25.129622
+v 22.798042 -8.941781 -24.945618
+f 13 14 16 15
+...
+o Thrust_Lever
+v 19.046124 15.523837 4.774072
+v 18.997263 -0.967944 5.701000
+v 18.988382 15.923470 -13.243046
+v 18.921808 -0.417994 -17.869610
+v 19.032232 19.241346 -3.040264
+v 19.020988 6.392717 5.872663
+v 18.945322 6.876906 -17.699480
+s off
+f 185 190 186 188 191 187 189
+...
+```
+
+Here is a code sample showing how to load an AOI3D scene from an OBJ file description:
+
+``` python
+from argaze.AreaOfInterest import AOI3DScene
+
+# Load an AOI3D scene from OBJ file
+aoi3D_scene = AOI3DScene.AOI3DScene.from_obj('./aoi_scene.obj')
+```
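+
+Each OBJ object becomes a named AOI in the loaded scene. As a sketch, assuming the AOI3D scene can be iterated like the AOI2D scene shown in the AOI matching section:
+
+``` python
+# Print each loaded AOI name and its 3D shape (dictionary-like iteration is an assumption here)
+for name, aoi3D in aoi3D_scene.items():
+
+    print(name, aoi3D)
+```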
diff --git a/docs/user_guide/areas_of_interest/introduction.md b/docs/user_guide/areas_of_interest/introduction.md
new file mode 100644
index 0000000..7d44785
--- /dev/null
+++ b/docs/user_guide/areas_of_interest/introduction.md
@@ -0,0 +1,8 @@
+About Areas Of Interest (AOI)
+=============================
+
+The [AreaOfInterest submodule](/argaze/#argaze.AreaOfInterest) allows dealing with AOI in an AR environment through high-level classes:
+
+* [AOIFeatures](/argaze/#argaze.AreaOfInterest.AOIFeatures)
+* [AOI3DScene](/argaze/#argaze.AreaOfInterest.AOI3DScene)
+* [AOI2DScene](/argaze/#argaze.AreaOfInterest.AOI2DScene) \ No newline at end of file
diff --git a/docs/user_guide/areas_of_interest/vision_cone_filtering.md b/docs/user_guide/areas_of_interest/vision_cone_filtering.md
new file mode 100644
index 0000000..e2b31ea
--- /dev/null
+++ b/docs/user_guide/areas_of_interest/vision_cone_filtering.md
@@ -0,0 +1,18 @@
+Vision cone filtering
+=====================
+
+The AOI3D scene provides cone clipping support in order to select only the AOI which are inside the vision cone field.
+
+![Vision cone](../../img/vision_cone.png)
+
+``` python
+# Transform scene into camera referential
+aoi3D_camera = aoi3D_scene.transform(tvec, rmat)
+
+# Get aoi inside vision cone field
+# The vision cone tip is positioned behind the head
+aoi3D_inside, aoi3D_outside = aoi3D_camera.vision_cone(cone_radius=300, cone_height=150, cone_tip=[0., 0., -20.])
+
+# Keep only aoi inside vision cone field
+aoi3D_scene = aoi3D_scene.copy(exclude=aoi3D_outside.keys())
+```
diff --git a/docs/user_guide/aruco_markers/camera_calibration.md b/docs/user_guide/aruco_markers/camera_calibration.md
new file mode 100644
index 0000000..2a1ba84
--- /dev/null
+++ b/docs/user_guide/aruco_markers/camera_calibration.md
@@ -0,0 +1,83 @@
+Camera calibration
+==================
+
+Any camera device has to be calibrated to compensate for its optical distortion.
+
+![Camera calibration](../../img/camera_calibration.png)
+
+The first step to calibrate a camera is to create an ArUco calibration board like in the code below:
+
+``` python
+from argaze.ArUcoMarkers import ArUcoMarkersDictionary, ArUcoBoard
+
+# Create ArUco dictionary
+aruco_dictionary = ArUcoMarkersDictionary.ArUcoMarkersDictionary('DICT_APRILTAG_16h5')
+
+# Create an ArUco board of 7 columns and 5 rows with 5 cm squares with 3cm ArUco markers inside
+aruco_board = ArUcoBoard.ArUcoBoard(7, 5, 5, 3, aruco_dictionary)
+
+# Export ArUco board with 300 dpi resolution
+aruco_board.save('./calibration_board.png', 300)
+```
+
+Then, the calibration process needs many different captures of an ArUco board through the camera, which are then passed to an ArUco detector instance.
+
+![Calibration step](../../img/camera_calibration_step.png)
+
+The code sample below shows how to detect board corners in camera frames, store the detected corners, then process them to build calibration data and, finally, save it to a JSON file:
+
+``` python
+from argaze.ArUcoMarkers import ArUcoMarkersDictionary, ArUcoCamera, ArUcoBoard, ArUcoDetector
+
+# Create ArUco dictionary
+aruco_dictionary = ArUcoMarkersDictionary.ArUcoMarkersDictionary('DICT_APRILTAG_16h5')
+
+# Create ArUco camera
+aruco_camera = ArUcoCamera.ArUcoCamera(dimensions=(1920, 1080))
+
+# Create ArUco board of 7 columns and 5 rows with 5 cm squares with 3cm aruco markers inside
+# Note: This board is the one expected during further board tracking
+expected_aruco_board = ArUcoBoard.ArUcoBoard(7, 5, 5, 3, aruco_dictionary)
+
+# Create ArUco detector
+aruco_detector = ArUcoDetector.ArUcoDetector(dictionary=aruco_dictionary, marker_size=3)
+
+# Capture frames from a live Full HD video stream (1920x1080)
+while video_stream.is_alive():
+
+ frame = video_stream.read()
+
+ # Detect all board corners in frame
+ aruco_detector.detect_board(frame, expected_aruco_board, expected_aruco_board.markers_number)
+
+ # If board corners are detected
+ if aruco_detector.board_corners_number > 0:
+
+ # Draw board corners to show that board tracking succeeded
+ aruco_detector.draw_board(frame)
+
+ # Append tracked board data for further calibration processing
+ aruco_camera.store_calibration_data(aruco_detector.board_corners, aruco_detector.board_corners_identifier)
+
+# Start camera calibration processing for Full HD image resolution
+print('Calibrating camera...')
+aruco_camera.calibrate(expected_aruco_board)
+
+# Print camera calibration data
+print('Calibration succeeded!')
+print(f'RMS:{aruco_camera.rms}')
+print(f'Camera matrix:{aruco_camera.K}')
+print(f'Distortion coefficients:{aruco_camera.D}')
+
+# Save camera calibration data
+aruco_camera.to_json('calibration.json')
+```
+
+Then, the camera calibration data are loaded to compensate for optical distortion during ArUco marker detection:
+
+``` python
+from argaze.ArUcoMarkers import ArUcoCamera
+
+# Load camera calibration data
+aruco_camera = ArUcoCamera.ArUcoCamera.from_json('./calibration.json')
+```
diff --git a/docs/user_guide/aruco_markers/dictionary_selection.md b/docs/user_guide/aruco_markers/dictionary_selection.md
new file mode 100644
index 0000000..b9ba510
--- /dev/null
+++ b/docs/user_guide/aruco_markers/dictionary_selection.md
@@ -0,0 +1,17 @@
+Dictionary selection
+====================
+
+ArUco markers always belong to a set of markers called an ArUco markers dictionary.
+
+![ArUco dictionaries](../../img/aruco_dictionaries.png)
+
+Many ArUco dictionaries exist, with properties concerning the format, the number of markers or the difference between markers to avoid tracking errors.
+
+Here is the documentation [about ArUco markers dictionaries](https://docs.opencv.org/3.4/d9/d6a/group__aruco.html#gac84398a9ed9dd01306592dd616c2c975).
+
+``` python
+from argaze.ArUcoMarkers import ArUcoMarkersDictionary
+
+# Create a dictionary of specific April tags
+aruco_dictionary = ArUcoMarkersDictionary.ArUcoMarkersDictionary('DICT_APRILTAG_16h5')
+```
diff --git a/docs/user_guide/aruco_markers/introduction.md b/docs/user_guide/aruco_markers/introduction.md
new file mode 100644
index 0000000..59795b5
--- /dev/null
+++ b/docs/user_guide/aruco_markers/introduction.md
@@ -0,0 +1,14 @@
+About ArUco markers
+===================
+
+![OpenCV ArUco markers](https://pyimagesearch.com/wp-content/uploads/2020/12/aruco_generate_tags_header.png)
+
+The OpenCV library provides a module to detect fiducial markers in a picture and estimate their pose (cf. the [OpenCV ArUco tutorial page](https://docs.opencv.org/4.x/d5/dae/tutorial_aruco_detection.html)).
+
+The ArGaze [ArUcoMarkers submodule](/argaze/#argaze.ArUcoMarkers) eases marker creation, camera calibration, marker detection and 3D scene pose estimation through a set of high-level classes:
+
+* [ArUcoMarkersDictionary](/argaze/#argaze.ArUcoMarkers.ArUcoMarkersDictionary)
+* [ArUcoBoard](/argaze/#argaze.ArUcoMarkers.ArUcoBoard)
+* [ArUcoCamera](/argaze/#argaze.ArUcoMarkers.ArUcoCamera)
+* [ArUcoDetector](/argaze/#argaze.ArUcoMarkers.ArUcoDetector)
+* [ArUcoScene](/argaze/#argaze.ArUcoMarkers.ArUcoScene) \ No newline at end of file
diff --git a/docs/user_guide/aruco_markers/markers_creation.md b/docs/user_guide/aruco_markers/markers_creation.md
new file mode 100644
index 0000000..9909dc7
--- /dev/null
+++ b/docs/user_guide/aruco_markers/markers_creation.md
@@ -0,0 +1,17 @@
+Markers creation
+================
+
+The creation of ArUco markers from a dictionary is illustrated in the code below:
+
+``` python
+from argaze.ArUcoMarkers import ArUcoMarkersDictionary
+
+# Create a dictionary of specific April tags
+aruco_dictionary = ArUcoMarkersDictionary.ArUcoMarkersDictionary('DICT_APRILTAG_16h5')
+
+# Export marker n°5 as 3.5 cm picture with 300 dpi resolution
+aruco_dictionary.create_marker(5, 3.5).save('./markers/', 300)
+
+# Export all dictionary markers as 3.5 cm pictures with 300 dpi resolution
+aruco_dictionary.save('./markers/', 3.5, 300)
+``` \ No newline at end of file
diff --git a/docs/user_guide/aruco_markers/markers_detection.md b/docs/user_guide/aruco_markers/markers_detection.md
new file mode 100644
index 0000000..886ee69
--- /dev/null
+++ b/docs/user_guide/aruco_markers/markers_detection.md
@@ -0,0 +1,47 @@
+Markers detection
+=================
+
+![Detected markers](../../img/detected_markers.png)
+
+First, the ArUco detector needs to know the expected dictionary and size (in centimeters) of the markers it has to detect.
+
+Notice that extra parameters are passed to the detector: see the [OpenCV ArUco markers detection parameters documentation](https://docs.opencv.org/4.x/d1/dcd/structcv_1_1aruco_1_1DetectorParameters.html) to learn more.
+
+``` python
+from argaze.ArUcoMarkers import ArUcoDetector, ArUcoCamera
+
+# Assuming camera calibration data are loaded
+
+# Loading extra detector parameters
+extra_parameters = ArUcoDetector.DetectorParameters.from_json('./detector_parameters.json')
+
+# Create ArUco detector to track DICT_APRILTAG_16h5 5cm length markers
+aruco_detector = ArUcoDetector.ArUcoDetector(camera=aruco_camera, dictionary='DICT_APRILTAG_16h5', marker_size=5, parameters=extra_parameters)
+```
+
+Here is a detector parameters JSON file example:
+
+```
+{
+ "cornerRefinementMethod": 1,
+ "aprilTagQuadSigma": 2,
+ "aprilTagDeglitch": 1
+}
+```
+
+The ArUco detector processes a frame to detect markers and allows drawing the detection results onto it:
+
+``` python
+# Detect markers into a frame and draw them
+aruco_detector.detect_markers(frame)
+aruco_detector.draw_detected_markers(frame)
+
+# Get corner positions in the frame for each detected marker
+for marker_id, marker in aruco_detector.detected_markers.items():
+
+ print(f'marker {marker_id} corners: ', marker.corners)
+
+ # Do something with detected marker i corners
+ ...
+
+```
diff --git a/docs/user_guide/aruco_markers/markers_pose_estimation.md b/docs/user_guide/aruco_markers/markers_pose_estimation.md
new file mode 100644
index 0000000..2459715
--- /dev/null
+++ b/docs/user_guide/aruco_markers/markers_pose_estimation.md
@@ -0,0 +1,20 @@
+Markers pose estimation
+=======================
+
+After marker detection, it is possible to estimate each marker's pose in the camera axis.
+
+![Pose estimation](../../img/pose_estimation.png)
+
+``` python
+# Estimate markers pose
+aruco_detector.estimate_markers_pose()
+
+# Get pose estimation related to each detected markers
+for marker_id, marker in aruco_detector.detected_markers.items():
+
+ print(f'marker {marker_id} translation: ', marker.translation)
+ print(f'marker {marker_id} rotation: ', marker.rotation)
+
+ # Do something with each marker pose estimation
+ ...
+``` \ No newline at end of file
diff --git a/docs/user_guide/aruco_markers/markers_scene_description.md b/docs/user_guide/aruco_markers/markers_scene_description.md
new file mode 100644
index 0000000..9938f23
--- /dev/null
+++ b/docs/user_guide/aruco_markers/markers_scene_description.md
@@ -0,0 +1,117 @@
+Markers scene description
+=========================
+
+The ArGaze toolkit provides an ArUcoScene class to describe where ArUco markers are placed in a 3D model.
+
+![ArUco scene](../../img/aruco_scene.png)
+
+ArUco scene is useful to:
+
+* filter markers that belong to this predefined scene,
+* check the consistency of detected markers according to the place where each marker is expected to be,
+* estimate the pose of the scene from the pose of detected markers.
+
+The ArUco scene description uses the common OBJ file format that can be exported from most 3D editors. Notice that plane normals (vn) need to be exported.
+
+``` obj
+o DICT_APRILTAG_16h5#0_Marker
+v -3.004536 0.022876 2.995370
+v 2.995335 -0.015498 3.004618
+v -2.995335 0.015498 -3.004618
+v 3.004536 -0.022876 -2.995370
+vn 0.0064 1.0000 -0.0012
+s off
+f 1//1 2//1 4//1 3//1
+o DICT_APRILTAG_16h5#1_Marker
+v -33.799068 46.450645 -32.200436
+v -27.852505 47.243549 -32.102116
+v -34.593925 52.396473 -32.076626
+v -28.647360 53.189377 -31.978306
+vn -0.0135 -0.0226 0.9997
+s off
+f 5//2 6//2 8//2 7//2
+...
+```
+
+ArUco scene description can also be written in a JSON file format.
+
+``` json
+{
+ "dictionary": "DICT_ARUCO_ORIGINAL",
+ "marker_size": 1,
+ "places": {
+ "0": {
+ "translation": [0, 0, 0],
+ "rotation": [0, 0, 0]
+ },
+ "1": {
+ "translation": [10, 10, 0],
+ "rotation": [0, 0, 0]
+ },
+ "2": {
+ "translation": [0, 10, 0],
+ "rotation": [0, 0, 0]
+ }
+ }
+}
+```
+
+Here is a code sample showing how to load an ArUcoScene OBJ file description:
+
+``` python
+from argaze.ArUcoMarkers import ArUcoScene
+
+# Create an ArUco scene from a OBJ file description
+aruco_scene = ArUcoScene.ArUcoScene.from_obj('./markers.obj')
+
+# Print loaded marker places
+for place_id, place in aruco_scene.places.items():
+
+ print(f'place {place_id} for marker: ', place.marker.identifier)
+ print(f'place {place_id} translation: ', place.translation)
+ print(f'place {place_id} rotation: ', place.rotation)
+```
+
+## Markers filtering
+
+Once markers are detected, here is how to filter them to keep only those which belong to the scene:
+
+``` python
+scene_markers, remaining_markers = aruco_scene.filter_markers(aruco_detector.detected_markers)
+```
+
+## Marker poses consistency
+
+Then, scene marker poses can be validated by verifying their spatial consistency within angle and distance tolerances. This is particularly useful to discard ambiguous marker pose estimations when markers are parallel to the camera plane (see [issue on OpenCV Contribution repository](https://github.com/opencv/opencv_contrib/issues/3190#issuecomment-1181970839)).
+
+``` python
+# Check scene markers consistency with 10° angle tolerance and 1 cm distance tolerance
+consistent_markers, unconsistent_markers, unconsistencies = aruco_scene.check_markers_consistency(scene_markers, 10, 1)
+```
+
+## Scene pose estimation
+
+Several approaches are available to perform ArUco scene pose estimation from markers belonging to the scene.
+
+The first approach considers that scene pose can be estimated **from a single marker pose**:
+
+``` python
+# Let's select one consistent scene marker
+marker_id, marker = consistent_markers.popitem()
+
+# Estimate scene pose from a single marker
+tvec, rmat = self.aruco_scene.estimate_pose_from_single_marker(marker)
+```
+
+The second approach considers that scene pose can be estimated **by averaging several marker poses**:
+
+``` python
+# Estimate scene pose from all consistent scene markers
+tvec, rmat = self.aruco_scene.estimate_pose_from_markers(consistent_markers)
+```
+
+The third approach is only available when ArUco markers are placed in such a configuration that it is possible to **define orthogonal axes**:
+
+``` python
+tvec, rmat = self.aruco_scene.estimate_pose_from_axis_markers(origin_marker, horizontal_axis_marker, vertical_axis_marker)
+```
diff --git a/docs/getting_started/demos.md b/docs/user_guide/demos.md
index 1a53c82..1a53c82 100644
--- a/docs/getting_started/demos.md
+++ b/docs/user_guide/demos.md
diff --git a/docs/user_guide/gaze_analysis/gaze_movement.md b/docs/user_guide/gaze_analysis/gaze_movement.md
new file mode 100644
index 0000000..6c7ab76
--- /dev/null
+++ b/docs/user_guide/gaze_analysis/gaze_movement.md
@@ -0,0 +1,141 @@
+Gaze movement
+=============
+
+## Definition
+
+!!! note
+
+ *"The act of classifying eye movements into distinct events is, on a general level, driven by a desire to isolate different intervals of the data stream strongly correlated with certain oculomotor or cognitive properties."*
+
+ Citation from ["One algorithm to rule them all? An evaluation and discussion of ten eye movement event-detection algorithms"](https://link.springer.com/article/10.3758/s13428-016-0738-9) article.
+
+[GazeFeatures](/argaze/#argaze.GazeFeatures) defines abstract [GazeMovement](/argaze/#argaze.GazeFeatures.GazeMovement) class, then abstract [Fixation](/argaze/#argaze.GazeFeatures.Fixation) and [Saccade](/argaze/#argaze.GazeFeatures.Saccade) classes which inherit from [GazeMovement](/argaze/#argaze.GazeFeatures.GazeMovement).
+
+The **positions** attribute of a [GazeMovement](/argaze/#argaze.GazeFeatures.GazeMovement) contains all the [GazePositions](/argaze/#argaze.GazeFeatures.GazePosition) belonging to the movement.
+
+![Fixation and Saccade](../../img/fixation_and_saccade.png)
+
+## Identification
+
+[GazeFeatures](/argaze/#argaze.GazeFeatures) defines an abstract [GazeMovementIdentifier](/argaze/#argaze.GazeFeatures.GazeMovementIdentifier) class to allow adding various identification algorithms.
+
+Some gaze movement identification algorithms are available thanks to the [GazeAnalysis](/argaze/#argaze.GazeAnalysis) submodule:
+
+* [Dispersion threshold identification (I-DT)](/argaze/#argaze.GazeAnalysis.DispersionThresholdIdentification)
+* [Velocity threshold identification (I-VT)](/argaze/#argaze.GazeAnalysis.VelocityThresholdIdentification)
+
+### Identify method
+
+The [GazeMovementIdentifier](/argaze/#argaze.GazeFeatures.GazeMovementIdentifier) **identify** method allows feeding its identification algorithm with successive gaze positions to output Fixation, Saccade or any kind of GazeMovement instances.
+
+Here is a code sample based on the I-DT algorithm to illustrate how to use it:
+
+``` python
+from argaze import GazeFeatures
+from argaze.GazeAnalysis import DispersionThresholdIdentification
+
+# Create a gaze movement identifier based on dispersion algorithm with 50px max deviation 200 ms max duration thresholds
+gaze_movement_identifier = DispersionThresholdIdentification.GazeMovementIdentifier(50, 200)
+
+# Assuming that timestamped gaze positions are provided through live stream or later data reading
+...:
+
+ gaze_movement = gaze_movement_identifier.identify(timestamp, gaze_position)
+
+ # Fixation identified
+ if GazeFeatures.is_fixation(gaze_movement):
+
+ # Access to first gaze position of identified fixation
+ start_ts, start_position = gaze_movement.positions.first
+
+ # Access to fixation duration
+ print(f'duration: {gaze_movement.duration}')
+
+ # Iterate over all gaze positions of identified fixation
+ for ts, position in gaze_movement.positions.items():
+
+ # Do something with each fixation position
+ ...
+
+ # Saccade identified
+ elif GazeFeatures.is_saccade(gaze_movement):
+
+ # Access to first gaze position of identified saccade
+ start_ts, start_position = gaze_movement.positions.first
+
+ # Access to saccade amplitude
+ print(f'amplitude: {gaze_movement.amplitude}')
+
+ # Iterate over all gaze positions of identified saccade
+ for ts, position in gaze_movement.positions.items():
+
+ # Do something with each saccade position
+ ...
+
+ # No gaze movement identified
+ else:
+
+ continue
+
+```
+
+### Browse method
+
+The [GazeMovementIdentifier](/argaze/#argaze.GazeFeatures.GazeMovementIdentifier) **browse** method allows passing a TimeStampedGazePositions buffer to apply the identification algorithm on all gaze positions inside.
+
+Identified gaze movements are returned through:
+
+* [TimeStampedGazeMovements](/argaze/#argaze.GazeFeatures.TimeStampedGazeMovements) instance where all fixations are stored by starting gaze position timestamp.
+* [TimeStampedGazeMovements](/argaze/#argaze.GazeFeatures.TimeStampedGazeMovements) instance where all saccades are stored by starting gaze position timestamp.
+* [TimeStampedGazeStatus](/argaze/#argaze.GazeFeatures.TimeStampedGazeStatus) instance where all gaze positions are linked to a fixation or saccade index.
+
+``` python
+# Assuming that timestamped gaze positions are provided through data reading
+
+ts_fixations, ts_saccades, ts_status = gaze_movement_identifier.browse(ts_gaze_positions)
+
+```
+
+* ts_fixations would look like:
+
+|timestamp|positions |duration|dispersion|focus |
+|:--------|:-------------------------------------------------------------|:-------|:---------|:--------|
+|60034 |{"60034":[846,620], "60044":[837,641], "60054":[835,649], ...}|450 |40 |(840,660)|
+|60504 |{"60504":[838,667], "60514":[838,667], "60524":[837,669], ...}|100 |38 |(834,651)|
+|... |... |... |.. |... |
+
+* ts_saccades would look like:
+
+|timestamp|positions |duration|
+|:--------|:---------------------------------------|:-------|
+|60484 |{"60484":[836, 669], "60494":[837, 669]}|10 |
+|60594 |{"60594":[833, 613], "60614":[927, 601]}|20 |
+|... |... |... |
+
+* ts_status would look like:
+
+|timestamp|position |type |index|
+|:--------|:---------|:-------|:----|
+|60034 |(846, 620)|Fixation|1 |
+|60044 |(837, 641)|Fixation|1 |
+|... |... |... |. |
+|60464 |(836, 668)|Fixation|1 |
+|60474 |(836, 668)|Fixation|1 |
+|60484 |(836, 669)|Saccade |1 |
+|60494 |(837, 669)|Saccade |1 |
+|60504 |(838, 667)|Fixation|2 |
+|60514 |(838, 667)|Fixation|2 |
+|... |... |... |. |
+|60574 |(825, 629)|Fixation|2 |
+|60584 |(829, 615)|Fixation|2 |
+|60594 |(833, 613)|Saccade |2 |
+|60614 |(927, 601)|Saccade |2 |
+|60624 |(933, 599)|Fixation|3 |
+|60634 |(934, 603)|Fixation|3 |
+|... |... |... |. |
+
+
+!!! note
+ [TimeStampedGazeMovements](/argaze/#argaze.GazeFeatures.TimeStampedGazeMovements) and [TimeStampedGazeStatus](/argaze/#argaze.GazeFeatures.TimeStampedGazeStatus) classes inherit from the [TimeStampedBuffer](/argaze/#argaze.DataStructures.TimeStampedBuffer) class.
+
+ Read the timestamped data section to understand all the features it provides. \ No newline at end of file
diff --git a/docs/user_guide/gaze_analysis/gaze_position.md b/docs/user_guide/gaze_analysis/gaze_position.md
new file mode 100644
index 0000000..67f15f8
--- /dev/null
+++ b/docs/user_guide/gaze_analysis/gaze_position.md
@@ -0,0 +1,68 @@
+Gaze position
+=============
+
+[GazeFeatures](/argaze/#argaze.GazeFeatures) defines a [GazePosition](/argaze/#argaze.GazeFeatures.GazePosition) class to handle point coordinates with a precision value.
+
+``` python
+from argaze import GazeFeatures
+
+# Define a basic gaze position
+gaze_position = GazeFeatures.GazePosition((123, 456))
+
+# Define a gaze position with a precision value
+gaze_position = GazeFeatures.GazePosition((789, 765), precision=10)
+
+# Access to gaze position value and precision
+print(f'position: {gaze_position.value}')
+print(f'precision: {gaze_position.precision}')
+
+```
+
+## Validity
+
+[GazeFeatures](/argaze/#argaze.GazeFeatures) also defines an [UnvalidGazePosition](/argaze/#argaze.GazeFeatures.UnvalidGazePosition) class that inherits from [GazePosition](/argaze/#argaze.GazeFeatures.GazePosition) to handle cases where no gaze position exists for any device-specific reason.
+
+``` python
+from argaze import GazeFeatures
+
+# Define a basic unvalid gaze position
+gaze_position = GazeFeatures.UnvalidGazePosition()
+
+# Define a basic unvalid gaze position with a message value
+gaze_position = GazeFeatures.UnvalidGazePosition("Something bad happened")
+
+# Access to gaze position validity
+print(f'validity: {gaze_position.valid}')
+
+```
+
+## Distance
+
+[GazePosition](/argaze/#argaze.GazeFeatures.GazePosition) class provides a **distance** method to calculate the distance to another gaze position instance.
+
+![Distance](../../img/distance.png)
+
+``` python
+# Distance between A and B positions
+d = gaze_position_A.distance(gaze_position_B)
+```
+
+## Overlapping
+
+[GazePosition](/argaze/#argaze.GazeFeatures.GazePosition) class provides an **overlap** method to test if a gaze position overlaps another one considering their precisions.
+
+![Gaze overlapping](../../img/overlapping.png)
+
+``` python
+# Check that A overlaps B
+if gaze_position_A.overlap(gaze_position_B):
+
+ # Do something if A overlaps B
+ ...
+
+# Check that A overlaps B and B overlaps A
+if gaze_position_A.overlap(gaze_position_B, both=True):
+
+ # Do something if A overlaps B AND B overlaps A
+ ...
+```
diff --git a/docs/user_guide/gaze_analysis/introduction.md b/docs/user_guide/gaze_analysis/introduction.md
new file mode 100644
index 0000000..d1bb122
--- /dev/null
+++ b/docs/user_guide/gaze_analysis/introduction.md
@@ -0,0 +1,7 @@
+Gaze analysis
+=============
+
+This section refers to:
+
+* [GazeFeatures](/argaze/#argaze.GazeFeatures)
+* [GazeAnalysis](/argaze/#argaze.GazeAnalysis) \ No newline at end of file
diff --git a/docs/user_guide/gaze_analysis/scan_path.md b/docs/user_guide/gaze_analysis/scan_path.md
new file mode 100644
index 0000000..e00682f
--- /dev/null
+++ b/docs/user_guide/gaze_analysis/scan_path.md
@@ -0,0 +1,168 @@
+Scan path
+=========
+
+[GazeFeatures](/argaze/#argaze.GazeFeatures) defines classes to handle successive fixations/saccades and analyse their spatial or temporal properties.
+
+## Fixation based scan path
+
+### Definition
+
+The [ScanPath](/argaze/#argaze.GazeFeatures.ScanPath) class is defined as a list of [ScanSteps](/argaze/#argaze.GazeFeatures.ScanStep) which are defined as a fixation and a consecutive saccade.
+
+![Fixation based scan path](../../img/scan_path.png)
+
+As fixations and saccades are identified, the scan path is built by calling respectively **append_fixation** and **append_saccade** methods.
+
+### Analysis
+
+[GazeFeatures](/argaze/#argaze.GazeFeatures) defines an abstract [ScanPathAnalyzer](/argaze/#argaze.GazeFeatures.ScanPathAnalyzer) class to allow adding various analysis algorithms.
+
+Some scan path analyses are available thanks to the [GazeAnalysis](/argaze/#argaze.GazeAnalysis) submodule:
+
+* [K-Coefficient](/argaze/#argaze.GazeAnalysis.KCoefficient)
+* [Nearest Neighbor Index](/argaze/#argaze.GazeAnalysis.NearestNeighborIndex)
+
+### Example
+
+Here is a code sample to illustrate how to build a scan path and analyze it:
+
+``` python
+from argaze import GazeFeatures
+from argaze.GazeAnalysis import KCoefficient
+
+# Create an empty scan path
+scan_path = GazeFeatures.ScanPath()
+
+# Create a K coefficient analyzer
+kc_analyzer = KCoefficient.ScanPathAnalyzer()
+
+# Assuming a gaze movement is identified at ts time
+...:
+
+ # Fixation identified
+ if GazeFeatures.is_fixation(gaze_movement):
+
+ # Append fixation to scan path : no step is created
+ scan_path.append_fixation(ts, gaze_movement)
+
+ # Saccade identified
+ elif GazeFeatures.is_saccade(gaze_movement):
+
+ # Append saccade to scan path : a new step should be created
+ new_step = scan_path.append_saccade(data_ts, gaze_movement)
+
+ # Analyse scan path
+ if new_step:
+
+ K = kc_analyzer.analyze(scan_path)
+
+ # Do something with K metric
+ ...
+```
+
+## AOI based scan path
+
+### Definition
+
+The [AOIScanPath](/argaze/#argaze.GazeFeatures.AOIScanPath) class is defined as a list of [AOIScanSteps](/argaze/#argaze.GazeFeatures.AOIScanStep), each defined as a set of consecutive fixations looking at the same Area Of Interest (AOI) followed by a saccade.
+
+![AOI based scan path](../../img/aoi_scan_path.png)
+
+As fixations and saccades are identified, the scan path is built by calling respectively **append_fixation** and **append_saccade** methods.
+
+### Analysis
+
+[GazeFeatures](/argaze/#argaze.GazeFeatures) defines an abstract [AOIScanPathAnalyzer](/argaze/#argaze.GazeFeatures.AOIScanPathAnalyzer) class to allow adding various analysis algorithms.
+
+Some AOI scan path analyses are available thanks to the [GazeAnalysis](/argaze/#argaze.GazeAnalysis) submodule:
+
+* [Transition matrix](/argaze/#argaze.GazeAnalysis.TransitionMatrix)
+* [Entropy](/argaze/#argaze.GazeAnalysis.Entropy)
+* [Lempel-Ziv complexity](/argaze/#argaze.GazeAnalysis.LempelZivComplexity)
+* [N-Gram](/argaze/#argaze.GazeAnalysis.NGram)
+* [K-modified coefficient](/argaze/#argaze.GazeAnalysis.KCoefficient)
+
+### Example
+
+Here is a code sample to illustrate how to build an AOI scan path and analyze it:
+
+``` python
+from argaze import GazeFeatures
+from argaze.GazeAnalysis import LempelZivComplexity
+
+# Assuming all AOI names are listed
+...
+
+# Create an empty AOI scan path
+aoi_scan_path = GazeFeatures.AOIScanPath(aoi_names)
+
+# Create a Lempel-Ziv complexity analyzer
+lzc_analyzer = LempelZivComplexity.AOIScanPathAnalyzer()
+
+# Assuming a gaze movement is identified at ts time
+...:
+
+ # Fixation identified
+ if GazeFeatures.is_fixation(gaze_movement):
+
+ # Assuming fixation is detected as inside an AOI
+ ...
+
+ # Append fixation to AOI scan path : a new step should be created
+ new_step = aoi_scan_path.append_fixation(ts, gaze_movement, looked_aoi_name)
+
+ # Analyse AOI scan path
+ if new_step:
+
+ LZC = lzc_analyzer.analyze(aoi_scan_path)
+
+ # Do something with LZC metric
+ ...
+
+ # Saccade identified
+ elif GazeFeatures.is_saccade(gaze_movement):
+
+ # Append saccade to scan path : no step is created
+ aoi_scan_path.append_saccade(data_ts, gaze_movement)
+
+```
+
+### Advanced
+
+The [AOIScanPath](/argaze/#argaze.GazeFeatures.AOIScanPath) class provides some advanced features to further analyse it.
+
+#### String representation
+
+When a new [AOIScanStep](/argaze/#argaze.GazeFeatures.AOIScanStep) is created, the [AOIScanPath](/argaze/#argaze.GazeFeatures.AOIScanPath) internally assigns a unique letter index to its AOI to ease pattern analysis.
+Then, the [AOIScanPath str](/argaze/#argaze.GazeFeatures.AOIScanPath.__str__) representation returns the concatenation of each [AOIScanStep](/argaze/#argaze.GazeFeatures.AOIScanStep) letter.
+The [AOIScanPath get_letter_aoi](/argaze/#argaze.GazeFeatures.AOIScanPath.get_letter_aoi) method returns the AOI related to a given letter index.
+
+``` python
+# Assuming the following AOI scan path is built: Foo > Bar > Shu > Foo
+aoi_scan_path = ...
+
+# String representation should be: 'ABCA'
+print(str(aoi_scan_path))
+
+# Output should be: 'Bar'
+print(aoi_scan_path.get_letter_aoi('B'))
+
+```
+
+#### Transition matrix
+
+When a new [AOIScanStep](/argaze/#argaze.GazeFeatures.AOIScanStep) is created, the [AOIScanPath](/argaze/#argaze.GazeFeatures.AOIScanPath) internally counts the number of transitions from one AOI to another to ease Markov chain analysis.
+Then, the [AOIScanPath transition_matrix](/argaze/#argaze.GazeFeatures.AOIScanPath.transition_matrix) property returns a *Pandas DataFrame* where indexes are transition departures and columns are transition destinations.
+
+Here is an example of a transition matrix for the following [AOIScanPath](/argaze/#argaze.GazeFeatures.AOIScanPath): Foo > Bar > Shu > Foo > Bar
+
+| |Foo|Bar|Shu|
+|:--|:--|:--|:--|
+|Foo|0 |2 |0 |
+|Bar|0 |0 |1 |
+|Shu|1 |0 |0 |
+
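+Here is a minimal sketch of reading this property (assuming the aoi_scan_path instance built in the example above and the Foo > Bar > Shu > Foo > Bar path); as the returned object is a regular *Pandas DataFrame*, standard indexing applies:
+
+``` python
+# Get the transition matrix as a Pandas DataFrame
+transition_matrix = aoi_scan_path.transition_matrix
+
+# Read the number of transitions from 'Foo' to 'Bar' (2 in the path above)
+print(transition_matrix.loc['Foo', 'Bar'])
+```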
+
+#### Fixations count
+
+The [AOIScanPath fixations_count](/argaze/#argaze.GazeFeatures.AOIScanPath.fixations_count) method returns the total number of fixations in the whole scan path along with a dictionary giving the fixations count per AOI.
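+
+Here is a minimal sketch of how these two values could be retrieved (assuming the aoi_scan_path instance built in the example above):
+
+``` python
+# Get the total fixations count and the fixations count per AOI
+scan_fixations_count, aoi_fixations_count = aoi_scan_path.fixations_count()
+
+print(f'{scan_fixations_count} fixations in the whole scan path')
+print(f'{aoi_fixations_count["Foo"]} fixations on Foo AOI')
+```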
diff --git a/docs/getting_started/ready-made.md b/docs/user_guide/ready-made.md
index 32c475c..32c475c 100644
--- a/docs/getting_started/ready-made.md
+++ b/docs/user_guide/ready-made.md
diff --git a/docs/user_guide/timestamped_data/data_synchronisation.md b/docs/user_guide/timestamped_data/data_synchronisation.md
new file mode 100644
index 0000000..4454fa4
--- /dev/null
+++ b/docs/user_guide/timestamped_data/data_synchronisation.md
@@ -0,0 +1,106 @@
+Data synchronisation
+====================
+
+Recorded data need to be synchronised so that they can be linked before further processing.
+
+The [TimeStampedBuffer](/argaze/#argaze.DataStructures.TimeStampedBuffer) class provides various methods to help in such task.
+
+## Pop last before
+
+![Pop last before](../../img/pop_last_before.png)
+
+The code below shows how to use the [pop_last_before](/argaze/#argaze.DataStructures.TimeStampedBuffer.pop_last_before) method to synchronise two timestamped data buffers with different timestamps:
+
+``` python
+from argaze import DataStructures
+
+# Assuming A_data_record and B_data_record are TimeStampedBuffer instances with different timestamps
+
+for A_ts, A_data in A_data_record.items():
+
+ try:
+
+ # Get nearest B data before current A data and remove all B data before (including the returned one)
+ B_ts, B_data = B_data_record.pop_last_before(A_ts)
+
+ # No data stored before A_ts timestamp
+ except KeyError:
+
+ pass
+
+```
+
+## Pop last until
+
+![Pop last until](../../img/pop_last_until.png)
+
+The code below shows how to use the [pop_last_until](/argaze/#argaze.DataStructures.TimeStampedBuffer.pop_last_until) method to synchronise two timestamped data buffers with different timestamps:
+
+``` python
+from argaze import DataStructures
+
+# Assuming A_data_record and B_data_record are TimeStampedBuffer instances with different timestamps
+
+for A_ts, A_data in A_data_record.items():
+
+ try:
+
+ # Get nearest B data after current A data and remove all B data before
+ B_ts, B_data = B_data_record.pop_last_until(A_ts)
+
+ # No data stored until A_ts timestamp
+ except KeyError:
+
+ pass
+
+```
+
+## Get last before
+
+![Get last before](../../img/get_last_before.png)
+
+The code below shows how to use the [get_last_before](/argaze/#argaze.DataStructures.TimeStampedBuffer.get_last_before) method to synchronise two timestamped data buffers with different timestamps:
+
+``` python
+from argaze import DataStructures
+
+# Assuming A_data_record and B_data_record are TimeStampedBuffer instances with different timestamps
+
+for A_ts, A_data in A_data_record.items():
+
+ try:
+
+ # Get nearest B data before current A data
+ B_ts, B_data = B_data_record.get_last_before(A_ts)
+
+ # No data stored before A_ts timestamp
+ except KeyError:
+
+ pass
+
+```
+
+## Get last until
+
+![Get last until](../../img/get_last_until.png)
+
+The code below shows how to use the [get_last_until](/argaze/#argaze.DataStructures.TimeStampedBuffer.get_last_until) method to synchronise two timestamped data buffers with different timestamps:
+
+``` python
+from argaze import DataStructures
+
+# Assuming A_data_record and B_data_record are TimeStampedBuffer instances with different timestamps
+
+for A_ts, A_data in A_data_record.items():
+
+ try:
+
+ # Get nearest B data after current A data
+ B_ts, B_data = B_data_record.get_last_until(A_ts)
+
+ # No data stored until A_ts timestamp
+ except KeyError:
+
+ pass
+
+```
diff --git a/docs/user_guide/timestamped_data/introduction.md b/docs/user_guide/timestamped_data/introduction.md
new file mode 100644
index 0000000..2cee263
--- /dev/null
+++ b/docs/user_guide/timestamped_data/introduction.md
@@ -0,0 +1,6 @@
+Timestamped data
+================
+
+Working with wearable eye tracker devices implies handling various timestamped data such as frames, gaze positions, pupil diameters, fixations, saccades, ...
+
+This section mainly refers to the [DataStructures.TimeStampedBuffer](/argaze/#argaze.DataStructures.TimeStampedBuffer) class.
diff --git a/docs/user_guide/timestamped_data/ordered_dictionary.md b/docs/user_guide/timestamped_data/ordered_dictionary.md
new file mode 100644
index 0000000..8c93fc6
--- /dev/null
+++ b/docs/user_guide/timestamped_data/ordered_dictionary.md
@@ -0,0 +1,19 @@
+Ordered dictionary
+==================
+
+The [TimeStampedBuffer](/argaze/#argaze.DataStructures.TimeStampedBuffer) class inherits from [OrderedDict](https://docs.python.org/3/library/collections.html#collections.OrderedDict), as data are de facto ordered by time.
+
+Any data type can be stored using int or float keys as timestamps.
+
+```python
+from argaze import DataStructures
+
+# Create a timestamped data buffer
+ts_data = DataStructures.TimeStampedBuffer()
+
+# Store any data type using numeric keys
+ts_data[0] = 123
+ts_data[0.1] = "message"
+ts_data[0.23] = {"key": "value"}
+...
+```
diff --git a/docs/user_guide/timestamped_data/pandas_dataframe_conversion.md b/docs/user_guide/timestamped_data/pandas_dataframe_conversion.md
new file mode 100644
index 0000000..caddb11
--- /dev/null
+++ b/docs/user_guide/timestamped_data/pandas_dataframe_conversion.md
@@ -0,0 +1,31 @@
+Pandas dataframe conversion
+===========================
+
+A [Pandas dataframe](https://pandas.pydata.org/docs/getting_started/intro_tutorials/01_table_oriented.html#min-tut-01-tableoriented) is a Python data structure allowing powerful table processing.
+
+A [TimeStampedBuffer](/argaze/#argaze.DataStructures.TimeStampedBuffer) instance can be converted into a dataframe provided that data values are stored as dictionaries.
+
+```python
+from argaze import DataStructures
+
+# Create a timestamped data buffer
+ts_data = DataStructures.TimeStampedBuffer()
+
+# Store various data as dictionary
+ts_data[10] = {"A_key": 0, "B_key": 0.123}
+ts_data[20] = {"A_key": 4, "B_key": 0.567}
+ts_data[30] = {"A_key": 8, "B_key": 0.901}
+...
+
+# Convert timestamped data buffer into dataframe
+ts_buffer_dataframe = ts_data.as_dataframe()
+```
+
+ts_buffer_dataframe would look like:
+
+|timestamp|A_key|B_key|
+|:--------|:----|:----|
+|10 |0 |0.123|
+|20 |4 |0.567|
+|30 |8 |0.901|
+|... |... |... |
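+
+As a minimal illustration of the table processing this enables (assuming the dataframe above), regular *Pandas* operations can then be applied:
+
+```python
+# Compute the mean of each column
+print(ts_buffer_dataframe.mean())
+
+# Select rows where A_key is greater than 2
+print(ts_buffer_dataframe[ts_buffer_dataframe["A_key"] > 2])
+```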
diff --git a/docs/user_guide/timestamped_data/saving_and_loading.md b/docs/user_guide/timestamped_data/saving_and_loading.md
new file mode 100644
index 0000000..d3f2b9c
--- /dev/null
+++ b/docs/user_guide/timestamped_data/saving_and_loading.md
@@ -0,0 +1,14 @@
+Saving and loading
+==================
+
+A [TimeStampedBuffer](/argaze/#argaze.DataStructures.TimeStampedBuffer) instance can be saved to and loaded from the JSON file format.
+
+```python
+from argaze import DataStructures
+
+# Assuming ts_data is a TimeStampedBuffer instance
+...
+
+# Save
+ts_data.to_json('./data.json')
+
+# Load
+ts_data = DataStructures.TimeStampedBuffer.from_json('./data.json')
+
+```
diff --git a/mkdocs.yml b/mkdocs.yml
index 98b3557..b74c453 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -1,17 +1,50 @@
site_name: ArGaze documentation
nav:
- index.md
-- Getting Started:
- - getting_started/installation.md
- - getting_started/ready-made.md
- - getting_started/demos.md
+- installation.md
+- license.md
+- User Guide:
+ - ArUco Markers:
+ - user_guide/aruco_markers/introduction.md
+ - user_guide/aruco_markers/dictionary_selection.md
+ - user_guide/aruco_markers/markers_creation.md
+ - user_guide/aruco_markers/camera_calibration.md
+ - user_guide/aruco_markers/markers_detection.md
+ - user_guide/aruco_markers/markers_pose_estimation.md
+ - user_guide/aruco_markers/markers_scene_description.md
+ - Areas Of Interest:
+ - user_guide/areas_of_interest/introduction.md
+ - user_guide/areas_of_interest/aoi_scene_description.md
+ - user_guide/areas_of_interest/aoi_projection.md
+ - user_guide/areas_of_interest/vision_cone_filtering.md
+ - user_guide/areas_of_interest/aoi_matching.md
+ - user_guide/areas_of_interest/aoi_frame.md
+ - Augmented Reality environment:
+ - user_guide/ar_environment/introduction.md
+ - user_guide/ar_environment/environment_setup.md
+ - user_guide/ar_environment/environment_exploitation.md
+ - Gaze Analysis:
+ - user_guide/gaze_analysis/introduction.md
+ - user_guide/gaze_analysis/gaze_position.md
+ - user_guide/gaze_analysis/gaze_movement.md
+ - user_guide/gaze_analysis/scan_path.md
+ - Timestamped data:
+ - user_guide/timestamped_data/introduction.md
+ - user_guide/timestamped_data/ordered_dictionary.md
+ - user_guide/timestamped_data/saving_and_loading.md
+ - user_guide/timestamped_data/data_synchronisation.md
+ - user_guide/timestamped_data/pandas_dataframe_conversion.md
+- Use Cases:
+ - use_cases/simone_a320_cockpit_simulator.md
- Code Reference:
- argaze.md
-- Contributor's Guide:
+- Contributor Guide:
- contributor_guide/build_documentation.md
- contributor_guide/unitary_test.md
theme:
name: readthedocs
+ logo: img/logo-argaze.png
+ favicon: img/favicon.ico
palette:
primary: #0299D2
extra_css:
@@ -20,6 +53,7 @@ plugins:
- search
- autorefs
- include-markdown
+ - mkdocs-video
- mkdocstrings:
handlers:
# See: https://mkdocstrings.github.io/python/usage/