-rw-r--r--  docs/img/argaze_load_gui.png | bin 0 -> 168761 bytes
-rw-r--r--  docs/img/argaze_load_gui_opencv_frame.png | bin 0 -> 339281 bytes
-rw-r--r--  docs/img/argaze_load_gui_opencv_pipeline.png | bin 0 -> 517856 bytes
-rw-r--r--  docs/img/argaze_load_gui_random.png | bin 0 -> 33593 bytes
-rw-r--r--  docs/img/argaze_load_gui_random_pipeline.png | bin 0 -> 74788 bytes
-rw-r--r--  docs/img/eye_tracker_context.png | bin 0 -> 41128 bytes
-rw-r--r--  docs/img/pipeline_input_context.png | bin 49064 -> 0 bytes
-rw-r--r--  docs/index.md | 18
-rw-r--r--  docs/user_guide/aruco_marker_pipeline/advanced_topics/scripting.md | 77
-rw-r--r--  docs/user_guide/aruco_marker_pipeline/aoi_3d_frame.md | 37
-rw-r--r--  docs/user_guide/aruco_marker_pipeline/configuration_and_execution.md | 88
-rw-r--r--  docs/user_guide/eye_tracking_context/advanced_topics/context_definition.md | 193
-rw-r--r--  docs/user_guide/eye_tracking_context/advanced_topics/scripting.md | 106
-rw-r--r--  docs/user_guide/eye_tracking_context/advanced_topics/timestamped_gaze_positions_edition.md (renamed from docs/user_guide/gaze_analysis_pipeline/timestamped_gaze_positions_edition.md) | 13
-rw-r--r--  docs/user_guide/eye_tracking_context/configuration_and_execution.md | 65
-rw-r--r--  docs/user_guide/eye_tracking_context/context_modules/opencv.md | 47
-rw-r--r--  docs/user_guide/eye_tracking_context/context_modules/pupil_labs.md | 32
-rw-r--r--  docs/user_guide/eye_tracking_context/context_modules/random.md | 32
-rw-r--r--  docs/user_guide/eye_tracking_context/context_modules/tobii_pro_glasses_2.md | 59
-rw-r--r--  docs/user_guide/eye_tracking_context/introduction.md | 19
-rw-r--r--  docs/user_guide/gaze_analysis_pipeline/advanced_topics/scripting.md | 54
-rw-r--r--  docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md | 2
-rw-r--r--  docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md | 58
-rw-r--r--  docs/user_guide/gaze_analysis_pipeline/introduction.md | 8
-rw-r--r--  docs/user_guide/gaze_analysis_pipeline/visualization.md | 33
-rw-r--r--  docs/user_guide/pipeline_input_context/configuration_and_connection.md | 35
-rw-r--r--  docs/user_guide/pipeline_input_context/context_definition.md | 57
-rw-r--r--  docs/user_guide/pipeline_input_context/introduction.md | 24
-rw-r--r--  docs/user_guide/utils/demonstrations_scripts.md | 16
-rw-r--r--  docs/user_guide/utils/estimate_aruco_markers_pose.md | 60
-rw-r--r--  docs/user_guide/utils/main_commands.md (renamed from docs/user_guide/utils/ready-made_scripts.md) | 31
-rw-r--r--  mkdocs.yml | 25
-rw-r--r--  src/argaze.test/GazeAnalysis/VelocityThresholdIdentification.py | 514
-rw-r--r--  src/argaze/ArFeatures.py | 18
-rw-r--r--  src/argaze/ArUcoMarker/ArUcoDetector.py | 12
-rw-r--r--  src/argaze/ArUcoMarker/ArUcoMarker.py | 102
-rw-r--r--  src/argaze/ArUcoMarker/ArUcoMarkerGroup.py | 5
-rw-r--r--  src/argaze/AreaOfInterest/AOI2DScene.py | 4
-rw-r--r--  src/argaze/AreaOfInterest/AOI3DScene.py | 10
-rw-r--r--  src/argaze/AreaOfInterest/AOIFeatures.py | 20
-rw-r--r--  src/argaze/DataFeatures.py | 11
-rw-r--r--  src/argaze/GazeAnalysis/DeviationCircleCoverage.py | 258
-rw-r--r--  src/argaze/GazeAnalysis/Entropy.py | 100
-rw-r--r--  src/argaze/GazeFeatures.py | 38
-rw-r--r--  src/argaze/__init__.py | 2
-rw-r--r--  src/argaze/__main__.py | 12
-rw-r--r--  src/argaze/utils/UtilsFeatures.py | 4
-rw-r--r--  src/argaze/utils/contexts/OpenCV.py | 66
-rw-r--r--  src/argaze/utils/contexts/Random.py | 25
-rw-r--r--  src/argaze/utils/contexts/TobiiProGlasses2.py | 4
-rw-r--r--  src/argaze/utils/demo/opencv_cursor_context.json | 6
-rw-r--r--  src/argaze/utils/demo/opencv_window_context.json | 6
52 files changed, 1469 insertions, 937 deletions
diff --git a/docs/img/argaze_load_gui.png b/docs/img/argaze_load_gui.png
new file mode 100644
index 0000000..b8874b2
--- /dev/null
+++ b/docs/img/argaze_load_gui.png
Binary files differ
diff --git a/docs/img/argaze_load_gui_opencv_frame.png b/docs/img/argaze_load_gui_opencv_frame.png
new file mode 100644
index 0000000..3ab3b5e
--- /dev/null
+++ b/docs/img/argaze_load_gui_opencv_frame.png
Binary files differ
diff --git a/docs/img/argaze_load_gui_opencv_pipeline.png b/docs/img/argaze_load_gui_opencv_pipeline.png
new file mode 100644
index 0000000..227a91d
--- /dev/null
+++ b/docs/img/argaze_load_gui_opencv_pipeline.png
Binary files differ
diff --git a/docs/img/argaze_load_gui_random.png b/docs/img/argaze_load_gui_random.png
new file mode 100644
index 0000000..c95a9f5
--- /dev/null
+++ b/docs/img/argaze_load_gui_random.png
Binary files differ
diff --git a/docs/img/argaze_load_gui_random_pipeline.png b/docs/img/argaze_load_gui_random_pipeline.png
new file mode 100644
index 0000000..210d410
--- /dev/null
+++ b/docs/img/argaze_load_gui_random_pipeline.png
Binary files differ
diff --git a/docs/img/eye_tracker_context.png b/docs/img/eye_tracker_context.png
new file mode 100644
index 0000000..638e9a6
--- /dev/null
+++ b/docs/img/eye_tracker_context.png
Binary files differ
diff --git a/docs/img/pipeline_input_context.png b/docs/img/pipeline_input_context.png
deleted file mode 100644
index 8c195ea..0000000
--- a/docs/img/pipeline_input_context.png
+++ /dev/null
Binary files differ
diff --git a/docs/index.md b/docs/index.md
index 2d00d16..2b668a3 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -7,20 +7,26 @@ title: What is ArGaze?
**Useful links**: [Installation](installation.md) | [Source Repository](https://gitpub.recherche.enac.fr/argaze) | [Issue Tracker](https://git.recherche.enac.fr/projects/argaze/issues) | [Contact](mailto:argaze-contact@recherche.enac.fr)
**ArGaze** is an open and flexible Python software library designed to provide a unified and modular approach to gaze analysis or gaze interaction.
-**ArGaze** facilitates **real-time and/or post-processing analysis** for both **screen-based and head-mounted** eye tracking systems.
+
By offering a wide array of gaze metrics and supporting easy extension to incorporate additional metrics, **ArGaze** empowers researchers and practitioners to explore novel analytical approaches efficiently.
![ArGaze pipeline](img/argaze_pipeline.png)
+## Eye tracking context
+
+**ArGaze** facilitates the integration of both **screen-based and head-mounted** eye tracking systems for **real-time and/or post-processing analysis**.
+
+[Learn how to handle various eye tracking contexts by reading the dedicated user guide section](./user_guide/eye_tracking_context/introduction.md).
+
## Gaze analysis pipeline
-**ArGaze** provides an extensible modules library, allowing to select application-specific algorithms at each pipeline step:
+Once incoming eye tracking data is available, **ArGaze** provides an extensible module library, allowing application-specific algorithms to be selected at each pipeline step:
* **Fixation/Saccade identification**: dispersion threshold identification, velocity threshold identification, etc.
* **Area Of Interest (AOI) matching**: focus point inside, deviation circle coverage, etc.
* **Scan path analysis**: transition matrix, entropy, explore/exploit ratio, etc.
-Once the incoming data is formatted as required, all those gaze analysis features can be used with any screen-based eye tracker devices.
+All those gaze analysis features can be used with any screen-based eye tracker device.
[Learn how to build gaze analysis pipelines for various use cases by reading the dedicated user guide section](./user_guide/gaze_analysis_pipeline/introduction.md).
@@ -37,3 +43,9 @@ This ArUco marker pipeline can be combined with any wearable eye tracking device
!!! note
*ArUco marker pipeline is greatly inspired by [Andrew T. Duchowski, Vsevolod Peysakhovich and Krzysztof Krejtz article](https://git.recherche.enac.fr/attachments/download/1990/Using_Pose_Estimation_to_Map_Gaze_to_Detected_Fidu.pdf) about using pose estimation to map gaze to detected fiducial markers.*
+
+## Demonstration
+
+![type:video](https://achil.recherche.enac.fr/videos/argaze_features.mp4)
+
+[Test **ArGaze** by reading the dedicated user guide section](./user_guide/utils/demonstrations_scripts.md). \ No newline at end of file
diff --git a/docs/user_guide/aruco_marker_pipeline/advanced_topics/scripting.md b/docs/user_guide/aruco_marker_pipeline/advanced_topics/scripting.md
index c81d57d..a9d66e9 100644
--- a/docs/user_guide/aruco_marker_pipeline/advanced_topics/scripting.md
+++ b/docs/user_guide/aruco_marker_pipeline/advanced_topics/scripting.md
@@ -74,35 +74,80 @@ from argaze import ArFeatures
...
```
-## Pipeline execution outputs
+## Pipeline execution
-The [ArUcoCamera.watch](../../../argaze.md/#argaze.ArFeatures.ArCamera.watch) method returns data about pipeline execution.
+### Detect ArUco markers, estimate scene pose and project 3D AOI
+
+Pass each camera image with timestamp information to the [ArUcoCamera.watch](../../../argaze.md/#argaze.ArFeatures.ArCamera.watch) method to execute the whole pipeline dedicated to ArUco marker detection, scene pose estimation and 3D AOI projection.
+
+!!! warning "Mandatory"
+
+ The [ArUcoCamera.watch](../../../argaze.md/#argaze.ArFeatures.ArCamera.watch) method must be called from a *try* block to catch pipeline exceptions.
```python
-# Assuming that timestamped images are available
+# Assuming that Full HD (1920x1080) images are available with timestamp values
...:
+ # Edit timestamped image
+ timestamped_image = DataFeatures.TimestampedImage(image, timestamp=timestamp)
+
try:
- # Watch image with ArUco camera
- aruco_camera.watch(image, timestamp=timestamp)
+ # Detect ArUco markers, estimate scene pose then, project 3D AOI into camera frame
+ aruco_camera.watch(timestamped_image)
# Do something with pipeline exception
except Exception as e:
...
- # Do something with detected_markers
- ... aruco_camera.aruco_detector.detected_markers()
+    # Display the ArUcoCamera frame image to show detected ArUco markers, scene pose, 2D AOI projection and ArFrame visualization.
+ ... aruco_camera.image()
+```
+
+### Detection outputs
+
+The [ArUcoCamera.watch](../../../argaze.md/#argaze.ArFeatures.ArCamera.watch) method returns data about pipeline execution.
+
+```python
+# Assuming that watch method has been called
+
+# Do something with detected_markers
+... aruco_camera.aruco_detector.detected_markers()
```
Let's understand the meaning of each returned data.
-### *aruco_camera.aruco_detector.detected_markers()*
+#### *aruco_camera.aruco_detector.detected_markers()*
A dictionary containing all detected markers is provided by [ArUcoDetector](../../../argaze.md/#argaze.ArUcoMarker.ArUcoDetector) class.
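+
+For illustration, here is a minimal sketch that iterates over this dictionary after a call to the watch method, assuming each entry maps a marker identifier to its detected marker object:
+
+```python
+# Assuming that aruco_camera.watch() has been called
+...
+
+# Iterate over detected markers (identifier -> marker object)
+for identifier, marker in aruco_camera.aruco_detector.detected_markers().items():
+
+    # Do something with each detected marker
+    ...
+```
+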
+### Analyse timestamped gaze positions into the camera frame
+
+As mentioned above, [ArUcoCamera](../../../argaze.md/#argaze.ArUcoMarker.ArUcoCamera) inherits from [ArFrame](../../../argaze.md/#argaze.ArFeatures.ArFrame) and, so, benefits from all the services described in the [gaze analysis pipeline section](../../gaze_analysis_pipeline/introduction.md).
+
+Particularly, timestamped gaze positions can be passed one by one to the [ArUcoCamera.look](../../../argaze.md/#argaze.ArFeatures.ArFrame.look) method to execute the whole pipeline dedicated to gaze analysis.
+
+!!! warning "Mandatory"
+
+ The [ArUcoCamera.look](../../../argaze.md/#argaze.ArFeatures.ArFrame.look) method must be called from a *try* block to catch pipeline exceptions.
+
+```python
+# Assuming that timestamped gaze positions are available
+...
+
+ try:
+
+ # Look ArUcoCamera frame at a timestamped gaze position
+ aruco_camera.look(timestamped_gaze_position)
+
+ # Do something with pipeline exception
+ except Exception as e:
+
+ ...
+```
+
## Setup ArUcoCamera image parameters
Specific [ArUcoCamera.image](../../../argaze.md/#argaze.ArFeatures.ArFrame.image) method parameters can be configured thanks to a Python dictionary.
@@ -133,4 +178,18 @@ aruco_camera_image = aruco_camera.image(**image_parameters)
```
!!! note
- [ArUcoCamera](../../../argaze.md/#argaze.ArUcoMarker.ArUcoCamera) inherits from [ArFrame](../../../argaze.md/#argaze.ArFeatures.ArFrame) and, so, benefits from all image parameters described in [gaze analysis pipeline visualization section](../../gaze_analysis_pipeline/visualization.md). \ No newline at end of file
+ [ArUcoCamera](../../../argaze.md/#argaze.ArUcoMarker.ArUcoCamera) inherits from [ArFrame](../../../argaze.md/#argaze.ArFeatures.ArFrame) and, so, benefits from all image parameters described in [gaze analysis pipeline visualization section](../../gaze_analysis_pipeline/visualization.md).
+
+
+## Display ArUcoScene frames
+
+All [ArUcoScene](../../../argaze.md/#argaze.ArUcoMarker.ArUcoScene) frame images can be displayed like any [ArFrame](../../../argaze.md/#argaze.ArFeatures.ArFrame) image.
+
+```python
+ ...
+
+ # Display all ArUcoScene frames
+ for frame in aruco_camera.scene_frames():
+
+ ... frame.image()
+``` \ No newline at end of file
diff --git a/docs/user_guide/aruco_marker_pipeline/aoi_3d_frame.md b/docs/user_guide/aruco_marker_pipeline/aoi_3d_frame.md
index 7323f2e..3a029b0 100644
--- a/docs/user_guide/aruco_marker_pipeline/aoi_3d_frame.md
+++ b/docs/user_guide/aruco_marker_pipeline/aoi_3d_frame.md
@@ -69,7 +69,8 @@ Here is the previous extract where "Left_Screen" and "Right_Screen" AOI are defi
}
}
}
- }
+ },
+ "copy_background_into_scenes_frames": true
...
}
}
@@ -96,40 +97,18 @@ The names of 3D AOI **and** their related [ArFrames](../../argaze.md/#argaze.ArF
[ArUcoScene](../../argaze.md/#argaze.ArUcoMarker.ArUcoScene) frame layers are projected into their dedicated [ArUcoScene](../../argaze.md/#argaze.ArUcoMarker.ArUcoScene) layers when the JSON configuration file is loaded.
-## Pipeline execution
-
-### Map ArUcoCamera image into ArUcoScenes frames
-
-After the timestamped camera image is passed to the [ArUcoCamera.watch](../../argaze.md/#argaze.ArFeatures.ArCamera.watch) method, it is possible to apply a perspective transformation in order to project the watched image into each [ArUcoScene](../../argaze.md/#argaze.ArUcoMarker.ArUcoScene) [frame's background](../../argaze.md/#argaze.ArFeatures.ArFrame) image.
-
-```python
-# Assuming that Full HD (1920x1080) timestamped images are available
-...:
-
- # Detect ArUco markers, estimate scene pose then, project 3D AOI into camera frame
- aruco_camera.watch(timestamped_image)
+### *copy_background_into_scenes_frames*
- # Map watched image into ArUcoScene frames background
- aruco_camera.map(timestamp=timestamp)
-```
+When this entry is enabled and the timestamped camera image is passed to the [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarker.ArUcoCamera), a perspective transformation is applied in order to project the watched image into each [ArUcoScene](../../argaze.md/#argaze.ArUcoMarker.ArUcoScene) [frame's background](../../argaze.md/#argaze.ArFeatures.ArFrame) image.
-### Analyze timestamped gaze positions into ArUcoScene frames
+## Pipeline execution
[ArUcoScene](../../argaze.md/#argaze.ArUcoMarker.ArUcoScene) frames benefits from all the services described in the [gaze analysis pipeline section](../gaze_analysis_pipeline/introduction.md).
!!! note
- Timestamped [GazePositions](../../argaze.md/#argaze.GazeFeatures.GazePosition) passed to the [ArUcoCamera.look](../../argaze.md/#argaze.ArFeatures.ArFrame.look) method are projected into [ArUcoScene](../../argaze.md/#argaze.ArUcoMarker.ArUcoScene) frames if applicable.
-
-### Display each ArUcoScene frames
-
-All [ArUcoScene](../../argaze.md/#argaze.ArUcoMarker.ArUcoScene) frames image can be displayed as any [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame).
-
-```python
- ...
+ Timestamped [GazePositions](../../argaze.md/#argaze.GazeFeatures.GazePosition) passed to the [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarker.ArUcoCamera) are automatically projected into [ArUcoScene](../../argaze.md/#argaze.ArUcoMarker.ArUcoScene) frames if applicable.
- # Display all ArUcoScene frames
- for frame in aruco_camera.scene_frames():
+Each [ArUcoScene](../../argaze.md/#argaze.ArUcoMarker.ArUcoScene) frame image is displayed in a separate window.
- ... frame.image()
-``` \ No newline at end of file
+![ArGaze load GUI](../../img/argaze_load_gui_opencv_frame.png) \ No newline at end of file
diff --git a/docs/user_guide/aruco_marker_pipeline/configuration_and_execution.md b/docs/user_guide/aruco_marker_pipeline/configuration_and_execution.md
index f4bd2d4..c2ee1b9 100644
--- a/docs/user_guide/aruco_marker_pipeline/configuration_and_execution.md
+++ b/docs/user_guide/aruco_marker_pipeline/configuration_and_execution.md
@@ -1,17 +1,17 @@
-Load and execute pipeline
+Edit and execute pipeline
=========================
Once [ArUco markers are placed into a scene](aruco_marker_description.md), they can be detected thanks to [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarker.ArUcoCamera) class.
As [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarker.ArUcoCamera) inherits from [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame), the [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarker.ArUcoCamera) class also benefits from all the services described in the [gaze analysis pipeline section](../gaze_analysis_pipeline/introduction.md).
-![ArUco camera frame](../../img/aruco_camera_frame.png)
+Once defined, an ArUco marker pipeline needs to be embedded inside a context that provides it with both gaze positions and camera images to process.
-## Load JSON configuration file
+![ArUco camera frame](../../img/aruco_camera_frame.png)
-An [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarker.ArUcoCamera) pipeline can be loaded from a JSON configuration file thanks to [argaze.load](../../argaze.md/#argaze.load) package method.
+## Edit JSON configuration
-Here is a simple JSON [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarker.ArUcoCamera) configuration file example:
+Here is a simple JSON [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarker.ArUcoCamera) configuration example:
```json
{
@@ -52,19 +52,7 @@ Here is a simple JSON [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarker.ArUcoCam
}
```
-Then, here is how to load the JSON file:
-
-```python
-import argaze
-
-# Load ArUcoCamera
-with argaze.load('./configuration.json') as aruco_camera:
-
- # Do something with ArUcoCamera
- ...
-```
-
-Now, let's understand the meaning of each JSON entry.
+Let's understand the meaning of each JSON entry.
### argaze.ArUcoMarker.ArUcoCamera.ArUcoCamera
@@ -101,62 +89,32 @@ The usual [ArFrame visualization parameters](../gaze_analysis_pipeline/visualiza
## Pipeline execution
-### Detect ArUco markers, estimate scene pose and project 3D AOI
-
-Pass each camera image with timestamp information to the [ArUcoCamera.watch](../../argaze.md/#argaze.ArFeatures.ArCamera.watch) method to execute the whole pipeline dedicated to ArUco marker detection, scene pose estimation and 3D AOI projection.
-
-!!! warning "Mandatory"
-
- The [ArUcoCamera.watch](../../argaze.md/#argaze.ArFeatures.ArCamera.watch) method must be called from a *try* block to catch pipeline exceptions.
-
-```python
-# Assuming that Full HD (1920x1080) images are available with timestamp values
-...:
-
- # Edit timestamped image
- timestamped_image = DataFeatures.TimestampedImage(image, timestamp=timestamp)
-
- try:
+A pipeline needs to be embedded into a context to be executed.
- # Detect ArUco markers, estimate scene pose then, project 3D AOI into camera frame
- aruco_camera.watch(timestamped_image)
+Copy the ArUco marker pipeline configuration defined above inside the following context configuration.
- # Do something with pipeline exception
- except Exception as e:
-
- ...
-
- # Display ArUcoCamera frame image to display detected ArUco markers, scene pose, 2D AOI projection and ArFrame visualization.
- ... aruco_camera.image()
+```json
+{
+ "argaze.utils.contexts.OpenCV.Movie": {
+ "name": "Movie player",
+ "path": "./src/argaze/utils/demo/tobii_record/segments/1/fullstream.mp4",
+ "pipeline": JSON CONFIGURATION
+ }
+}
```
-### Analyse timestamped gaze positions into the camera frame
-
-As mentioned above, [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarker.ArUcoCamera) inherits from [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) and, so, benefits from all the services described in the [gaze analysis pipeline section](../gaze_analysis_pipeline/introduction.md).
-
-Particularly, timestamped gaze positions can be passed one by one to the [ArUcoCamera.look](../../argaze.md/#argaze.ArFeatures.ArFrame.look) method to execute the whole pipeline dedicated to gaze analysis.
+Then, use the [*load* command](../utils/main_commands.md) to execute the context.
-!!! warning "Mandatory"
-
- The [ArUcoCamera.look](../../argaze.md/#argaze.ArFeatures.ArFrame.look) method must be called from a *try* block to catch pipeline exceptions.
-
-```python
-# Assuming that timestamped gaze positions are available
-...
-
- try:
+```shell
+python -m argaze load CONFIGURATION
+```
- # Look ArUcoCamera frame at a timestamped gaze position
- aruco_camera.look(timestamped_gaze_position)
+This command should open a GUI window showing the detected markers and, when the mouse moves over the window, circles for the identified cursor fixations.
- # Do something with pipeline exception
- except Exception as e:
-
- ...
-```
+![ArGaze load GUI](../../img/argaze_load_gui_opencv_pipeline.png)
!!! note ""
- At this point, the [ArUcoCamera.watch](../../argaze.md/#argaze.ArFeatures.ArCamera.watch) method only detects ArUco marker and the [ArUcoCamera.look](../../argaze.md/#argaze.ArFeatures.ArCamera.look) method only processes gaze movement identification without any AOI support as no scene description is provided into the JSON configuration file.
+    At this point, the pipeline only detects ArUco markers and processes gaze movement identification, without any AOI support as no scene description is provided in the JSON configuration file.
Read the next chapters to learn [how to estimate scene pose](pose_estimation.md), [how to describe a 3D scene's AOI](aoi_3d_description.md) and [how to project them into the camera frame](aoi_3d_projection.md). \ No newline at end of file
diff --git a/docs/user_guide/eye_tracking_context/advanced_topics/context_definition.md b/docs/user_guide/eye_tracking_context/advanced_topics/context_definition.md
new file mode 100644
index 0000000..99b6c7a
--- /dev/null
+++ b/docs/user_guide/eye_tracking_context/advanced_topics/context_definition.md
@@ -0,0 +1,193 @@
+Define a context class
+======================
+
+The [ArContext](../../../argaze.md/#argaze.ArFeatures.ArContext) class defines a generic base class interface to handle incoming eye tracker data before passing them to a processing pipeline, according to the [Python context manager feature](https://docs.python.org/3/reference/datamodel.html#context-managers).
+
+The [ArContext](../../../argaze.md/#argaze.ArFeatures.ArContext) class interface provides playback features to stop or pause processing, and performance assessment features to measure how many times processing is called and the time spent by the process.
+
+Besides, there is also a [LiveProcessingContext](../../../argaze.md/#argaze.ArFeatures.LiveProcessingContext) class that inherits from [ArContext](../../../argaze.md/#argaze.ArFeatures.ArContext) and that defines an abstract *calibrate* method to implement a device-specific calibration process.
+
+In the same way, there is a [PostProcessingContext](../../../argaze.md/#argaze.ArFeatures.PostProcessingContext) class that inherits from [ArContext](../../../argaze.md/#argaze.ArFeatures.ArContext) and that defines abstract *previous* and *next* playback methods to move through a record's frames, and also defines *duration* and *progression* properties to get information about the record length and the processing advancement.
+
+Finally, a specific eye tracking context can be defined in a Python file by writing a class that inherits from either the [ArContext](../../../argaze.md/#argaze.ArFeatures.ArContext), [LiveProcessingContext](../../../argaze.md/#argaze.ArFeatures.LiveProcessingContext) or [PostProcessingContext](../../../argaze.md/#argaze.ArFeatures.PostProcessingContext) class.
+
+## Write live processing context
+
+Here is a live processing context example that processes gaze positions and camera images in two separate threads:
+
+```python
+import threading
+
+from argaze import ArFeatures, DataFeatures
+
+class LiveProcessingExample(ArFeatures.LiveProcessingContext):
+
+ @DataFeatures.PipelineStepInit
+ def __init__(self, **kwargs):
+
+ # Init LiveProcessingContext class
+ super().__init__()
+
+ # Init private attribute
+ self.__parameter = ...
+
+ @property
+ def parameter(self):
+ """Any context specific parameter."""
+ return self.__parameter
+
+ @parameter.setter
+ def parameter(self, parameter):
+ self.__parameter = parameter
+
+ @DataFeatures.PipelineStepEnter
+ def __enter__(self):
+ """Start context."""
+
+        # Start context according to any specific parameter
+ ... self.parameter
+
+ # Start a gaze position processing thread
+ self.__gaze_thread = threading.Thread(target = self.__gaze_position_processing)
+ self.__gaze_thread.start()
+
+ # Start a camera image processing thread if applicable
+ self.__camera_thread = threading.Thread(target = self.__camera_image_processing)
+ self.__camera_thread.start()
+
+ return self
+
+ def __gaze_position_processing(self):
+ """Process gaze position."""
+
+ # Processing loop
+ while self.is_running():
+
+            # Only process when not paused
+ if not self.is_paused():
+
+ # Assuming that timestamp, x and y values are available
+ ...
+
+ # Process timestamped gaze position
+ self._process_gaze_position(timestamp = timestamp, x = x, y = y)
+
+ # Wait some time eventually
+ ...
+
+ def __camera_image_processing(self):
+ """Process camera image if applicable."""
+
+ # Processing loop
+ while self.is_running():
+
+            # Only process when not paused
+ if not self.is_paused():
+
+ # Assuming that timestamp, camera_image are available
+ ...
+
+ # Process timestamped camera image
+ self._process_camera_image(timestamp = timestamp, image = camera_image)
+
+ # Wait some time eventually
+ ...
+
+ @DataFeatures.PipelineStepExit
+ def __exit__(self, exception_type, exception_value, exception_traceback):
+ """End context."""
+
+ # Stop processing loops
+ self.stop()
+
+        # Wait for both processing threads to end
+        self.__gaze_thread.join()
+        self.__camera_thread.join()
+
+ def calibrate(self):
+ """Handle device calibration process."""
+
+ ...
+```
+
+## Write post processing context
+
+Here is a post processing context example that processes gaze positions and camera images in the same thread:
+
+```python
+import threading
+
+from argaze import ArFeatures, DataFeatures
+
+class PostProcessingExample(ArFeatures.PostProcessingContext):
+
+ @DataFeatures.PipelineStepInit
+ def __init__(self, **kwargs):
+
+        # Init PostProcessingContext class
+ super().__init__()
+
+ # Init private attribute
+ self.__parameter = ...
+
+ @property
+ def parameter(self):
+ """Any context specific parameter."""
+ return self.__parameter
+
+ @parameter.setter
+ def parameter(self, parameter):
+ self.__parameter = parameter
+
+ @DataFeatures.PipelineStepEnter
+ def __enter__(self):
+ """Start context."""
+
+        # Start context according to any specific parameter
+ ... self.parameter
+
+ # Start a reading data thread
+ self.__read_thread = threading.Thread(target = self.__data_reading)
+ self.__read_thread.start()
+
+ return self
+
+ def __data_reading(self):
+ """Process gaze position and camera image if applicable."""
+
+ # Processing loop
+ while self.is_running():
+
+            # Only process when not paused
+ if not self.is_paused():
+
+ # Assuming that timestamp, camera_image are available
+ ...
+
+ # Process timestamped camera image
+ self._process_camera_image(timestamp = timestamp, image = camera_image)
+
+ # Assuming that timestamp, x and y values are available
+ ...
+
+ # Process timestamped gaze position
+ self._process_gaze_position(timestamp = timestamp, x = x, y = y)
+
+ # Wait some time eventually
+ ...
+
+ @DataFeatures.PipelineStepExit
+ def __exit__(self, exception_type, exception_value, exception_traceback):
+ """End context."""
+
+ # Stop processing loops
+ self.stop()
+
+        # Wait for the reading thread to end
+        self.__read_thread.join()
+
+ def previous(self):
+ """Go to previous camera image frame."""
+ ...
+
+ def next(self):
+ """Go to next camera image frame."""
+ ...
+```
+
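+Once defined in a Python file, such a context can be referenced from a JSON configuration like any ready-made context. Here is a minimal sketch, assuming the class above is saved into a *my_context.py* file:
+
+```json
+{
+    "my_context.PostProcessingExample": {
+        "name": "My example context",
+        "parameter": ...,
+        "pipeline": "pipeline.json"
+    }
+}
+```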
diff --git a/docs/user_guide/eye_tracking_context/advanced_topics/scripting.md b/docs/user_guide/eye_tracking_context/advanced_topics/scripting.md
new file mode 100644
index 0000000..8753eb6
--- /dev/null
+++ b/docs/user_guide/eye_tracking_context/advanced_topics/scripting.md
@@ -0,0 +1,106 @@
+Script the context
+==================
+
+Context objects are accessible from a Python script.
+
+## Load configuration from JSON file
+
+A context configuration can be loaded from a JSON file using the [*load*](../../../argaze.md/#argaze.load) function.
+
+```python
+from argaze import load
+
+# Load a context
+with load(configuration_filepath) as context:
+
+ while context.is_running():
+
+ # Do something with context
+ ...
+
+ # Wait some time eventually
+ ...
+```
+
+!!! note
+    The **with** statement enters the context by calling its **enter** method, then ensures that its **exit** method is always called at the end.
+
+## Load configuration from dictionary
+
+A context configuration can be loaded from a Python dictionary using the [*from_dict*](../../../argaze.md/#argaze.DataFeatures.from_dict) function.
+
+```python
+from argaze import DataFeatures
+
+import my_package
+
+# Set working directory to enable relative file path loading
+DataFeatures.set_working_directory('path/to/folder')
+
+# Edit a dict with context configuration
+configuration = {
+ "name": "My context",
+ "parameter": ...,
+ "pipeline": ...
+}
+
+# Load a context from a package
+with DataFeatures.from_dict(my_package.MyContext, configuration) as context:
+
+ while context.is_running():
+
+ # Do something with context
+ ...
+
+ # Wait some time eventually
+ ...
+```
+
+## Manage context
+
+Check the context or the pipeline type to adapt the script's behavior.
+
+```python
+from argaze import ArFeatures
+
+# Assuming the context is loaded and is running
+...
+
+ # Check context type
+
+ # Live processing case: calibration method is available
+ if issubclass(type(context), ArFeatures.LiveProcessingContext):
+ ...
+
+ # Post processing case: more playback methods are available
+ if issubclass(type(context), ArFeatures.PostProcessingContext):
+ ...
+
+ # Check pipeline type
+
+    # Screen-based case: only gaze positions are processed
+ if issubclass(type(context.pipeline), ArFeatures.ArFrame):
+ ...
+
+    # Head-mounted case: camera images are also processed
+ if issubclass(type(context.pipeline), ArFeatures.ArCamera):
+ ...
+```
+
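+For the post processing case, a minimal sketch using the playback methods and properties described in the [context definition section](context_definition.md) could look like this:
+
+```python
+# Assuming the loaded context is a post processing context and is running
+...
+
+    # Read the record length and the processing advancement
+    print(context.duration, context.progression)
+
+    # Move to the previous or the next record frame
+    context.previous()
+    context.next()
+```
+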
+## Display context
+
+The context image can be displayed at low priority so as not to block pipeline processing.
+
+```python
+# Assuming the context is loaded and is running
+...
+
+ # Display context if the pipeline is available
+ try:
+
+ ... = context.image(wait = False)
+
+ except DataFeatures.SharedObjectBusy:
+
+ pass
+```
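+
+For instance, a minimal sketch that shows the context image in an [OpenCV](https://pypi.org/project/opencv-python/) window could be (assuming the *cv2* package is installed):
+
+```python
+import cv2
+
+from argaze import DataFeatures
+
+# Assuming the context is loaded and is running
+...
+
+    # Update the context image display when the image is not busy
+    try:
+
+        cv2.imshow(context.name, context.image(wait = False))
+        cv2.waitKey(10)
+
+    except DataFeatures.SharedObjectBusy:
+
+        pass
+```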
diff --git a/docs/user_guide/gaze_analysis_pipeline/timestamped_gaze_positions_edition.md b/docs/user_guide/eye_tracking_context/advanced_topics/timestamped_gaze_positions_edition.md
index 026d287..340dbaf 100644
--- a/docs/user_guide/gaze_analysis_pipeline/timestamped_gaze_positions_edition.md
+++ b/docs/user_guide/eye_tracking_context/advanced_topics/timestamped_gaze_positions_edition.md
@@ -3,7 +3,7 @@ Edit timestamped gaze positions
Whatever eye data comes from a file on disk or from a live stream, timestamped gaze positions are required before going further.
-![Timestamped gaze positions](../../img/timestamped_gaze_positions.png)
+![Timestamped gaze positions](../../../img/timestamped_gaze_positions.png)
## Import timestamped gaze positions from CSV file
@@ -28,7 +28,7 @@ for timestamped_gaze_position in ts_gaze_positions:
## Edit timestamped gaze positions from live stream
-Real-time gaze positions can be edited thanks to the [GazePosition](../../argaze.md/#argaze.GazeFeatures.GazePosition) class.
+Real-time gaze positions can be edited thanks to the [GazePosition](../../../argaze.md/#argaze.GazeFeatures.GazePosition) class.
Besides, timestamps can be edited from the incoming data stream or, if not available, they can be edited thanks to the Python [time package](https://docs.python.org/3/library/time.html).
```python
@@ -64,12 +64,3 @@ start_time = time.time()
!!! warning "Free time unit"
Timestamps can either be integers or floats, seconds, milliseconds or what ever you need. The only concern is that all time values used in further configurations have to be in the same unit.
-
-<!--
-!!! note "Eyetracker connectors"
-
- [Read the use cases section to discover examples using specific eyetrackers](./user_cases/introduction.md).
-!-->
-
-!!! note ""
- Now we have timestamped gaze positions at expected format, read the next chapter to start learning [how to analyze them](./configuration_and_execution.md). \ No newline at end of file
diff --git a/docs/user_guide/eye_tracking_context/configuration_and_execution.md b/docs/user_guide/eye_tracking_context/configuration_and_execution.md
new file mode 100644
index 0000000..f13c6a2
--- /dev/null
+++ b/docs/user_guide/eye_tracking_context/configuration_and_execution.md
@@ -0,0 +1,65 @@
+Edit and execute context
+========================
+
+The [utils.contexts module](../../argaze.md/#argaze.utils.contexts) provides ready-made contexts like:
+
+* [Tobii Pro Glasses 2](context_modules/tobii_pro_glasses_2.md) live stream and post processing contexts,
+* [Pupil Labs](context_modules/pupil_labs.md) live stream context,
+* [OpenCV](context_modules/opencv.md) window cursor position and movie processing,
+* [Random](context_modules/random.md) gaze position generator.
+
+## Edit JSON configuration
+
+Here is a JSON configuration that loads a [Random.GazePositionGenerator](../../argaze.md/#argaze.utils.contexts.Random.GazePositionGenerator) context:
+
+```json
+{
+ "argaze.utils.contexts.Random.GazePositionGenerator": {
+ "name": "Random gaze position generator",
+ "range": [1280, 720],
+ "pipeline": {
+ "argaze.ArFeatures.ArFrame": {
+ "size": [1280, 720]
+ }
+ }
+ }
+}
+```
+
+Let's understand the meaning of each JSON entry.
+
+### argaze.utils.contexts.Random.GazePositionGenerator
+
+The class name of the object being loaded from the [utils.contexts module](../../argaze.md/#argaze.utils.contexts).
+
+### *name*
+
+The name of the [ArContext](../../argaze.md/#argaze.ArFeatures.ArContext). Basically useful for visualization purposes.
+
+### *range*
+
+The range of the gaze position being generated. This property is specific to the [Random.GazePositionGenerator](../../argaze.md/#argaze.utils.contexts.Random.GazePositionGenerator) class.
+
+### *pipeline*
+
+A minimal gaze processing pipeline that only draws the last gaze position.
+
+## Context execution
+
+A context can be loaded from a JSON configuration file using the [*load* command](../utils/main_commands.md).
+
+```shell
+python -m argaze load CONFIGURATION
+```
+
+This command should open a GUI window with a random yellow dot inside.
+
+![ArGaze load GUI](../../img/argaze_load_gui_random.png)
+
+!!! note ""
+
+ At this point, it is possible to load any ready-made context from [utils.contexts](../../argaze.md/#argaze.utils.contexts) module.
+
+ However, the incoming gaze positions are not processed and gaze mapping would not be available for head-mounted eye tracker context.
+
+ Read the [gaze analysis pipeline section](../gaze_analysis_pipeline/introduction.md) to learn how to process gaze positions then, the [ArUco markers pipeline section](../aruco_marker_pipeline/introduction.md) to learn how to enable gaze mapping with an ArUco markers setup.
diff --git a/docs/user_guide/eye_tracking_context/context_modules/opencv.md b/docs/user_guide/eye_tracking_context/context_modules/opencv.md
new file mode 100644
index 0000000..7244cd4
--- /dev/null
+++ b/docs/user_guide/eye_tracking_context/context_modules/opencv.md
@@ -0,0 +1,47 @@
+OpenCV
+======
+
+ArGaze provides ready-made contexts to process cursor positions over an OpenCV window and to process movie images.
+
+To select a desired context, the JSON samples have to be edited and saved inside an [ArContext configuration](../configuration_and_execution.md) file.
+Notice that the *pipeline* entry is mandatory.
+
+```json
+{
+ JSON sample
+ "pipeline": ...
+}
+```
+
+Read more about [ArContext base class in code reference](../../../argaze.md/#argaze.ArFeatures.ArContext).
+
+## Cursor
+
+::: argaze.utils.contexts.OpenCV.Cursor
+
+### JSON sample
+
+```json
+{
+ "argaze.utils.contexts.OpenCV.Cursor": {
+ "name": "Open CV cursor",
+ "pipeline": ...
+ }
+}
+```
+
+## Movie
+
+::: argaze.utils.contexts.OpenCV.Movie
+
+### JSON sample
+
+```json
+{
+ "argaze.utils.contexts.OpenCV.Movie": {
+        "name": "Open CV movie",
+ "path": "./src/argaze/utils/demo/tobii_record/segments/1/fullstream.mp4",
+ "pipeline": ...
+ }
+}
+```
diff --git a/docs/user_guide/eye_tracking_context/context_modules/pupil_labs.md b/docs/user_guide/eye_tracking_context/context_modules/pupil_labs.md
new file mode 100644
index 0000000..d2ec336
--- /dev/null
+++ b/docs/user_guide/eye_tracking_context/context_modules/pupil_labs.md
@@ -0,0 +1,32 @@
+Pupil Labs
+==========
+
+ArGaze provides a ready-made context to work with Pupil Labs devices.
+
+To select a desired context, the JSON samples have to be edited and saved inside an [ArContext configuration](../configuration_and_execution.md) file.
+Notice that the *pipeline* entry is mandatory.
+
+```json
+{
+ JSON sample
+ "pipeline": ...
+}
+```
+
+Read more about [ArContext base class in code reference](../../../argaze.md/#argaze.ArFeatures.ArContext).
+
+## Live Stream
+
+::: argaze.utils.contexts.PupilLabs.LiveStream
+
+### JSON sample
+
+```json
+{
+ "argaze.utils.contexts.PupilLabs.LiveStream": {
+ "name": "Pupil Labs live stream",
+ "project": "my_experiment",
+ "pipeline": ...
+ }
+}
+```
diff --git a/docs/user_guide/eye_tracking_context/context_modules/random.md b/docs/user_guide/eye_tracking_context/context_modules/random.md
new file mode 100644
index 0000000..89d7501
--- /dev/null
+++ b/docs/user_guide/eye_tracking_context/context_modules/random.md
@@ -0,0 +1,32 @@
+Random
+======
+
+ArGaze provides a ready-made context to generate random gaze positions.
+
+To select a desired context, the JSON samples have to be edited and saved inside an [ArContext configuration](../configuration_and_execution.md) file.
+Notice that the *pipeline* entry is mandatory.
+
+```json
+{
+ JSON sample
+ "pipeline": ...
+}
+```
+
+Read more about [ArContext base class in code reference](../../../argaze.md/#argaze.ArFeatures.ArContext).
+
+## Gaze Position Generator
+
+::: argaze.utils.contexts.Random.GazePositionGenerator
+
+### JSON sample
+
+```json
+{
+ "argaze.utils.contexts.Random.GazePositionGenerator": {
+ "name": "Random gaze position generator",
+ "range": [1280, 720],
+ "pipeline": ...
+ }
+}
+```
diff --git a/docs/user_guide/eye_tracking_context/context_modules/tobii_pro_glasses_2.md b/docs/user_guide/eye_tracking_context/context_modules/tobii_pro_glasses_2.md
new file mode 100644
index 0000000..fba6931
--- /dev/null
+++ b/docs/user_guide/eye_tracking_context/context_modules/tobii_pro_glasses_2.md
@@ -0,0 +1,59 @@
+Tobii Pro Glasses 2
+===================
+
+ArGaze provides a ready-made context to work with Tobii Pro Glasses 2 devices.
+
+To select a desired context, the JSON samples have to be edited and saved inside an [ArContext configuration](../configuration_and_execution.md) file.
+Notice that the *pipeline* entry is mandatory.
+
+```json
+{
+ JSON sample
+ "pipeline": ...
+}
+```
+
+Read more about [ArContext base class in code reference](../../../argaze.md/#argaze.ArFeatures.ArContext).
+
+## Live Stream
+
+::: argaze.utils.contexts.TobiiProGlasses2.LiveStream
+
+### JSON sample
+
+```json
+{
+ "argaze.utils.contexts.TobiiProGlasses2.LiveStream": {
+ "name": "Tobii Pro Glasses 2 live stream",
+ "address": "10.34.0.17",
+ "project": "my_experiment",
+ "participant": "subject-A",
+ "configuration": {
+ "sys_ec_preset": "Indoor",
+ "sys_sc_width": 1920,
+ "sys_sc_height": 1080,
+ "sys_sc_fps": 25,
+ "sys_sc_preset": "Auto",
+ "sys_et_freq": 50,
+ "sys_mems_freq": 100
+ },
+ "pipeline": ...
+ }
+}
+```
+
+## Post Processing
+
+::: argaze.utils.contexts.TobiiProGlasses2.PostProcessing
+
+### JSON sample
+
+```json
+{
+ "argaze.utils.contexts.TobiiProGlasses2.PostProcessing" : {
+ "name": "Tobii Pro Glasses 2 post-processing",
+ "segment": "./src/argaze/utils/demo/tobii_record/segments/1",
+ "pipeline": ...
+ }
+}
+```
diff --git a/docs/user_guide/eye_tracking_context/introduction.md b/docs/user_guide/eye_tracking_context/introduction.md
new file mode 100644
index 0000000..a6208b2
--- /dev/null
+++ b/docs/user_guide/eye_tracking_context/introduction.md
@@ -0,0 +1,19 @@
+Overview
+========
+
+This section explains how to handle eye tracker data from various sources, such as live streams or archived files, before passing them to a processing pipeline. These various usages are covered by the notion of **eye tracking context**.
+
+To use a ready-made eye tracking context, you only need to know:
+
+* [How to edit and execute a context](configuration_and_execution.md)
+
+More advanced features are also explained like:
+
+* [How to script context](./advanced_topics/scripting.md),
+* [How to define a context](./advanced_topics/context_definition.md),
+* [How to edit timestamped gaze positions](advanced_topics/timestamped_gaze_positions_edition.md)
+
+To get a deeper understanding of how a context works, the schema below mentions the *enter* and *exit* methods, which relate to the notion of a [Python context manager](https://docs.python.org/3/reference/datamodel.html#context-managers).
+
+![ArContext class](../../img/eye_tracker_context.png)
+
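+For instance, a context loaded with the [*load*](../../argaze.md/#argaze.load) function is used through a **with** statement, which triggers these *enter* and *exit* methods (a minimal sketch):
+
+```python
+from argaze import load
+
+# Entering the with block calls the context's enter method
+with load('configuration.json') as context:
+
+    # The context runs until its exit method is called at the end of the with block
+    while context.is_running():
+
+        ...
+```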
diff --git a/docs/user_guide/gaze_analysis_pipeline/advanced_topics/scripting.md b/docs/user_guide/gaze_analysis_pipeline/advanced_topics/scripting.md
index 026cb3f..264e866 100644
--- a/docs/user_guide/gaze_analysis_pipeline/advanced_topics/scripting.md
+++ b/docs/user_guide/gaze_analysis_pipeline/advanced_topics/scripting.md
@@ -66,7 +66,28 @@ from argaze import ArFeatures
...
```
-## Pipeline execution updates
+## Pipeline execution
+
+Timestamped [GazePositions](../../../argaze.md/#argaze.GazeFeatures.GazePosition) have to be passed one by one to the [ArFrame.look](../../../argaze.md/#argaze.ArFeatures.ArFrame.look) method to execute the whole instantiated pipeline.
+
+!!! warning "Mandatory"
+
+ The [ArFrame.look](../../../argaze.md/#argaze.ArFeatures.ArFrame.look) method must be called from a *try* block to catch pipeline exceptions.
+
+```python
+# Assuming that timestamped gaze positions are available
+...
+
+ try:
+
+ # Look ArFrame at a timestamped gaze position
+ ar_frame.look(timestamped_gaze_position)
+
+ # Do something with pipeline exception
+ except Exception as e:
+
+ ...
+```
Calling [ArFrame.look](../../../argaze.md/#argaze.ArFeatures.ArFrame.look) method leads to update many data into the pipeline.
@@ -186,3 +207,34 @@ ar_frame_image = ar_frame.image(**image_parameters)
# Do something with ArFrame image
...
```
+
+Then, [ArFrame.image](../../../argaze.md/#argaze.ArFeatures.ArFrame.image) method can be called in various situations.
+
+### Live window display
+
+While timestamped gaze positions are processed by [ArFrame.look](../../../argaze.md/#argaze.ArFeatures.ArFrame.look) method, it is possible to display the [ArFrame](../../../argaze.md/#argaze.ArFeatures.ArFrame) image thanks to the [OpenCV package](https://pypi.org/project/opencv-python/).
+
+```python
+import cv2
+
+def main():
+
+ # Assuming ArFrame is loaded
+ ...
+
+ # Create a window to display ArFrame
+ cv2.namedWindow(ar_frame.name, cv2.WINDOW_AUTOSIZE)
+
+ # Assuming that timestamped gaze positions are being processed by ArFrame.look method
+ ...
+
+ # Update ArFrame image display
+ cv2.imshow(ar_frame.name, ar_frame.image())
+
+ # Wait 10 ms
+ cv2.waitKey(10)
+
+if __name__ == '__main__':
+
+ main()
+``` \ No newline at end of file
diff --git a/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md b/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md
index be27c69..2b64091 100644
--- a/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md
+++ b/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md
@@ -5,7 +5,7 @@ Once [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) is [configured](confi
![Layer](../../img/ar_layer.png)
-## Add ArLayer to ArFrame JSON configuration file
+## Add ArLayer to ArFrame JSON configuration
The [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) class defines a space where to match fixations with AOI and inside which those matches need to be analyzed.
diff --git a/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md b/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md
index 57a9d71..58919e5 100644
--- a/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md
+++ b/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md
@@ -1,15 +1,15 @@
-Load and execute pipeline
+Edit and execute pipeline
=========================
The [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) class defines a rectangular area where timestamped gaze positions are projected in and inside which they need to be analyzed.
-![Frame](../../img/ar_frame.png)
+Once defined, a gaze analysis pipeline needs to be embedded inside a context that provides it with gaze positions to process.
-## Load JSON configuration file
+![Frame](../../img/ar_frame.png)
-An [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) pipeline can be loaded from a JSON configuration file thanks to the [argaze.load](../../argaze.md/#argaze.load) package method.
+## Edit JSON configuration
-Here is a simple JSON [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) configuration file example:
+Here is a simple JSON [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) configuration example:
```json
{
@@ -35,19 +35,7 @@ Here is a simple JSON [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) conf
}
```
-Then, here is how to load the JSON file:
-
-```python
-import argaze
-
-# Load ArFrame
-with argaze.load('./configuration.json') as ar_frame:
-
- # Do something with ArFrame
- ...
-```
-
-Now, let's understand the meaning of each JSON entry.
+Let's understand the meaning of each JSON entry.
### argaze.ArFeatures.ArFrame
@@ -103,28 +91,32 @@ In the example file, the chosen analysis algorithms are the [Basic](../../argaze
## Pipeline execution
-Timestamped [GazePositions](../../argaze.md/#argaze.GazeFeatures.GazePosition) have to be passed one by one to the [ArFrame.look](../../argaze.md/#argaze.ArFeatures.ArFrame.look) method to execute the whole instantiated pipeline.
+A pipeline needs to be embedded into a context to be executed.
-!!! warning "Mandatory"
+Copy the gaze analysis pipeline configuration defined above inside the following context configuration.
- The [ArFrame.look](../../argaze.md/#argaze.ArFeatures.ArFrame.look) method must be called from a *try* block to catch pipeline exceptions.
+```json
+{
+ "argaze.utils.contexts.Random.GazePositionGenerator": {
+ "name": "Random gaze position generator",
+ "range": [1920, 1080],
+ "pipeline": JSON CONFIGURATION
+ }
+}
+```
-```python
-# Assuming that timestamped gaze positions are available
-...
+Then, use the [*load* command](../utils/main_commands.md) to execute the context.
- try:
+```shell
+python -m argaze load CONFIGURATION
+```
- # Look ArFrame at a timestamped gaze position
- ar_frame.look(timestamped_gaze_position)
+This command should open a GUI window with a random yellow dot and circles for the identified fixations.
+
+![ArGaze load GUI](../../img/argaze_load_gui_random_pipeline.png)
- # Do something with pipeline exception
- except Exception as e:
-
- ...
-```
!!! note ""
- At this point, the [ArFrame.look](../../argaze.md/#argaze.ArFeatures.ArFrame.look) method only processes gaze movement identification and scan path analysis without any AOI neither any recording or visualization supports.
+    At this point, the pipeline only processes gaze movement identification and scan path analysis, without any AOI support nor any recording or visualization support.
Read the next chapters to learn how to [describe AOI](aoi_2d_description.md), [add AOI analysis](aoi_analysis.md), [record gaze analysis](recording.md) and [visualize pipeline steps](visualization.md). \ No newline at end of file
diff --git a/docs/user_guide/gaze_analysis_pipeline/introduction.md b/docs/user_guide/gaze_analysis_pipeline/introduction.md
index c12f669..1b06ff6 100644
--- a/docs/user_guide/gaze_analysis_pipeline/introduction.md
+++ b/docs/user_guide/gaze_analysis_pipeline/introduction.md
@@ -1,7 +1,10 @@
Overview
========
-This section explains how to create gaze analysis pipelines for various use cases.
+This section explains how to process incoming gaze positions through a **gaze analysis pipeline**.
+
+!!! warning "Read eye tracking context section before"
+ This section assumes that the incoming gaze positions are provided by an [eye tracking context](../eye_tracking_context/introduction.md).
First, let's look at the schema below: it gives an overview of the main notions involved in the following chapters.
@@ -9,8 +12,7 @@ First, let's look at the schema below: it gives an overview of the main notions
To build your own gaze analysis pipeline, you need to know:
-* [How to edit timestamped gaze positions](timestamped_gaze_positions_edition.md),
-* [How to load and execute gaze analysis pipeline](configuration_and_execution.md),
+* [How to edit and execute a pipeline](configuration_and_execution.md),
* [How to describe AOI](aoi_2d_description.md),
* [How to enable AOI analysis](aoi_analysis.md),
* [How to visualize pipeline steps outputs](visualization.md),
diff --git a/docs/user_guide/gaze_analysis_pipeline/visualization.md b/docs/user_guide/gaze_analysis_pipeline/visualization.md
index 6b9805c..32395c3 100644
--- a/docs/user_guide/gaze_analysis_pipeline/visualization.md
+++ b/docs/user_guide/gaze_analysis_pipeline/visualization.md
@@ -5,7 +5,7 @@ Visualization is not a pipeline step, but each [ArFrame](../../argaze.md/#argaze
![ArFrame visualization](../../img/visualization.png)
-## Add image parameters to ArFrame JSON configuration file
+## Add image parameters to ArFrame JSON configuration
[ArFrame.image](../../argaze.md/#argaze.ArFeatures.ArFrame.image) method parameters can be configured thanks to a dedicated JSON entry.
@@ -82,37 +82,6 @@ Here is an extract from the JSON ArFrame configuration file with a sample where
Most of *image_parameters* entries work if related ArFrame/ArLayer pipeline steps are enabled.
For example, a JSON *draw_scan_path* entry needs GazeMovementIdentifier and ScanPath steps to be enabled.
-Then, [ArFrame.image](../../argaze.md/#argaze.ArFeatures.ArFrame.image) method can be called in various situations.
-
-## Live window display
-
-While timestamped gaze positions are processed by [ArFrame.look](../../argaze.md/#argaze.ArFeatures.ArFrame.look) method, it is possible to display the [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) image thanks to the [OpenCV package](https://pypi.org/project/opencv-python/).
-
-```python
-import cv2
-
-def main():
-
- # Assuming ArFrame is loaded
- ...
-
- # Create a window to display ArFrame
- cv2.namedWindow(ar_frame.name, cv2.WINDOW_AUTOSIZE)
-
- # Assuming that timestamped gaze positions are being processed by ArFrame.look method
- ...
-
- # Update ArFrame image display
- cv2.imshow(ar_frame.name, ar_frame.image())
-
- # Wait 10 ms
- cv2.waitKey(10)
-
-if __name__ == '__main__':
-
- main()
-```
-
!!! note "Export to video file"
Video exportation is detailed in [gaze analysis recording chapter](recording.md). \ No newline at end of file
diff --git a/docs/user_guide/pipeline_input_context/configuration_and_connection.md b/docs/user_guide/pipeline_input_context/configuration_and_connection.md
deleted file mode 100644
index 4aac88a..0000000
--- a/docs/user_guide/pipeline_input_context/configuration_and_connection.md
+++ /dev/null
@@ -1,35 +0,0 @@
-Load and connect a context
-==========================
-
-Once an [ArContext is defined](context_definition.md), it have to be connected to a pipeline.
-
-# Load JSON configuration file
-
-An [ArContext](../../argaze.md/#argaze.ArFeatures.ArContext) can be loaded from a JSON configuration file thanks to the [argaze.load](../../argaze.md/#argaze.load) package method.
-
-Here is a JSON configuration file related to the [previously defined Example context](context_definition.md):
-
-```json
-{
- "my_context.Example": {
- "name": "My example context",
- "parameter": ...,
- "pipeline": "pipeline.json"
- }
-}
-```
-
-Then, here is how to load the JSON file:
-
-```python
-import argaze
-
-# Load ArContext
-with argaze.load('./configuration.json') as ar_context:
-
- # Do something with ArContext
- ...
-```
-
-!!! note
- There is nothing to do to execute a loaded context as it is handled inside its own **__enter__** method.
diff --git a/docs/user_guide/pipeline_input_context/context_definition.md b/docs/user_guide/pipeline_input_context/context_definition.md
deleted file mode 100644
index 7d30438..0000000
--- a/docs/user_guide/pipeline_input_context/context_definition.md
+++ /dev/null
@@ -1,57 +0,0 @@
-Define a context class
-======================
-
-The [ArContext](../../argaze.md/#argaze.ArFeatures.ArContext) class defines a generic class interface to handle pipeline inputs according to [Python context manager feature](https://docs.python.org/3/reference/datamodel.html#context-managers).
-
-# Write Python context file
-
-A specific [ArContext](../../argaze.md/#argaze.ArFeatures.ArContext) can be defined into a Python file.
-
-Here is an example context defined into *my_context.py* file:
-
-```python
-from argaze import ArFeatures, DataFeatures
-
-class Example(ArFeatures.ArContext):
-
- @DataFeatures.PipelineStepInit
- def __init__(self, **kwargs):
-
- # Init ArContext class
- super().__init__()
-
- # Init private attribute
- self.__parameter = ...
-
- @property
- def parameter(self):
- """Any context specific parameter."""
- return self.__parameter
-
- @parameter.setter
- def parameter(self, parameter):
- self.__parameter = parameter
-
- @DataFeatures.PipelineStepEnter
- def __enter__(self):
-
- # Start context according any specific parameter
- ... self.parameter
-
- # Assuming that timestamp, x and y values are available
- ...
-
- # Process timestamped gaze position
- self._process_gaze_position(timestamp = timestamp, x = x, y = y)
-
- @DataFeatures.PipelineStepExit
- def __exit__(self, exception_type, exception_value, exception_traceback):
-
- # End context
- ...
-```
-
-!!! note ""
-
- The next chapter explains how to [load a context to connect it with a pipeline](configuration_and_connection.md).
- \ No newline at end of file
diff --git a/docs/user_guide/pipeline_input_context/introduction.md b/docs/user_guide/pipeline_input_context/introduction.md
deleted file mode 100644
index e31ad54..0000000
--- a/docs/user_guide/pipeline_input_context/introduction.md
+++ /dev/null
@@ -1,24 +0,0 @@
-Overview
-========
-
-This section explains how to connect [gaze analysis](../gaze_analysis_pipeline/introduction.md) or [augmented reality](../aruco_marker_pipeline/introduction.md) pipelines with various input contexts.
-
-First, let's look at the schema below: it gives an overview of the main notions involved in the following chapters.
-
-![Pipeline input context](../../img/pipeline_input_context.png)
-
-To build your own input context, you need to know:
-
-* [How to define a context class](context_definition.md),
-* [How to load a context to connect with a pipeline](configuration_and_connection.md),
-
-!!! warning "Documentation in progress"
-
- This section is not yet fully done. Please look at the [demonstrations scripts chapter](../utils/demonstrations_scripts.md) to know more about this notion.
-
-<!--
-* [How to stop a context](stop.md),
-* [How to pause and resume a context](pause_and_resume.md),
-* [How to visualize a context](visualization.md),
-* [How to handle pipeline exceptions](exceptions.md)
-!-->
diff --git a/docs/user_guide/utils/demonstrations_scripts.md b/docs/user_guide/utils/demonstrations_scripts.md
index f293980..dd1b8e0 100644
--- a/docs/user_guide/utils/demonstrations_scripts.md
+++ b/docs/user_guide/utils/demonstrations_scripts.md
@@ -11,18 +11,26 @@ Collection of command-line scripts for demonstration purpose.
## Random context
-Load **random_context.json** file to analyze random gaze positions:
+Load **random_context.json** file to process random gaze positions:
```shell
python -m argaze load ./src/argaze/utils/demo/random_context.json
```
-## OpenCV window context
+## OpenCV cursor context
-Load **opencv_window_context.json** file to analyze mouse pointer positions over OpenCV window:
+Load the **opencv_cursor_context.json** file to process cursor positions over an OpenCV window:
```shell
-python -m argaze load ./src/argaze/utils/demo/opencv_window_context.json
+python -m argaze load ./src/argaze/utils/demo/opencv_cursor_context.json
+```
+
+## OpenCV movie context
+
+Load **opencv_movie_context.json** file to process movie images and cursor positions over an OpenCV window:
+
+```shell
+python -m argaze load ./src/argaze/utils/demo/opencv_movie_context.json
```
## Tobii Pro Glasses 2
diff --git a/docs/user_guide/utils/estimate_aruco_markers_pose.md b/docs/user_guide/utils/estimate_aruco_markers_pose.md
new file mode 100644
index 0000000..3d34972
--- /dev/null
+++ b/docs/user_guide/utils/estimate_aruco_markers_pose.md
@@ -0,0 +1,60 @@
+Estimate ArUco markers pose
+===========================
+
+This **ArGaze** application detects ArUco markers inside movie frames, then exports pose estimations as .obj files into a folder.
+
+Firstly, edit the **utils/estimate_markers_pose/context.json** file to select a movie *path*.
+
+```json
+{
+ "argaze.utils.contexts.OpenCV.Movie" : {
+ "name": "ArUco markers pose estimator",
+ "path": "./src/argaze/utils/demo/tobii_record/segments/1/fullstream.mp4",
+ "pipeline": "pipeline.json"
+ }
+}
+```
+
+Secondly, edit the **utils/estimate_markers_pose/pipeline.json** file to set up the ArUco camera *size* and the ArUco detector *dictionary*, *pose_size* and *pose_ids* attributes.
+
+```json
+{
+ "argaze.ArUcoMarker.ArUcoCamera.ArUcoCamera": {
+ "name": "Full HD Camera",
+ "size": [1920, 1080],
+ "aruco_detector": {
+ "dictionary": "DICT_APRILTAG_16h5",
+ "pose_size": 4,
+ "pose_ids": [],
+ "parameters": {
+ "useAruco3Detection": 1
+ },
+ "observers":{
+ "observers.ArUcoMarkersPoseRecorder": {
+ "output_folder": "_export/records/aruco_markers_group"
+ }
+ }
+ },
+ "sides_mask": 420,
+ "image_parameters": {
+ "background_weight": 1,
+ "draw_gaze_positions": {
+ "color": [0, 255, 255],
+ "size": 4
+ },
+ "draw_detected_markers": {
+ "color": [255, 255, 255],
+ "draw_axes": {
+ "thickness": 4
+ }
+ }
+ }
+ }
+}
+```
+
+Then, launch the application.
+
+```shell
+python -m argaze load ./src/argaze/utils/estimate_markers_pose/context.json
+``` \ No newline at end of file
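Assuming the `ArUcoMarkersPoseRecorder` observer writes into the *output_folder* configured above, the exported records can then be listed from the root of the ArGaze package folder:

```shell
ls ./_export/records/aruco_markers_group
```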
diff --git a/docs/user_guide/utils/ready-made_scripts.md b/docs/user_guide/utils/main_commands.md
index 892fef8..4dd3434 100644
--- a/docs/user_guide/utils/ready-made_scripts.md
+++ b/docs/user_guide/utils/main_commands.md
@@ -1,15 +1,12 @@
-Ready-made scripts
-==================
+Main commands
+=============
-Collection of command-line scripts to provide useful features.
-
-!!! note
- *Consider that all inline commands below have to be executed at the root of ArGaze package folder.*
+The **ArGaze** package comes with top-level commands.
!!! note
*Use -h option to get command arguments documentation.*
-## Load ArContext JSON configuration
+## Load
Load and execute any [ArContext](../../argaze.md/#argaze.ArFeatures.ArContext) from a JSON CONFIGURATION file
@@ -17,6 +14,10 @@ Load and execute any [ArContext](../../argaze.md/#argaze.ArFeatures.ArContext) f
python -m argaze load CONFIGURATION
```
+This command should open a GUI window to display the image of the context's pipeline.
+
+![ArGaze load GUI](../../img/argaze_load_gui.png)
+
### Send command
Use -p option to enable pipe communication at given address:
@@ -46,24 +47,10 @@ echo "context.pause()" > /tmp/argaze
echo "context.resume()" > /tmp/argaze
```
-## Edit JSON configuration
+## Edit
Modify the content of JSON CONFIGURATION file with another JSON CHANGES file then, save the result into an OUTPUT file
```shell
python -m argaze edit CONFIGURATION CHANGES OUTPUT
```
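For instance, assuming a hypothetical *changes.json* file that overrides some values of the demo configuration, the call could be:

```shell
python -m argaze edit ./src/argaze/utils/demo/random_context.json ./changes.json ./random_context_edited.json
```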
-
-## Estimate ArUco markers pose
-
-This application detects ArUco markers inside a movie frame then, export pose estimation as .obj file into a folder.
-
-Firstly, edit **utils/estimate_markers_pose/context.json** file as to select a movie *path*.
-
-Sencondly, edit **utils/estimate_markers_pose/pipeline.json** file to setup ArUco detector *dictionary*, *pose_size* and *pose_ids* attributes.
-
-Then, launch the application.
-
-```shell
-python -m argaze load ./src/argaze/utils/estimate_markers_pose/context.json
-``` \ No newline at end of file
diff --git a/mkdocs.yml b/mkdocs.yml
index 2ec7046..17fc65a 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -4,9 +4,20 @@ nav:
- installation.md
- license.md
- User Guide:
+ - Eye Tracking Context:
+ - user_guide/eye_tracking_context/introduction.md
+ - user_guide/eye_tracking_context/configuration_and_execution.md
+ - Context Modules:
+ - user_guide/eye_tracking_context/context_modules/tobii_pro_glasses_2.md
+ - user_guide/eye_tracking_context/context_modules/pupil_labs.md
+ - user_guide/eye_tracking_context/context_modules/opencv.md
+ - user_guide/eye_tracking_context/context_modules/random.md
+ - Advanced Topics:
+ - user_guide/eye_tracking_context/advanced_topics/scripting.md
+ - user_guide/eye_tracking_context/advanced_topics/context_definition.md
+ - user_guide/eye_tracking_context/advanced_topics/timestamped_gaze_positions_edition.md
- Gaze Analysis Pipeline:
- user_guide/gaze_analysis_pipeline/introduction.md
- - user_guide/gaze_analysis_pipeline/timestamped_gaze_positions_edition.md
- user_guide/gaze_analysis_pipeline/configuration_and_execution.md
- user_guide/gaze_analysis_pipeline/aoi_2d_description.md
- user_guide/gaze_analysis_pipeline/aoi_analysis.md
@@ -36,13 +47,10 @@ nav:
- user_guide/aruco_marker_pipeline/advanced_topics/scripting.md
- user_guide/aruco_marker_pipeline/advanced_topics/optic_parameters_calibration.md
- user_guide/aruco_marker_pipeline/advanced_topics/aruco_detector_configuration.md
- - Pipeline Input Context:
- - user_guide/pipeline_input_context/introduction.md
- - user_guide/pipeline_input_context/context_definition.md
- - user_guide/pipeline_input_context/configuration_and_connection.md
- utils:
- - user_guide/utils/ready-made_scripts.md
+ - user_guide/utils/main_commands.md
- user_guide/utils/demonstrations_scripts.md
+ - user_guide/utils/estimate_aruco_markers_pose.md
- Use Cases:
- Pilot gaze monitoring:
- use_cases/pilot_gaze_monitoring/introduction.md
@@ -80,6 +88,11 @@ plugins:
annotations_path: brief
show_submodules: true
show_root_toc_entry: false
+ show_if_no_docstring: false
+ modernize_annotations: true
+ filters:
+ - "!^__"
+
watch:
- src/argaze
markdown_extensions:
diff --git a/src/argaze.test/GazeAnalysis/VelocityThresholdIdentification.py b/src/argaze.test/GazeAnalysis/VelocityThresholdIdentification.py
index e7431b5..9bb07cb 100644
--- a/src/argaze.test/GazeAnalysis/VelocityThresholdIdentification.py
+++ b/src/argaze.test/GazeAnalysis/VelocityThresholdIdentification.py
@@ -28,301 +28,301 @@ from argaze.GazeAnalysis import VelocityThresholdIdentification
import numpy
def build_gaze_fixation(size: int, center: tuple, deviation_max: float, min_time: float, max_time: float, start_ts: float = 0., validity: list = []):
- """ Generate N TimeStampedGazePsoitions dispersed around a center point for testing purpose.
- Timestamps are current time after random sleep (second).
- GazePositions are random values.
- """
- ts_gaze_positions = GazeFeatures.TimeStampedGazePositions()
+ """ Generate N TimeStampedGazePsoitions dispersed around a center point for testing purpose.
+ Timestamps are current time after random sleep (second).
+ GazePositions are random values.
+ """
+ ts_gaze_positions = GazeFeatures.TimeStampedGazePositions()
- start_time = time.time()
+ start_time = time.time()
- for i in range(0, size):
+ for i in range(0, size):
- # Sleep a random time
- sleep_time = random.random() * (max_time - min_time) + min_time
- time.sleep(sleep_time)
+ # Sleep a random time
+ sleep_time = random.random() * (max_time - min_time) + min_time
+ time.sleep(sleep_time)
- # Check position validity
- valid = True
- if len(validity) > i:
+ # Check position validity
+ valid = True
+ if len(validity) > i:
- valid = validity[i]
+ valid = validity[i]
- if valid:
+ if valid:
- # Edit gaze position
- random_x = center[0] + deviation_max * (random.random() - 0.5) / math.sqrt(2)
- random_y = center[1] + deviation_max * (random.random() - 0.5) / math.sqrt(2)
- gaze_position = GazeFeatures.GazePosition((random_x, random_y))
+ # Edit gaze position
+ random_x = center[0] + deviation_max * (random.random() - 0.5) / math.sqrt(2)
+ random_y = center[1] + deviation_max * (random.random() - 0.5) / math.sqrt(2)
+ gaze_position = GazeFeatures.GazePosition((random_x, random_y))
- else:
+ else:
- gaze_position = GazeFeatures.GazePosition()
+ gaze_position = GazeFeatures.GazePosition()
- # Timestamp gaze position
- gaze_position.timestamp = time.time() - start_time + start_ts
+ # Timestamp gaze position
+ gaze_position.timestamp = time.time() - start_time + start_ts
- # Store gaze position
- ts_gaze_positions.append(gaze_position)
+ # Store gaze position
+ ts_gaze_positions.append(gaze_position)
- return ts_gaze_positions
+ return ts_gaze_positions
def build_gaze_saccade(size: int, center_A: tuple, center_B: tuple, min_time: float, max_time: float, start_ts: float = 0., validity: list = []):
- """ Generate N TimeStampedGazePsoitions between 2 center points for testing purpose.
- Timestamps are current time after random sleep (second).
- GazePositions are random values.
- """
- ts_gaze_positions = GazeFeatures.TimeStampedGazePositions()
+ """ Generate N TimeStampedGazePsoitions between 2 center points for testing purpose.
+ Timestamps are current time after random sleep (second).
+ GazePositions are random values.
+ """
+ ts_gaze_positions = GazeFeatures.TimeStampedGazePositions()
- start_time = time.time()
+ start_time = time.time()
- for i in range(0, size):
+ for i in range(0, size):
- # Sleep a random time
- sleep_time = random.random() * (max_time - min_time) + min_time
- time.sleep(sleep_time)
+ # Sleep a random time
+ sleep_time = random.random() * (max_time - min_time) + min_time
+ time.sleep(sleep_time)
- # Check position validity
- valid = True
- if len(validity) > i:
+ # Check position validity
+ valid = True
+ if len(validity) > i:
- valid = validity[i]
+ valid = validity[i]
- if valid:
+ if valid:
- # Edit gaze position
- move_x = center_A[0] + (center_B[0] - center_A[0]) * (i / size)
- move_y = center_A[1] + (center_B[1] - center_A[1]) * (i / size)
- gaze_position = GazeFeatures.GazePosition((move_x, move_y))
+ # Edit gaze position
+ move_x = center_A[0] + (center_B[0] - center_A[0]) * (i / size)
+ move_y = center_A[1] + (center_B[1] - center_A[1]) * (i / size)
+ gaze_position = GazeFeatures.GazePosition((move_x, move_y))
- else:
+ else:
- gaze_position = GazeFeatures.GazePosition()
+ gaze_position = GazeFeatures.GazePosition()
- # Timestamp gaze position
- gaze_position.timestamp = time.time() - start_time + start_ts
+ # Timestamp gaze position
+ gaze_position.timestamp = time.time() - start_time + start_ts
- # Store gaze position
- ts_gaze_positions.append(gaze_position)
+ # Store gaze position
+ ts_gaze_positions.append(gaze_position)
- return ts_gaze_positions
+ return ts_gaze_positions
class TestVelocityThresholdIdentificationClass(unittest.TestCase):
- """Test VelocityThresholdIdentification class."""
-
- def test_fixation_identification(self):
- """Test VelocityThresholdIdentification fixation identification."""
-
- size = 10
- center = (0, 0)
- deviation_max = 10
- min_time = 0.05
- max_time = 0.1
- velocity_max = deviation_max / min_time
-
- ts_gaze_positions = build_gaze_fixation(size, center, deviation_max, min_time, max_time)
- gaze_movement_identifier = VelocityThresholdIdentification.GazeMovementIdentifier(velocity_max_threshold=velocity_max, duration_min_threshold=max_time*2)
- ts_fixations, ts_saccades, ts_status = gaze_movement_identifier.browse(ts_gaze_positions)
-
- # Check result size
- self.assertEqual(len(ts_fixations), 1)
- self.assertEqual(len(ts_saccades), 0)
- self.assertEqual(len(ts_status), size - 1)
-
- # Check fixation
- fixation = ts_fixations.pop(0)
-
- self.assertEqual(len(fixation), size - 1)
- self.assertGreaterEqual(fixation.duration, (size - 2) * min_time)
- self.assertLessEqual(fixation.duration, (size - 2) * max_time)
- self.assertLessEqual(fixation.is_finished(), True)
-
- def test_fixation_and_direct_saccade_identification(self):
- """Test VelocityThresholdIdentification fixation and saccade identification."""
-
- size = 10
- center_A = (0, 0)
- center_B = (500, 500)
- deviation_max = 10
- min_time = 0.05
- max_time = 0.1
- velocity_max = deviation_max / min_time
-
- ts_gaze_positions_A = build_gaze_fixation(size, center_A, deviation_max, min_time, max_time)
- ts_gaze_positions_B = build_gaze_fixation(size, center_B, deviation_max, min_time, max_time, start_ts=ts_gaze_positions_A[-1].timestamp)
-
- ts_gaze_positions = ts_gaze_positions_A + ts_gaze_positions_B
-
- gaze_movement_identifier = VelocityThresholdIdentification.GazeMovementIdentifier(velocity_max_threshold=velocity_max, duration_min_threshold=max_time*2)
- ts_fixations, ts_saccades, ts_status = gaze_movement_identifier.browse(ts_gaze_positions)
-
- # Check result size
- self.assertEqual(len(ts_fixations), 2)
- self.assertEqual(len(ts_saccades), 1)
- self.assertEqual(len(ts_status), size * 2 - 1)
-
- # Check first fixation
- fixation = ts_fixations.pop(0)
-
- self.assertEqual(len(fixation), size - 1)
- self.assertGreaterEqual(fixation.duration, (size - 2) * min_time)
- self.assertLessEqual(fixation.duration, (size - 2) * max_time)
- self.assertLessEqual(fixation.is_finished(), True)
-
- # Check first saccade
- saccade = ts_saccades.pop(0)
-
- self.assertEqual(len(saccade), 2)
- self.assertGreaterEqual(saccade.duration, min_time)
- self.assertLessEqual(saccade.duration, max_time)
- self.assertLessEqual(saccade.is_finished(), True)
-
- # Check that last position of a movement is equal to first position of next movement
- self.assertEqual(fixation[-1].timestamp, saccade[0].timestamp)
- self.assertEqual(fixation[-1].value, saccade[0].value)
-
- # Check second fixation
- fixation = ts_fixations.pop(0)
-
- self.assertEqual(len(fixation), size)
- self.assertGreaterEqual(fixation.duration, (size - 1) * min_time)
- self.assertLessEqual(fixation.duration, (size - 1) * max_time)
- self.assertLessEqual(fixation.is_finished(), True)
-
- # Check that last position of a movement is equal to first position of next movement
- self.assertEqual(saccade[-1].timestamp, fixation[0].timestamp)
- self.assertEqual(saccade[-1].value, fixation[0].value)
-
- def test_fixation_and_short_saccade_identification(self):
- """Test VelocityThresholdIdentification fixation and saccade identification."""
-
- size = 10
- move = 2
- center_A = (0, 0)
- out_A = (10, 10)
- center_B = (50, 50)
- deviation_max = 10
- min_time = 0.05
- max_time = 0.1
- velocity_max = deviation_max / min_time
-
- ts_gaze_positions_A = build_gaze_fixation(size, center_A, deviation_max, min_time, max_time)
- ts_move_positions = build_gaze_saccade(move, out_A, center_B, min_time, min_time, start_ts=ts_gaze_positions_A[-1].timestamp)
- ts_gaze_positions_B = build_gaze_fixation(size, center_B, deviation_max, min_time, max_time, start_ts=ts_move_positions[-1].timestamp)
-
- ts_gaze_positions = ts_gaze_positions_A + ts_move_positions + ts_gaze_positions_B
-
- gaze_movement_identifier = VelocityThresholdIdentification.GazeMovementIdentifier(velocity_max_threshold=velocity_max, duration_min_threshold=max_time*2)
- ts_fixations, ts_saccades, ts_status = gaze_movement_identifier.browse(ts_gaze_positions)
-
- # Check result size
- self.assertEqual(len(ts_fixations), 2)
- self.assertEqual(len(ts_saccades), 1)
- self.assertEqual(len(ts_status), 2 * size + move - 1)
-
- # Check first fixation
- fixation = ts_fixations.pop(0)
-
- self.assertEqual(len(fixation), size - 1) # BUG: NOT ALWAYS TRUE !!!
- self.assertGreaterEqual(fixation.duration, (size - 2) * min_time)
- self.assertLessEqual(fixation.duration, (size - 2) * max_time)
- self.assertLessEqual(fixation.is_finished(), True)
-
- # Check first saccade
- saccade = ts_saccades.pop(0)
-
- self.assertEqual(len(saccade), move + 2)
- self.assertGreaterEqual(saccade.duration, (move + 1) * min_time)
- self.assertLessEqual(saccade.duration, (move + 1) * max_time)
- self.assertLessEqual(saccade.is_finished(), True)
-
- # Check that last position of a movement is equal to first position of next movement
- self.assertEqual(fixation[-1].timestamp, saccade[0].timestamp)
- self.assertEqual(fixation[-1].value, saccade[0].value)
-
- # Check second fixation
- fixation = ts_fixations.pop(0)
-
- self.assertEqual(len(fixation), size)
- self.assertGreaterEqual(fixation.duration, (size - 1) * min_time)
- self.assertLessEqual(fixation.duration, (size - 1) * max_time)
- self.assertLessEqual(fixation.is_finished(), True)
-
- # Check that last position of a movement is equal to first position of next movement
- self.assertEqual(saccade[-1], fixation[0])
- self.assertEqual(saccade[-1].value, fixation[0].value)
-
- def test_invalid_gaze_position(self):
- """Test VelocityThresholdIdentification fixation and saccade identification with invalid gaze position."""
-
- size = 15
- center = (0, 0)
- deviation_max = 10
- min_time = 0.05
- max_time = 0.1
- velocity_max = deviation_max / min_time
- validity = [True, True, True, True, True, True, True, False, False, False, True, True, True, True, True]
-
- ts_gaze_positions = build_gaze_fixation(size, center, deviation_max, min_time, max_time, validity=validity)
-
- gaze_movement_identifier = VelocityThresholdIdentification.GazeMovementIdentifier(velocity_max_threshold=velocity_max, duration_min_threshold=max_time*2)
- ts_fixations, ts_saccades, ts_status = gaze_movement_identifier.browse(ts_gaze_positions)
-
- # Check result size
- self.assertEqual(len(ts_fixations), 2)
- self.assertEqual(len(ts_saccades), 0)
- self.assertEqual(len(ts_status), len(validity)-5)
-
- # Check first fixation
- fixation = ts_fixations.pop(0)
-
- self.assertEqual(len(fixation), 6)
- self.assertGreaterEqual(fixation.duration, 5 * min_time)
- self.assertLessEqual(fixation.duration, 5 * max_time)
- self.assertLessEqual(fixation.is_finished(), True)
+ """Test VelocityThresholdIdentification class."""
+
+ def test_fixation_identification(self):
+ """Test VelocityThresholdIdentification fixation identification."""
+
+ size = 10
+ center = (0, 0)
+ deviation_max = 10
+ min_time = 0.05
+ max_time = 0.1
+ velocity_max = deviation_max / min_time
+
+ ts_gaze_positions = build_gaze_fixation(size, center, deviation_max, min_time, max_time)
+ gaze_movement_identifier = VelocityThresholdIdentification.GazeMovementIdentifier(velocity_max_threshold=velocity_max, duration_min_threshold=max_time*2)
+ ts_fixations, ts_saccades, ts_status = gaze_movement_identifier.browse(ts_gaze_positions)
+
+ # Check result size
+ self.assertEqual(len(ts_fixations), 1)
+ self.assertEqual(len(ts_saccades), 0)
+ self.assertEqual(len(ts_status), size - 1)
+
+ # Check fixation
+ fixation = ts_fixations.pop(0)
+
+ self.assertEqual(len(fixation), size - 1)
+ self.assertGreaterEqual(fixation.duration, (size - 2) * min_time)
+ self.assertLessEqual(fixation.duration, (size - 2) * max_time)
+ self.assertLessEqual(fixation.is_finished(), True)
+
+ def test_fixation_and_direct_saccade_identification(self):
+ """Test VelocityThresholdIdentification fixation and saccade identification."""
+
+ size = 10
+ center_A = (0, 0)
+ center_B = (500, 500)
+ deviation_max = 10
+ min_time = 0.05
+ max_time = 0.1
+ velocity_max = deviation_max / min_time
+
+ ts_gaze_positions_A = build_gaze_fixation(size, center_A, deviation_max, min_time, max_time)
+ ts_gaze_positions_B = build_gaze_fixation(size, center_B, deviation_max, min_time, max_time, start_ts=ts_gaze_positions_A[-1].timestamp)
+
+ ts_gaze_positions = ts_gaze_positions_A + ts_gaze_positions_B
+
+ gaze_movement_identifier = VelocityThresholdIdentification.GazeMovementIdentifier(velocity_max_threshold=velocity_max, duration_min_threshold=max_time*2)
+ ts_fixations, ts_saccades, ts_status = gaze_movement_identifier.browse(ts_gaze_positions)
+
+ # Check result size
+ self.assertEqual(len(ts_fixations), 2)
+ self.assertEqual(len(ts_saccades), 1)
+ self.assertEqual(len(ts_status), size * 2 - 1)
+
+ # Check first fixation
+ fixation = ts_fixations.pop(0)
+
+ self.assertEqual(len(fixation), size - 1)
+ self.assertGreaterEqual(fixation.duration, (size - 2) * min_time)
+ self.assertLessEqual(fixation.duration, (size - 2) * max_time)
+ self.assertLessEqual(fixation.is_finished(), True)
+
+ # Check first saccade
+ saccade = ts_saccades.pop(0)
+
+ self.assertEqual(len(saccade), 2)
+ self.assertGreaterEqual(saccade.duration, min_time)
+ self.assertLessEqual(saccade.duration, max_time)
+ self.assertLessEqual(saccade.is_finished(), True)
+
+ # Check that last position of a movement is equal to first position of next movement
+ self.assertEqual(fixation[-1].timestamp, saccade[0].timestamp)
+ self.assertEqual(fixation[-1].value, saccade[0].value)
+
+ # Check second fixation
+ fixation = ts_fixations.pop(0)
+
+ self.assertEqual(len(fixation), size)
+ self.assertGreaterEqual(fixation.duration, (size - 1) * min_time)
+ self.assertLessEqual(fixation.duration, (size - 1) * max_time)
+ self.assertLessEqual(fixation.is_finished(), True)
+
+ # Check that last position of a movement is equal to first position of next movement
+ self.assertEqual(saccade[-1].timestamp, fixation[0].timestamp)
+ self.assertEqual(saccade[-1].value, fixation[0].value)
+
+ def test_fixation_and_short_saccade_identification(self):
+ """Test VelocityThresholdIdentification fixation and saccade identification."""
+
+ size = 10
+ move = 2
+ center_A = (0, 0)
+ out_A = (10, 10)
+ center_B = (50, 50)
+ deviation_max = 10
+ min_time = 0.05
+ max_time = 0.1
+ velocity_max = deviation_max / min_time
+
+ ts_gaze_positions_A = build_gaze_fixation(size, center_A, deviation_max, min_time, max_time)
+ ts_move_positions = build_gaze_saccade(move, out_A, center_B, min_time, min_time, start_ts=ts_gaze_positions_A[-1].timestamp)
+ ts_gaze_positions_B = build_gaze_fixation(size, center_B, deviation_max, min_time, max_time, start_ts=ts_move_positions[-1].timestamp)
+
+ ts_gaze_positions = ts_gaze_positions_A + ts_move_positions + ts_gaze_positions_B
+
+ gaze_movement_identifier = VelocityThresholdIdentification.GazeMovementIdentifier(velocity_max_threshold=velocity_max, duration_min_threshold=max_time*2)
+ ts_fixations, ts_saccades, ts_status = gaze_movement_identifier.browse(ts_gaze_positions)
+
+ # Check result size
+ self.assertEqual(len(ts_fixations), 2)
+ self.assertEqual(len(ts_saccades), 1)
+ self.assertEqual(len(ts_status), 2 * size + move - 1)
+
+ # Check first fixation
+ fixation = ts_fixations.pop(0)
+
+ self.assertEqual(len(fixation), size - 1) # BUG: NOT ALWAYS TRUE !!!
+ self.assertGreaterEqual(fixation.duration, (size - 2) * min_time)
+ self.assertLessEqual(fixation.duration, (size - 2) * max_time)
+ self.assertLessEqual(fixation.is_finished(), True)
+
+ # Check first saccade
+ saccade = ts_saccades.pop(0)
+
+ self.assertEqual(len(saccade), move + 2)
+ self.assertGreaterEqual(saccade.duration, (move + 1) * min_time)
+ self.assertLessEqual(saccade.duration, (move + 1) * max_time)
+ self.assertLessEqual(saccade.is_finished(), True)
+
+ # Check that last position of a movement is equal to first position of next movement
+ self.assertEqual(fixation[-1].timestamp, saccade[0].timestamp)
+ self.assertEqual(fixation[-1].value, saccade[0].value)
+
+ # Check second fixation
+ fixation = ts_fixations.pop(0)
+
+ self.assertEqual(len(fixation), size)
+ self.assertGreaterEqual(fixation.duration, (size - 1) * min_time)
+ self.assertLessEqual(fixation.duration, (size - 1) * max_time)
+ self.assertLessEqual(fixation.is_finished(), True)
+
+ # Check that last position of a movement is equal to first position of next movement
+ self.assertEqual(saccade[-1], fixation[0])
+ self.assertEqual(saccade[-1].value, fixation[0].value)
+
+ def test_invalid_gaze_position(self):
+ """Test VelocityThresholdIdentification fixation and saccade identification with invalid gaze position."""
+
+ size = 15
+ center = (0, 0)
+ deviation_max = 10
+ min_time = 0.05
+ max_time = 0.1
+ velocity_max = deviation_max / min_time
+ validity = [True, True, True, True, True, True, True, False, False, False, True, True, True, True, True]
+
+ ts_gaze_positions = build_gaze_fixation(size, center, deviation_max, min_time, max_time, validity=validity)
+
+ gaze_movement_identifier = VelocityThresholdIdentification.GazeMovementIdentifier(velocity_max_threshold=velocity_max, duration_min_threshold=max_time*2)
+ ts_fixations, ts_saccades, ts_status = gaze_movement_identifier.browse(ts_gaze_positions)
+
+ # Check result size
+ self.assertEqual(len(ts_fixations), 2)
+ self.assertEqual(len(ts_saccades), 0)
+ self.assertEqual(len(ts_status), len(validity)-5)
+
+ # Check first fixation
+ fixation = ts_fixations.pop(0)
+
+ self.assertEqual(len(fixation), 6)
+ self.assertGreaterEqual(fixation.duration, 5 * min_time)
+ self.assertLessEqual(fixation.duration, 5 * max_time)
+ self.assertLessEqual(fixation.is_finished(), True)
- # Check second fixation
- fixation = ts_fixations.pop(0)
-
- self.assertEqual(len(fixation), 4)
- self.assertGreaterEqual(fixation.duration, 3 * min_time)
- self.assertLessEqual(fixation.duration, 3 * max_time)
- self.assertLessEqual(fixation.is_finished(), True)
+ # Check second fixation
+ fixation = ts_fixations.pop(0)
+
+ self.assertEqual(len(fixation), 4)
+ self.assertGreaterEqual(fixation.duration, 3 * min_time)
+ self.assertLessEqual(fixation.duration, 3 * max_time)
+ self.assertLessEqual(fixation.is_finished(), True)
- def test_identification_browsing(self):
- """Test VelocityThresholdIdentification identification browsing."""
-
- size = 10
- center_A = (0, 0)
- center_B = (50, 50)
- deviation_max = 10
- min_time = 0.01
- max_time = 0.1
- velocity_max = deviation_max / min_time
-
- ts_gaze_positions_A = build_gaze_fixation(size, center_A, deviation_max, min_time, max_time)
- ts_gaze_positions_B = build_gaze_fixation(size, center_B, deviation_max, min_time, max_time, start_ts=ts_gaze_positions_A[-1].timestamp)
+ def test_identification_browsing(self):
+ """Test VelocityThresholdIdentification identification browsing."""
+
+ size = 10
+ center_A = (0, 0)
+ center_B = (50, 50)
+ deviation_max = 10
+ min_time = 0.01
+ max_time = 0.1
+ velocity_max = deviation_max / min_time
+
+ ts_gaze_positions_A = build_gaze_fixation(size, center_A, deviation_max, min_time, max_time)
+ ts_gaze_positions_B = build_gaze_fixation(size, center_B, deviation_max, min_time, max_time, start_ts=ts_gaze_positions_A[-1].timestamp)
- ts_gaze_positions = ts_gaze_positions_A + ts_gaze_positions_B
+ ts_gaze_positions = ts_gaze_positions_A + ts_gaze_positions_B
- gaze_movement_identifier = VelocityThresholdIdentification.GazeMovementIdentifier(velocity_max_threshold=velocity_max, duration_min_threshold=max_time*2)
+ gaze_movement_identifier = VelocityThresholdIdentification.GazeMovementIdentifier(velocity_max_threshold=velocity_max, duration_min_threshold=max_time*2)
- # Iterate on gaze positions
- for gaze_position in ts_gaze_positions:
+ # Iterate on gaze positions
+ for gaze_position in ts_gaze_positions:
- finished_gaze_movement = gaze_movement_identifier.identify(gaze_position, terminate=(gaze_position.timestamp == ts_gaze_positions[-1]))
+ finished_gaze_movement = gaze_movement_identifier.identify(gaze_position, terminate=(gaze_position.timestamp == ts_gaze_positions[-1]))
- # Check that last gaze position date is not equal to given gaze position date
- if finished_gaze_movement:
+ # Check that last gaze position date is not equal to given gaze position date
+ if finished_gaze_movement:
- self.assertNotEqual(finished_gaze_movement[-1].timestamp, gaze_position.timestamp)
+ self.assertNotEqual(finished_gaze_movement[-1].timestamp, gaze_position.timestamp)
- # Check that last gaze position date of current movement is equal to given gaze position date
- current_gaze_movement = gaze_movement_identifier.current_gaze_movement()
- if current_gaze_movement:
+ # Check that last gaze position date of current movement is equal to given gaze position date
+ current_gaze_movement = gaze_movement_identifier.current_gaze_movement()
+ if current_gaze_movement:
- self.assertEqual(current_gaze_movement[-1].timestamp, gaze_position.timestamp)
+ self.assertEqual(current_gaze_movement[-1].timestamp, gaze_position.timestamp)
if __name__ == '__main__':
- unittest.main() \ No newline at end of file
+ unittest.main() \ No newline at end of file
diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py
index 2d9c281..8d9eceb 100644
--- a/src/argaze/ArFeatures.py
+++ b/src/argaze/ArFeatures.py
@@ -96,7 +96,7 @@ class ArLayer(DataFeatures.SharedObject, DataFeatures.PipelineStepObject):
Defines a space where to make matching of gaze movements and AOI and inside which those matching need to be analyzed.
!!! note
- Inherits from DataFeatures.SharedObject class to be shared by multiple threads.
+ Inherits from DataFeatures.SharedObject class to be shared by multiple threads.
"""
@DataFeatures.PipelineStepInit
@@ -320,7 +320,7 @@ class ArLayer(DataFeatures.SharedObject, DataFeatures.PipelineStepObject):
Project timestamped gaze movement into layer.
!!! warning
- Be aware that gaze movement positions are in the same range of value than aoi_scene size attribute.
+        Be aware that gaze movement positions are in the same range of values as the aoi_scene size attribute.
Parameters:
gaze_movement: gaze movement to project
@@ -435,7 +435,7 @@ class ArFrame(DataFeatures.SharedObject, DataFeatures.PipelineStepObject):
Defines a rectangular area where to project in timestamped gaze positions and inside which they need to be analyzed.
!!! note
- Inherits from DataFeatures.SharedObject class to be shared by multiple threads
+ Inherits from DataFeatures.SharedObject class to be shared by multiple threads
"""
@DataFeatures.PipelineStepInit
@@ -703,7 +703,7 @@ class ArFrame(DataFeatures.SharedObject, DataFeatures.PipelineStepObject):
Project timestamped gaze position into frame.
!!! warning
- Be aware that gaze positions are in the same range of value than size attribute.
+        Be aware that gaze positions are in the same range of values as the size attribute.
Parameters:
timestamped_gaze_position: gaze position to project
@@ -1231,7 +1231,7 @@ class ArCamera(ArFrame):
self.__projection_cache_writer.write( (timestamp, exception) )
- def _read_projection_cache(self, timestamp: int|float):
+ def _read_projection_cache(self, timestamp: int|float) -> bool:
"""Read layers aoi scene from the projection cache.
Parameters:
@@ -1305,10 +1305,10 @@ class ArCamera(ArFrame):
"""Copy camera frame background into scene frames background.
!!! warning
- This method have to be called once AOI have been projected into camera frame layers.
+        This method has to be called once AOI have been projected into camera frame layers.
!!! note
- This method makes each frame to send an 'on_copy_background_into_scenes_frames' signal to their observers.
+        This method makes each frame send an 'on_copy_background_into_scenes_frames' signal to its observers.
"""
# Project camera frame background into each scene frame if possible
@@ -1445,7 +1445,7 @@ class ArCamera(ArFrame):
"""Project timestamped gaze position into each scene frames.
!!! warning
- watch method needs to be called first.
+ watch method needs to be called first.
Parameters:
timestamped_gaze_position: gaze position to project
@@ -1504,7 +1504,7 @@ DEFAULT_ARCONTEXT_IMAGE_PARAMETERS = {
class ArContext(DataFeatures.PipelineStepObject):
"""
- Defines abstract Python context manager to handle pipeline inputs.
+    Defines an abstract Python context manager to handle eye tracker data before passing them to a processing pipeline.
"""
# noinspection PyMissingConstructor
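In practice, this abstract manager is specialized by concrete context classes; a minimal sketch, adapted from the context definition example removed above (the timestamp, x and y values are placeholders), looks like:

```python
from argaze import ArFeatures, DataFeatures

class Example(ArFeatures.ArContext):

    @DataFeatures.PipelineStepInit
    def __init__(self, **kwargs):

        # Init ArContext class
        super().__init__()

    @DataFeatures.PipelineStepEnter
    def __enter__(self):

        # Start eye tracker data acquisition here, then feed each
        # timestamped gaze position to the processing pipeline
        self._process_gaze_position(timestamp=0, x=0, y=0)

    @DataFeatures.PipelineStepExit
    def __exit__(self, exception_type, exception_value, exception_traceback):

        # Stop eye tracker data acquisition here
        pass
```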
diff --git a/src/argaze/ArUcoMarker/ArUcoDetector.py b/src/argaze/ArUcoMarker/ArUcoDetector.py
index 32091a4..50da144 100644
--- a/src/argaze/ArUcoMarker/ArUcoDetector.py
+++ b/src/argaze/ArUcoMarker/ArUcoDetector.py
@@ -33,7 +33,7 @@ class DetectorParameters():
"""Wrapper class around ArUco marker detector parameters.
!!! note
- More details on [opencv page](https://docs.opencv.org/4.x/d1/dcd/structcv_1_1aruco_1_1DetectorParameters.html)
+ More details on [opencv page](https://docs.opencv.org/4.x/d1/dcd/structcv_1_1aruco_1_1DetectorParameters.html)
"""
__parameters = aruco.DetectorParameters()
@@ -188,13 +188,13 @@ class ArUcoDetector(DataFeatures.PipelineStepObject):
"""Detect all ArUco markers into an image.
!!! danger "DON'T MIRROR IMAGE"
- It makes the markers detection to fail.
+        It makes the markers detection fail.
!!! danger "DON'T UNDISTORTED IMAGE"
- Camera intrinsic parameters and distortion coefficients are used later during pose estimation.
+ Camera intrinsic parameters and distortion coefficients are used later during pose estimation.
!!! note
- The pose of markers will be also estimated if the pose_size attribute is not None.
+        The pose of markers will also be estimated if the pose_size attribute is not None.
"""
# Reset detected markers data
@@ -262,7 +262,7 @@ class ArUcoDetector(DataFeatures.PipelineStepObject):
ids: markers id list to select detected markers.
!!! warning
- This method have to called after 'detect_markers'
+        This method has to be called after 'detect_markers'.
"""
# Is there detected markers ?
@@ -324,7 +324,7 @@ class ArUcoDetector(DataFeatures.PipelineStepObject):
"""Detect ArUco markers board in image setting up the number of detected markers needed to agree detection.
!!! danger "DON'T MIRROR IMAGE"
- It makes the markers detection to fail.
+        It makes the markers detection fail.
"""
# detect markers from gray picture
diff --git a/src/argaze/ArUcoMarker/ArUcoMarker.py b/src/argaze/ArUcoMarker/ArUcoMarker.py
index bfd6350..fddc2aa 100644
--- a/src/argaze/ArUcoMarker/ArUcoMarker.py
+++ b/src/argaze/ArUcoMarker/ArUcoMarker.py
@@ -28,80 +28,80 @@ import cv2.aruco as aruco
@dataclass
class ArUcoMarker():
- """Define ArUco marker class."""
+ """Define ArUco marker class."""
- dictionary: ArUcoMarkerDictionary.ArUcoMarkerDictionary
- """Dictionary to which it belongs."""
+ dictionary: ArUcoMarkerDictionary.ArUcoMarkerDictionary
+ """Dictionary to which it belongs."""
- identifier: int
- """Index into dictionary"""
+ identifier: int
+ """Index into dictionary"""
- size: float = field(default=math.nan)
- """Size of marker in centimeters."""
+ size: float = field(default=math.nan)
+ """Size of marker in centimeters."""
- corners: numpy.array = field(init=False, repr=False)
- """Estimated 2D corners position in camera image referential."""
+ corners: numpy.array = field(init=False, repr=False)
+ """Estimated 2D corners position in camera image referential."""
- translation: numpy.array = field(init=False, repr=False)
- """Estimated 3D center position in camera world referential."""
+ translation: numpy.array = field(init=False, repr=False)
+ """Estimated 3D center position in camera world referential."""
- rotation: numpy.array = field(init=False, repr=False)
- """Estimated 3D marker rotation in camera world referential."""
+ rotation: numpy.array = field(init=False, repr=False)
+ """Estimated 3D marker rotation in camera world referential."""
- points: numpy.array = field(init=False, repr=False)
- """Estimated 3D corners positions in camera world referential."""
+ points: numpy.array = field(init=False, repr=False)
+ """Estimated 3D corners positions in camera world referential."""
- @property
- def center(self) -> numpy.array:
- """Get 2D center position in camera image referential."""
+ @property
+ def center(self) -> numpy.array:
+ """Get 2D center position in camera image referential."""
- return self.corners[0].mean(axis=0)
+ return self.corners[0].mean(axis=0)
- def image(self, dpi) -> numpy.array:
- """Create marker matrix image at a given resolution.
+ def image(self, dpi) -> numpy.array:
+ """Create marker matrix image at a given resolution.
- !!! warning
- Marker size have to be setup before.
- """
+ !!! warning
+            Marker size has to be set up beforehand.
+ """
- assert(not math.isnan(self.size))
+ assert(not math.isnan(self.size))
- dimension = round(self.size * dpi / 2.54) # 1 cm = 2.54 inches
- matrix = numpy.zeros((dimension, dimension, 1), dtype="uint8")
+ dimension = round(self.size * dpi / 2.54) # 1 cm = 2.54 inches
+ matrix = numpy.zeros((dimension, dimension, 1), dtype="uint8")
- aruco.generateImageMarker(self.dictionary.markers, self.identifier, dimension, matrix, 1)
+ aruco.generateImageMarker(self.dictionary.markers, self.identifier, dimension, matrix, 1)
- return numpy.repeat(matrix, 3).reshape(dimension, dimension, 3)
+ return numpy.repeat(matrix, 3).reshape(dimension, dimension, 3)
- def draw(self, image: numpy.array, K: numpy.array, D: numpy.array, color: tuple = None, draw_axes: dict = None):
- """Draw marker in image.
+ def draw(self, image: numpy.array, K: numpy.array, D: numpy.array, color: tuple = None, draw_axes: dict = None):
+ """Draw marker in image.
- Parameters:
- image: image where to
- K:
- D:
- color: marker color (if None, no marker drawn)
- draw_axes: enable marker axes drawing
+ Parameters:
+            image: image where to draw
+ K:
+ D:
+ color: marker color (if None, no marker drawn)
+ draw_axes: enable marker axes drawing
- !!! warning
- draw_axes needs marker size and pose estimation.
- """
+ !!! warning
+ draw_axes needs marker size and pose estimation.
+ """
- # Draw marker if required
- if color is not None:
+ # Draw marker if required
+ if color is not None:
- aruco.drawDetectedMarkers(image, [numpy.array([list(self.corners)])], numpy.array([self.identifier]), color)
+ aruco.drawDetectedMarkers(image, [numpy.array([list(self.corners)])], numpy.array([self.identifier]), color)
- # Draw marker axes if pose has been estimated, marker have a size and if required
- if self.translation.size == 3 and self.rotation.size == 9 and not math.isnan(self.size) and draw_axes is not None:
+        # Draw marker axes if pose has been estimated, marker has a size and if required
+ if self.translation.size == 3 and self.rotation.size == 9 and not math.isnan(self.size) and draw_axes is not None:
- cv2.drawFrameAxes(image, numpy.array(K), numpy.array(D), self.rotation, self.translation, self.size, **draw_axes)
+ cv2.drawFrameAxes(image, numpy.array(K), numpy.array(D), self.rotation, self.translation, self.size, **draw_axes)
- def save(self, destination_folder, dpi):
- """Save marker image as .png file into a destination folder."""
+ def save(self, destination_folder, dpi):
+ """Save marker image as .png file into a destination folder."""
- filename = f'{self.dictionary.name}_{self.dictionary.format}_{self.identifier}.png'
- filepath = f'{destination_folder}/{filename}'
+ filename = f'{self.dictionary.name}_{self.dictionary.format}_{self.identifier}.png'
+ filepath = f'{destination_folder}/{filename}'
- cv2.imwrite(filepath, self.image(dpi))
+ cv2.imwrite(filepath, self.image(dpi))
diff --git a/src/argaze/ArUcoMarker/ArUcoMarkerGroup.py b/src/argaze/ArUcoMarker/ArUcoMarkerGroup.py
index 1cca6c4..5575cad 100644
--- a/src/argaze/ArUcoMarker/ArUcoMarkerGroup.py
+++ b/src/argaze/ArUcoMarker/ArUcoMarkerGroup.py
@@ -206,11 +206,10 @@ class ArUcoMarkerGroup(DataFeatures.PipelineStepObject):
"""Load ArUco markers group from .obj file.
!!! note
- Expected object (o) name format: <DICTIONARY>#<IDENTIFIER>_Marker
+ Expected object (o) name format: <DICTIONARY>#<IDENTIFIER>_Marker
!!! note
- All markers have to belong to the same dictionary.
-
+ All markers have to belong to the same dictionary.
"""
new_dictionary = None
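To make the expected naming concrete, a hypothetical .obj export following this convention could contain an entry like the one below (identifier and vertex coordinates are purely illustrative):

```obj
# Illustrative marker from DICT_APRILTAG_16h5 with hypothetical identifier 11
o DICT_APRILTAG_16h5#11_Marker
v -2.0 -2.0 0.0
v 2.0 -2.0 0.0
v 2.0 2.0 0.0
v -2.0 2.0 0.0
f 1 2 3 4
```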
diff --git a/src/argaze/AreaOfInterest/AOI2DScene.py b/src/argaze/AreaOfInterest/AOI2DScene.py
index 9c74637..b19c6e9 100644
--- a/src/argaze/AreaOfInterest/AOI2DScene.py
+++ b/src/argaze/AreaOfInterest/AOI2DScene.py
@@ -43,10 +43,10 @@ class AOI2DScene(AOIFeatures.AOIScene):
svg_filepath: path to svg file
!!! note
- Available SVG elements are: path, rect and circle.
+ Available SVG elements are: path, rect and circle.
!!! warning
- Available SVG path d-string commands are: MoveTo (M) LineTo (L) and ClosePath (Z) commands.
+ Available SVG path d-string commands are: MoveTo (M) LineTo (L) and ClosePath (Z) commands.
"""
with minidom.parse(svg_filepath) as description_file:
diff --git a/src/argaze/AreaOfInterest/AOI3DScene.py b/src/argaze/AreaOfInterest/AOI3DScene.py
index 13ea354..232329c 100644
--- a/src/argaze/AreaOfInterest/AOI3DScene.py
+++ b/src/argaze/AreaOfInterest/AOI3DScene.py
@@ -179,8 +179,8 @@ class AOI3DScene(AOIFeatures.AOIScene):
"""Get AOI which are inside and out a given cone field.
!!! note
- **By default**
- The cone have its tip at origin and its base oriented to positive Z axis.
+ **By default**
+        The cone has its tip at the origin and its base oriented toward the positive Z axis.
Returns:
scene inside the cone
@@ -226,11 +226,11 @@ class AOI3DScene(AOIFeatures.AOIScene):
D: camera distortion coefficients vector
!!! danger
- Camera distortion coefficients could project points which are far from image frame into it.
+        Camera distortion coefficients could project points which are far from the image frame into it.
!!! note
- As gaze is mainly focusing on frame center, where the distortion is low,
- it could be acceptable to not use camera distortion.
+        As gaze mainly focuses on the frame center, where the distortion is low,
+        it could be acceptable not to use camera distortion.
"""
aoi2D_scene = AOI2DScene.AOI2DScene()
diff --git a/src/argaze/AreaOfInterest/AOIFeatures.py b/src/argaze/AreaOfInterest/AOIFeatures.py
index 25046ff..fb61f61 100644
--- a/src/argaze/AreaOfInterest/AOIFeatures.py
+++ b/src/argaze/AreaOfInterest/AOIFeatures.py
@@ -143,7 +143,7 @@ class AreaOfInterest(numpy.ndarray):
def bounding_box(self) -> numpy.array:
"""Get area's bounding box.
!!! warning
- Available for 2D AOI only."""
+ Available for 2D AOI only."""
assert (self.points_number > 1)
assert (self.dimension == 2)
@@ -162,7 +162,7 @@ class AreaOfInterest(numpy.ndarray):
def clockwise(self) -> Self:
"""Get area points in clockwise order.
!!! warning
- Available for 2D AOI only."""
+ Available for 2D AOI only."""
assert (self.dimension == 2)
@@ -175,9 +175,9 @@ class AreaOfInterest(numpy.ndarray):
def contains_point(self, point: tuple) -> bool:
"""Is a point inside area?
!!! warning
- Available for 2D AOI only.
+ Available for 2D AOI only.
!!! danger
- The AOI points must be sorted in clockwise order."""
+ The AOI points must be sorted in clockwise order."""
assert (self.dimension == 2)
assert (len(point) == self.dimension)
@@ -187,9 +187,9 @@ class AreaOfInterest(numpy.ndarray):
def inner_axis(self, x: float, y: float) -> tuple:
"""Transform a point coordinates from global axis to AOI axis.
!!! warning
- Available for 2D AOI only.
+ Available for 2D AOI only.
!!! danger
- The AOI points must be sorted in clockwise order."""
+ The AOI points must be sorted in clockwise order."""
assert (self.dimension == 2)
@@ -210,9 +210,9 @@ class AreaOfInterest(numpy.ndarray):
def outter_axis(self, x: float, y: float) -> tuple:
"""Transform a point coordinates from AOI axis to global axis.
!!! danger
- The AOI points must be sorted in clockwise order.
+ The AOI points must be sorted in clockwise order.
!!! danger
- The AOI must be a rectangle.
+ The AOI must be a rectangle.
"""
# Origin point
@@ -230,7 +230,7 @@ class AreaOfInterest(numpy.ndarray):
def circle_intersection(self, center: tuple, radius: float) -> tuple[numpy.array, float, float]:
"""Get intersection shape with a circle, intersection area / AOI area ratio and intersection area / circle area ratio.
!!! warning
- Available for 2D AOI only.
+ Available for 2D AOI only.
Returns:
intersection shape
@@ -267,7 +267,7 @@ class AreaOfInterest(numpy.ndarray):
def draw(self, image: numpy.array, color, border_size=1):
"""Draw 2D AOI into image.
!!! warning
- Available for 2D AOI only."""
+ Available for 2D AOI only."""
assert (self.dimension == 2)
diff --git a/src/argaze/DataFeatures.py b/src/argaze/DataFeatures.py
index 60e382b..2629e8e 100644
--- a/src/argaze/DataFeatures.py
+++ b/src/argaze/DataFeatures.py
@@ -134,7 +134,7 @@ def from_json(filepath: str) -> any:
Load object from json file.
!!! note
- The directory where json file is will be used as global working directory.
+        The directory containing the json file will be used as the global working directory.
Parameters:
filepath: path to json file
@@ -354,8 +354,7 @@ class TimestampedObjectsList(list):
"""Handle timestamped object into a list.
!!! warning "Timestamped objects are not sorted internally"
-
- Timestamped objects are considered to be stored according to their coming time.
+    Timestamped objects are considered to be stored according to their arrival time.
"""
# noinspection PyMissingConstructor
@@ -450,12 +449,10 @@ class TimestampedObjectsList(list):
For example: to convert {"point": (0, 0)} data as two separated "x" and "y" columns, use split={"point": ["x", "y"]}
!!! warning "Values must be dictionaries"
-
- Each key is stored as a column name.
+ Each key is stored as a column name.
!!! note
-
- Timestamps are stored as index column called 'timestamp'.
+        Timestamps are stored as an index column called 'timestamp'.
"""
df = pandas.DataFrame(self.tuples(), columns=self.__object_properties_names)
diff --git a/src/argaze/GazeAnalysis/DeviationCircleCoverage.py b/src/argaze/GazeAnalysis/DeviationCircleCoverage.py
index 3d910c7..3bf8f46 100644
--- a/src/argaze/GazeAnalysis/DeviationCircleCoverage.py
+++ b/src/argaze/GazeAnalysis/DeviationCircleCoverage.py
@@ -26,195 +26,195 @@ from argaze.GazeAnalysis import DispersionThresholdIdentification, VelocityThres
class AOIMatcher(GazeFeatures.AOIMatcher):
- """Matching algorithm based on fixation's deviation circle coverage over AOI."""
+ """Matching algorithm based on fixation's deviation circle coverage over AOI."""
- @DataFeatures.PipelineStepInit
- def __init__(self, **kwargs):
+ @DataFeatures.PipelineStepInit
+ def __init__(self, **kwargs):
- # Init AOIMatcher class
- super().__init__()
+ # Init AOIMatcher class
+ super().__init__()
- self.__coverage_threshold = 0
+ self.__coverage_threshold = 0
- self.__reset()
+ self.__reset()
- @property
- def coverage_threshold(self) -> float:
- """Minimal coverage ratio to consider a fixation over an AOI (1 means that whole fixation's deviation circle have to be over the AOI)."""
- return self.__coverage_threshold
+ @property
+ def coverage_threshold(self) -> float:
+        """Minimal coverage ratio to consider a fixation over an AOI (1 means that the whole fixation's deviation circle has to be over the AOI)."""
+ return self.__coverage_threshold
- @coverage_threshold.setter
- def coverage_threshold(self, coverage_threshold: float):
+ @coverage_threshold.setter
+ def coverage_threshold(self, coverage_threshold: float):
- self.__coverage_threshold = coverage_threshold
-
- def __reset(self):
+ self.__coverage_threshold = coverage_threshold
+
+ def __reset(self):
- self.__look_count = 0
- self.__looked_aoi_data = (None, None)
- self.__looked_probabilities = {}
- self.__circle_ratio_sum = {}
- self.__matched_gaze_movement = None
- self.__matched_region = None
+ self.__look_count = 0
+ self.__looked_aoi_data = (None, None)
+ self.__looked_probabilities = {}
+ self.__circle_ratio_sum = {}
+ self.__matched_gaze_movement = None
+ self.__matched_region = None
- @DataFeatures.PipelineStepMethod
- def match(self, gaze_movement: GazeFeatures.GazeMovement, aoi_scene) -> tuple[str, AOIFeatures.AreaOfInterest]:
- """Returns AOI with the maximal fixation's deviation circle coverage if above coverage threshold."""
+ @DataFeatures.PipelineStepMethod
+ def match(self, gaze_movement: GazeFeatures.GazeMovement, aoi_scene) -> tuple[str, AOIFeatures.AreaOfInterest]:
+ """Returns AOI with the maximal fixation's deviation circle coverage if above coverage threshold."""
- if GazeFeatures.is_fixation(gaze_movement):
+ if GazeFeatures.is_fixation(gaze_movement):
- self.__look_count += 1
+ self.__look_count += 1
- max_coverage = 0.
- most_likely_looked_aoi_data = (None, None)
- matched_region = None
+ max_coverage = 0.
+ most_likely_looked_aoi_data = (None, None)
+ matched_region = None
- for name, aoi in aoi_scene.items():
+ for name, aoi in aoi_scene.items():
- # DispersionThresholdIdentification.Fixation: use maximal deviation
- if issubclass(type(gaze_movement), DispersionThresholdIdentification.Fixation):
+ # DispersionThresholdIdentification.Fixation: use maximal deviation
+ if issubclass(type(gaze_movement), DispersionThresholdIdentification.Fixation):
- fixation_circle_radius = gaze_movement.deviation_max
+ fixation_circle_radius = gaze_movement.deviation_max
- # VelocityThresholdIdentification.Fixation: use amplitude
- elif issubclass(type(gaze_movement), VelocityThresholdIdentification.Fixation):
+ # VelocityThresholdIdentification.Fixation: use amplitude
+ elif issubclass(type(gaze_movement), VelocityThresholdIdentification.Fixation):
- fixation_circle_radius = gaze_movement.amplitude
+ fixation_circle_radius = gaze_movement.amplitude
- # Otherwise, compute maximal deviation
- else:
+ # Otherwise, compute maximal deviation
+ else:
- fixation_circle_radius = max(gaze_movement.distances(gaze_movement.focus))
+ fixation_circle_radius = max(gaze_movement.distances(gaze_movement.focus))
- # Intersect
- region, _, circle_ratio = aoi.circle_intersection(gaze_movement.focus, fixation_circle_radius)
+ # Intersect
+ region, _, circle_ratio = aoi.circle_intersection(gaze_movement.focus, fixation_circle_radius)
- if name not in self.exclude and circle_ratio > self.__coverage_threshold:
+ if name not in self.exclude and circle_ratio > self.__coverage_threshold:
- # Sum circle ratio to update aoi coverage
- try:
+ # Sum circle ratio to update aoi coverage
+ try:
- self.__circle_ratio_sum[name] += circle_ratio
+ self.__circle_ratio_sum[name] += circle_ratio
- except KeyError:
+ except KeyError:
- self.__circle_ratio_sum[name] = circle_ratio
+ self.__circle_ratio_sum[name] = circle_ratio
- # Update maximal coverage and most likely looked aoi
- if self.__circle_ratio_sum[name] > max_coverage:
+ # Update maximal coverage and most likely looked aoi
+ if self.__circle_ratio_sum[name] > max_coverage:
- max_coverage = self.__circle_ratio_sum[name]
- most_likely_looked_aoi_data = (name, aoi)
- matched_region = region
-
- # Check that aoi coverage happens
- if max_coverage > 0:
+ max_coverage = self.__circle_ratio_sum[name]
+ most_likely_looked_aoi_data = (name, aoi)
+ matched_region = region
+
+ # Check that aoi coverage happens
+ if max_coverage > 0:
- # Update looked aoi data
- # noinspection PyAttributeOutsideInit
- self.__looked_aoi_data = most_likely_looked_aoi_data
+ # Update looked aoi data
+ # noinspection PyAttributeOutsideInit
+ self.__looked_aoi_data = most_likely_looked_aoi_data
- # Calculate circle ratio means as looked probabilities
- # noinspection PyAttributeOutsideInit
- self.__looked_probabilities = {}
+ # Calculate circle ratio means as looked probabilities
+ # noinspection PyAttributeOutsideInit
+ self.__looked_probabilities = {}
- for aoi_name, circle_ratio_sum in self.__circle_ratio_sum.items():
+ for aoi_name, circle_ratio_sum in self.__circle_ratio_sum.items():
- circle_ratio_mean = circle_ratio_sum / self.__look_count
+ circle_ratio_mean = circle_ratio_sum / self.__look_count
- # Avoid probability greater than 1
- self.__looked_probabilities[aoi_name] = circle_ratio_mean if circle_ratio_mean < 1 else 1
+ # Avoid probability greater than 1
+ self.__looked_probabilities[aoi_name] = circle_ratio_mean if circle_ratio_mean < 1 else 1
- # Update matched gaze movement
- # noinspection PyAttributeOutsideInit
- self.__matched_gaze_movement = gaze_movement
+ # Update matched gaze movement
+ # noinspection PyAttributeOutsideInit
+ self.__matched_gaze_movement = gaze_movement
- # Update matched region
- # noinspection PyAttributeOutsideInit
- self.__matched_region = matched_region
+ # Update matched region
+ # noinspection PyAttributeOutsideInit
+ self.__matched_region = matched_region
- # Return
- return self.__looked_aoi_data
+ # Return
+ return self.__looked_aoi_data
- elif GazeFeatures.is_saccade(gaze_movement):
+ elif GazeFeatures.is_saccade(gaze_movement):
- self.__reset()
+ self.__reset()
- elif not gaze_movement:
+ elif not gaze_movement:
- self.__reset()
+ self.__reset()
- return (None, None)
+ return (None, None)
- def draw(self, image: numpy.array, aoi_scene: AOIFeatures.AOIScene, draw_matched_fixation: dict = None, draw_matched_region: dict = None, draw_looked_aoi: dict = None, update_looked_aoi: bool = False, looked_aoi_name_color: tuple = None, looked_aoi_name_offset: tuple = (0, 0)):
- """Draw matching into image.
-
- Parameters:
- image: where to draw
- aoi_scene: to refresh looked aoi if required
- draw_matched_fixation: Fixation.draw parameters (which depends on the loaded
- gaze movement identifier module, if None, no fixation is drawn)
- draw_matched_region: AOIFeatures.AOI.draw parameters (if None, no matched region is drawn)
- draw_looked_aoi: AOIFeatures.AOI.draw parameters (if None, no looked aoi is drawn)
- update_looked_aoi:
- looked_aoi_name_color: color of text (if None, no looked aoi name is drawn)
- looked_aoi_name_offset: offset of text from the upper left aoi bounding box corner
- """
+ def draw(self, image: numpy.array, aoi_scene: AOIFeatures.AOIScene, draw_matched_fixation: dict = None, draw_matched_region: dict = None, draw_looked_aoi: dict = None, update_looked_aoi: bool = False, looked_aoi_name_color: tuple = None, looked_aoi_name_offset: tuple = (0, 0)):
+ """Draw matching into image.
+
+ Parameters:
+ image: where to draw
+ aoi_scene: to refresh looked aoi if required
+ draw_matched_fixation: Fixation.draw parameters (which depends on the loaded
+ gaze movement identifier module, if None, no fixation is drawn)
+ draw_matched_region: AOIFeatures.AOI.draw parameters (if None, no matched region is drawn)
+ draw_looked_aoi: AOIFeatures.AOI.draw parameters (if None, no looked aoi is drawn)
+ update_looked_aoi:
+ looked_aoi_name_color: color of text (if None, no looked aoi name is drawn)
+ looked_aoi_name_offset: offset of text from the upper left aoi bounding box corner
+ """
- if self.__matched_gaze_movement is not None:
+ if self.__matched_gaze_movement is not None:
- if GazeFeatures.is_fixation(self.__matched_gaze_movement):
+ if GazeFeatures.is_fixation(self.__matched_gaze_movement):
- # Draw matched fixation if required
- if draw_matched_fixation is not None:
+ # Draw matched fixation if required
+ if draw_matched_fixation is not None:
- self.__matched_gaze_movement.draw(image, **draw_matched_fixation)
-
- # Draw matched aoi
- if self.looked_aoi().all() is not None:
+ self.__matched_gaze_movement.draw(image, **draw_matched_fixation)
+
+ # Draw matched aoi
+ if self.looked_aoi().all() is not None:
- if update_looked_aoi:
+ if update_looked_aoi:
- try:
+ try:
- # noinspection PyAttributeOutsideInit
- self.__looked_aoi_data = (self.looked_aoi_name(), aoi_scene[self.looked_aoi_name()])
+ # noinspection PyAttributeOutsideInit
+ self.__looked_aoi_data = (self.looked_aoi_name(), aoi_scene[self.looked_aoi_name()])
- except KeyError:
+ except KeyError:
- pass
+ pass
- # Draw looked aoi if required
- if draw_looked_aoi is not None:
+ # Draw looked aoi if required
+ if draw_looked_aoi is not None:
- self.looked_aoi().draw(image, **draw_looked_aoi)
+ self.looked_aoi().draw(image, **draw_looked_aoi)
- # Draw matched region if required
- if draw_matched_region is not None:
+ # Draw matched region if required
+ if draw_matched_region is not None:
- self.__matched_region.draw(image, **draw_matched_region)
+ self.__matched_region.draw(image, **draw_matched_region)
- # Draw looked aoi name if required
- if looked_aoi_name_color is not None:
+ # Draw looked aoi name if required
+ if looked_aoi_name_color is not None:
- top_left_corner_pixel = numpy.rint(self.looked_aoi().bounding_box[0]).astype(int) + looked_aoi_name_offset
- cv2.putText(image, self.looked_aoi_name(), top_left_corner_pixel, cv2.FONT_HERSHEY_SIMPLEX, 1, looked_aoi_name_color, 1, cv2.LINE_AA)
+ top_left_corner_pixel = numpy.rint(self.looked_aoi().bounding_box[0]).astype(int) + looked_aoi_name_offset
+ cv2.putText(image, self.looked_aoi_name(), top_left_corner_pixel, cv2.FONT_HERSHEY_SIMPLEX, 1, looked_aoi_name_color, 1, cv2.LINE_AA)
- def looked_aoi(self) -> AOIFeatures.AreaOfInterest:
- """Get most likely looked aoi for current fixation (e.g. the aoi with the highest coverage mean value)"""
+ def looked_aoi(self) -> AOIFeatures.AreaOfInterest:
+        """Get most likely looked aoi for current fixation (i.e. the aoi with the highest coverage mean value)"""
- return self.__looked_aoi_data[1]
+ return self.__looked_aoi_data[1]
- def looked_aoi_name(self) -> str:
- """Get most likely looked aoi name for current fixation (e.g. the aoi with the highest coverage mean value)"""
+ def looked_aoi_name(self) -> str:
+        """Get most likely looked aoi name for current fixation (i.e. the aoi with the highest coverage mean value)"""
- return self.__looked_aoi_data[0]
+ return self.__looked_aoi_data[0]
- def looked_probabilities(self) -> dict:
- """Get probabilities to be looked by current fixation for each aoi.
+ def looked_probabilities(self) -> dict:
+ """Get probabilities to be looked by current fixation for each aoi.
- !!! note
- aoi where fixation deviation circle never passed the coverage threshold will be missing.
- """
+ !!! note
+ aoi where the fixation deviation circle never passed the coverage threshold will be missing.
+ """
- return self.__looked_probabilities \ No newline at end of file
+ return self.__looked_probabilities \ No newline at end of file
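A small, self-contained sketch of how a caller might use the dictionary returned by looked_probabilities(); the probability values below are stand-ins and nothing from the ArGaze API is involved.

```python
# Stand-in output of looked_probabilities(): a dict mapping each aoi name to
# its probability of being looked at by the current fixation (aoi that never
# passed the coverage threshold are simply absent).
looked_probabilities = {'screen': 0.72, 'keyboard': 0.21}

if looked_probabilities:

    # The most likely looked aoi is the one with the highest probability
    most_likely_aoi = max(looked_probabilities, key=looked_probabilities.get)

    print(f'most likely looked aoi: {most_likely_aoi} ({looked_probabilities[most_likely_aoi]:.0%})')
```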
diff --git a/src/argaze/GazeAnalysis/Entropy.py b/src/argaze/GazeAnalysis/Entropy.py
index dfed82f..9d45d1d 100644
--- a/src/argaze/GazeAnalysis/Entropy.py
+++ b/src/argaze/GazeAnalysis/Entropy.py
@@ -24,79 +24,79 @@ from argaze.GazeAnalysis import TransitionMatrix
class AOIScanPathAnalyzer(GazeFeatures.AOIScanPathAnalyzer):
- """Implementation of entropy algorithm as described in:
+ """Implementation of entropy algorithm as described in:
- **Krejtz K., Szmidt T., Duchowski A.T. (2014).**
- *Entropy-based statistical analysis of eye movement transitions.*
- Proceedings of the Symposium on Eye Tracking Research and Applications (ETRA'14, 159-166).
- [https://doi.org/10.1145/2578153.2578176](https://doi.org/10.1145/2578153.2578176)
- """
+ **Krejtz K., Szmidt T., Duchowski A.T. (2014).**
+ *Entropy-based statistical analysis of eye movement transitions.*
+ Proceedings of the Symposium on Eye Tracking Research and Applications (ETRA'14, 159-166).
+ [https://doi.org/10.1145/2578153.2578176](https://doi.org/10.1145/2578153.2578176)
+ """
- @DataFeatures.PipelineStepInit
- def __init__(self, **kwargs):
+ @DataFeatures.PipelineStepInit
+ def __init__(self, **kwargs):
- # Init AOIScanPathAnalyzer class
- super().__init__()
+ # Init AOIScanPathAnalyzer class
+ super().__init__()
- self.__transition_matrix_analyzer = None
- self.__stationary_entropy = -1
- self.__transition_entropy = -1
+ self.__transition_matrix_analyzer = None
+ self.__stationary_entropy = -1
+ self.__transition_entropy = -1
- @property
- def transition_matrix_analyzer(self) -> TransitionMatrix.AOIScanPathAnalyzer:
- """Bind to TransitionMatrix analyzer to get its transition_matrix_probabilities.
+ @property
+ def transition_matrix_analyzer(self) -> TransitionMatrix.AOIScanPathAnalyzer:
+ """Bind to TransitionMatrix analyzer to get its transition_matrix_probabilities.
- !!! warning "Mandatory"
- TransitionMatrix analyzer have to be loaded before.
- """
+ !!! warning "Mandatory"
+ TransitionMatrix analyzer has to be loaded before.
+ """
- return self.__transition_matrix_analyzer
+ return self.__transition_matrix_analyzer
- @transition_matrix_analyzer.setter
- def transition_matrix_analyzer(self, transition_matrix_analyzer: TransitionMatrix.AOIScanPathAnalyzer):
+ @transition_matrix_analyzer.setter
+ def transition_matrix_analyzer(self, transition_matrix_analyzer: TransitionMatrix.AOIScanPathAnalyzer):
- self.__transition_matrix_analyzer = transition_matrix_analyzer
+ self.__transition_matrix_analyzer = transition_matrix_analyzer
- @DataFeatures.PipelineStepMethod
- def analyze(self, aoi_scan_path: GazeFeatures.AOIScanPath):
+ @DataFeatures.PipelineStepMethod
+ def analyze(self, aoi_scan_path: GazeFeatures.AOIScanPath):
- assert(len(aoi_scan_path) > 1)
+ assert(len(aoi_scan_path) > 1)
- # Count total number of fixations and how many fixations are there per aoi
- scan_fixations_count, aoi_fixations_count = aoi_scan_path.fixations_count()
+ # Count total number of fixations and how many fixations there are per aoi
+ scan_fixations_count, aoi_fixations_count = aoi_scan_path.fixations_count()
- # Probability to have a fixation onto each aoi
- stationary_probabilities = {aoi: count/scan_fixations_count for aoi, count in aoi_fixations_count.items()}
+ # Probability to have a fixation onto each aoi
+ stationary_probabilities = {aoi: count/scan_fixations_count for aoi, count in aoi_fixations_count.items()}
- # Stationary entropy
- self.__stationary_entropy = 0
+ # Stationary entropy
+ self.__stationary_entropy = 0
- for aoi, p in stationary_probabilities.items():
+ for aoi, p in stationary_probabilities.items():
- self.__stationary_entropy += p * numpy.log(p + 1e-9)
+ self.__stationary_entropy += p * numpy.log(p + 1e-9)
- self.__stationary_entropy *= -1
+ self.__stationary_entropy *= -1
- # Transition entropy
- self.__transition_entropy = 0
+ # Transition entropy
+ self.__transition_entropy = 0
- destination_p_log_sum = self.transition_matrix_analyzer.transition_matrix_probabilities.apply(lambda row: row.apply(lambda p: p * numpy.log(p + 1e-9)).sum(), axis=1)
+ destination_p_log_sum = self.transition_matrix_analyzer.transition_matrix_probabilities.apply(lambda row: row.apply(lambda p: p * numpy.log(p + 1e-9)).sum(), axis=1)
- for aoi, s in destination_p_log_sum.items():
+ for aoi, s in destination_p_log_sum.items():
- self.__transition_entropy += s * stationary_probabilities[aoi]
+ self.__transition_entropy += s * stationary_probabilities[aoi]
- self.__transition_entropy *= -1
+ self.__transition_entropy *= -1
- @property
- def stationary_entropy(self) -> float:
- """Stationary entropy."""
+ @property
+ def stationary_entropy(self) -> float:
+ """Stationary entropy."""
- return self.__stationary_entropy
+ return self.__stationary_entropy
- @property
- def transition_entropy(self) -> float:
- """Transition entropy."""
+ @property
+ def transition_entropy(self) -> float:
+ """Transition entropy."""
- return self.__transition_entropy
- \ No newline at end of file
+ return self.__transition_entropy
+ \ No newline at end of file
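To make the two measures concrete, here is a standalone sketch that recomputes stationary and transition entropy from made-up fixation counts and a hand-written transition matrix; the formulas and the 1e-9 epsilon mirror the analyze() method above, but nothing from the ArGaze pipeline is used.

```python
import numpy
import pandas

# Stand-in data: fixation counts per aoi and a row-normalized transition matrix.
aoi_fixations_count = {'A': 6, 'B': 3, 'C': 1}
scan_fixations_count = sum(aoi_fixations_count.values())

transition_matrix_probabilities = pandas.DataFrame(
    [[0.5, 0.3, 0.2],
     [0.4, 0.4, 0.2],
     [0.3, 0.3, 0.4]],
    index=['A', 'B', 'C'], columns=['A', 'B', 'C'])

# Stationary entropy: -sum_i p_i * log(p_i)
stationary_probabilities = {aoi: count / scan_fixations_count for aoi, count in aoi_fixations_count.items()}
stationary_entropy = -sum(p * numpy.log(p + 1e-9) for p in stationary_probabilities.values())

# Transition entropy: -sum_i p_i * sum_j p_ij * log(p_ij)
destination_p_log_sum = transition_matrix_probabilities.apply(
    lambda row: row.apply(lambda p: p * numpy.log(p + 1e-9)).sum(), axis=1)
transition_entropy = -sum(s * stationary_probabilities[aoi] for aoi, s in destination_p_log_sum.items())

print(f'stationary entropy: {stationary_entropy:.3f}')
print(f'transition entropy: {transition_entropy:.3f}')
```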
diff --git a/src/argaze/GazeFeatures.py b/src/argaze/GazeFeatures.py
index 5ef3c32..4aa65e7 100644
--- a/src/argaze/GazeFeatures.py
+++ b/src/argaze/GazeFeatures.py
@@ -86,10 +86,10 @@ class GazePosition(tuple):
"""Add position.
!!! note
- The returned position precision is the maximal precision.
+ The returned position precision is the maximal of the two positions' precisions.
!!! note
- The returned position timestamp is the self object timestamp.
+ The returned position timestamp is the self object timestamp.
"""
if self.__precision is not None and position.precision is not None:
@@ -106,10 +106,10 @@ class GazePosition(tuple):
"""Subtract position.
!!! note
- The returned position precision is the maximal precision.
+ The returned position precision is the maximal of the two positions' precisions.
!!! note
- The returned position timestamp is the self object timestamp.
+ The returned position timestamp is the self object timestamp.
"""
if self.__precision is not None and position.precision is not None:
@@ -124,10 +124,10 @@ class GazePosition(tuple):
"""Reversed subtract position.
!!! note
- The returned position precision is the maximal precision.
+ The returned position precision is the maximal of the two positions' precisions.
!!! note
- The returned position timestamp is the self object timestamp.
+ The returned position timestamp is the self object timestamp.
"""
if self.__precision is not None and position.precision is not None:
@@ -142,10 +142,10 @@ class GazePosition(tuple):
"""Multiply position by a factor.
!!! note
- The returned position precision is also multiplied by the factor.
+ The returned position precision is also multiplied by the factor.
!!! note
- The returned position timestamp is the self object timestamp.
+ The returned position timestamp is the self object timestamp.
"""
return GazePosition(tuple(numpy.array(self) * factor), precision=self.__precision * factor if self.__precision is not None else None, timestamp=self.timestamp)
@@ -153,10 +153,10 @@ class GazePosition(tuple):
"""divide position by a factor.
!!! note
- The returned position precision is also divided by the factor.
+ The returned position precision is also divided by the factor.
!!! note
- The returned position timestamp is the self object timestamp.
+ The returned position timestamp is the self object timestamp.
"""
return GazePosition(tuple(numpy.array(self) / factor), precision=self.__precision / factor if self.__precision is not None else None, timestamp=self.timestamp)
@@ -164,10 +164,10 @@ class GazePosition(tuple):
"""Power position by a factor.
!!! note
- The returned position precision is also powered by the factor.
+ The returned position precision is also powered by the factor.
!!! note
- The returned position timestamp is the self object timestamp.
+ The returned position timestamp is the self object timestamp.
"""
return GazePosition(tuple(numpy.array(self) ** factor),
precision=self.__precision ** factor if self.__precision is not None else None,
@@ -394,7 +394,7 @@ class GazeMovement(TimeStampedGazePositions):
"""Define abstract gaze movement class as timestamped gaze positions list.
!!! note
- Gaze movement timestamp is always equal to its first position timestamp.
+ Gaze movement timestamp is always equal to its first position timestamp.
Parameters:
positions: timestamp gaze positions.
@@ -578,7 +578,7 @@ class GazeMovementIdentifier(DataFeatures.PipelineStepObject):
"""Identify gaze movement from successive timestamped gaze positions.
!!! warning "Mandatory"
- Each identified gaze movement have to share its first/last gaze position with previous/next gaze movement.
+ Each identified gaze movement has to share its first/last gaze position with the previous/next gaze movement.
Parameters:
timestamped_gaze_position: new gaze position from where identification have to be done considering former gaze positions.
@@ -694,7 +694,7 @@ class ScanStep():
last_saccade: a saccade that comes after the previous fixation.
!!! warning
- Scan step have to start by a fixation and then end by a saccade.
+ Scan step has to start with a fixation and end with a saccade.
"""
def __init__(self, first_fixation: Fixation, last_saccade: Saccade):
@@ -813,7 +813,7 @@ class ScanPath(list):
def append_fixation(self, fixation):
"""Append new fixation to scan path.
!!! warning
- Consecutive fixations are ignored keeping the last fixation"""
+ Consecutive fixations are ignored, keeping only the last fixation"""
self.__last_fixation = fixation
@@ -925,7 +925,7 @@ class AOIScanStep():
letter: AOI unique letter to ease sequence analysis.
!!! warning
- Aoi scan step have to start by a fixation and then end by a saccade.
+ Aoi scan step has to start with a fixation and end with a saccade.
"""
def __init__(self, movements: TimeStampedGazeMovements, aoi: str = '', letter: str = ''):
@@ -1014,7 +1014,7 @@ class AOIScanPath(list):
"""Edit list of all expected aoi.
!!! warning
- This will clear the AOIScanPath
+ This will clear the AOIScanPath
"""
# Check expected aoi are not the same as previous ones
@@ -1134,7 +1134,7 @@ class AOIScanPath(list):
"""Append new fixation to aoi scan path and return last new aoi scan step if one have been created.
!!! warning
- It could raise AOIScanStepError
+ It could raise AOIScanStepError
"""
# Replace None aoi by generic OutsideAOI name
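The precision and timestamp notes adjusted above are easier to check with a quick example. A minimal sketch, assuming the GazePosition constructor accepts a value tuple plus precision and timestamp keywords as used inside this diff.

```python
# Minimal sketch, assuming the GazePosition constructor accepts a value tuple
# plus precision and timestamp keywords (as used inside GazeFeatures.py).
from argaze import GazeFeatures

a = GazeFeatures.GazePosition((10, 20), precision=2, timestamp=0)
b = GazeFeatures.GazePosition((5, 5), precision=4, timestamp=10)

added = a + b
print(added.precision, added.timestamp)  # maximal precision (4) and self timestamp (0)

scaled = a * 2
print(tuple(scaled), scaled.precision)   # value and precision both scaled by the factor
```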
diff --git a/src/argaze/__init__.py b/src/argaze/__init__.py
index a07fa93..be4cbfc 100644
--- a/src/argaze/__init__.py
+++ b/src/argaze/__init__.py
@@ -8,7 +8,7 @@ def load(filepath: str) -> any:
Load object from json file.
!!! note
- The directory where json file is will be used as global working directory.
+ The directory where the json file is located will be used as the global working directory.
Parameters:
filepath: path to json file
diff --git a/src/argaze/__main__.py b/src/argaze/__main__.py
index 77875a8..76e9664 100644
--- a/src/argaze/__main__.py
+++ b/src/argaze/__main__.py
@@ -60,9 +60,10 @@ def load_context(args):
if draw_help:
cv2.rectangle(image, (int(width/4), int(height/3)), (int(width*3/4), int(height*2/3)), (127, 127, 127), -1)
+ cv2.rectangle(image, (int(width/4), int(height/3)), (int(width*3/4), int(height*2/3)), (255, 255, 255), 1)
info_stack = 1
- cv2.putText(image, f'HELP', (int(width/4)+20, int(height/3)+(info_stack*40)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(image, f'(H)elp', (int(width/4)+20, int(height/3)+(info_stack*40)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
# Blanck line
info_stack += 1
@@ -82,13 +83,14 @@ def load_context(args):
info_stack += 1
cv2.putText(image, f'Press f to pause/resume visualisation', (int(width/4)+20, int(height/3)+(info_stack*40)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
-
- info_stack += 1
- cv2.putText(image, f'Press h to hide/show this help panel', (int(width/4)+20, int(height/3)+(info_stack*40)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
info_stack += 1
cv2.putText(image, f'Press Escape to quit', (int(width/4)+20, int(height/3)+(info_stack*40)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+ else:
+
+ cv2.putText(image, f'(H)elp', (width-105, height - 15), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+
if args.display is not None:
display_size = tuple(args.display)
@@ -127,7 +129,7 @@ def load_context(args):
# Draw parameters
draw_pipeline = True
- draw_help = True
+ draw_help = False
# Waiting for 'ctrl+C' interruption
with contextlib.suppress(KeyboardInterrupt), os.fdopen(pipe_file) if args.pipe_path is not None else contextlib.nullcontext() as pipe:
diff --git a/src/argaze/utils/UtilsFeatures.py b/src/argaze/utils/UtilsFeatures.py
index ff2bee6..23d6b24 100644
--- a/src/argaze/utils/UtilsFeatures.py
+++ b/src/argaze/utils/UtilsFeatures.py
@@ -250,7 +250,7 @@ class FileWriter(DataFeatures.PipelineStepObject):
"""Write data as a new line into file.
!!! note
- Tuple elements are converted into quoted strings separated by separator string.
+ Tuple elements are converted into quoted strings separated by separator string.
"""
# Format list or tuple element into quoted strings
@@ -307,7 +307,7 @@ class FileReader(DataFeatures.PipelineStepObject):
"""Read next data from file.
!!! note
- Quoted strings separated by separator string are converted into tuple elements.
+ Quoted strings separated by separator string are converted into tuple elements.
"""
try:
diff --git a/src/argaze/utils/contexts/OpenCV.py b/src/argaze/utils/contexts/OpenCV.py
index 111ed8e..273705a 100644
--- a/src/argaze/utils/contexts/OpenCV.py
+++ b/src/argaze/utils/contexts/OpenCV.py
@@ -26,7 +26,12 @@ import cv2
from argaze import ArFeatures, DataFeatures
-class Window(ArFeatures.LiveProcessingContext):
+class Cursor(ArFeatures.ArContext):
+ """Process cursor position over OpenCV window.
+
+ !!! warning
+ It is assumed that an OpenCV window with the same name as the context is used to display the context's pipeline image.
+ """
@DataFeatures.PipelineStepInit
def __init__(self, **kwargs):
@@ -37,13 +42,13 @@ class Window(ArFeatures.LiveProcessingContext):
@DataFeatures.PipelineStepEnter
def __enter__(self):
- logging.info('OpenCV window context starts...')
+ logging.info('OpenCV.Cursor context starts...')
- # Create a window to display context
+ # Create a window
cv2.namedWindow(self.name, cv2.WINDOW_AUTOSIZE)
# Init timestamp
- self.__start_time = time.time()
+ self._start_time = time.time()
# Attach mouse event callback to window
cv2.setMouseCallback(self.name, self.__on_mouse_event)
@@ -53,7 +58,7 @@ class Window(ArFeatures.LiveProcessingContext):
@DataFeatures.PipelineStepExit
def __exit__(self, exception_type, exception_value, exception_traceback):
- logging.info('OpenCV window context stops...')
+ logging.info('OpenCV.Cursor context stops...')
# Delete window
cv2.destroyAllWindows()
@@ -61,20 +66,24 @@ class Window(ArFeatures.LiveProcessingContext):
def __on_mouse_event(self, event, x, y, flags, param):
"""Process pointer position."""
- logging.debug('Window.on_mouse_event %i %i', x, y)
+ logging.debug('OpenCV.Cursor.on_mouse_event %i %i', x, y)
if not self.is_paused():
# Process timestamped gaze position
- self._process_gaze_position(timestamp = int((time.time() - self.__start_time) * 1e3), x = x, y = y)
+ self._process_gaze_position(timestamp = int((time.time() - self._start_time) * 1e3), x = x, y = y)
-class Movie(ArFeatures.PostProcessingContext):
+class Movie(Cursor):
+ """Process movie images and cursor position over OpenCV window.
+ !!! warning
+ It is assumed that an OpenCV window with the same name as the context is used to display the context's pipeline image.
+ """
@DataFeatures.PipelineStepInit
def __init__(self, **kwargs):
- # Init PostProcessingContext class
+ # Init Cursor class
super().__init__()
# Init private attributes
@@ -109,16 +118,10 @@ class Movie(ArFeatures.PostProcessingContext):
@DataFeatures.PipelineStepEnter
def __enter__(self):
- logging.info('OpenCV movie context starts...')
-
- # Create a window to display context
- cv2.namedWindow(self.name, cv2.WINDOW_AUTOSIZE)
-
- # Init timestamp
- self.__start_time = time.time()
+ logging.info('OpenCV.Movie context starts...')
- # Attach mouse event callback to window
- cv2.setMouseCallback(self.name, self.__on_mouse_event)
+ # Enter in Cursor context
+ super().__enter__()
# Open reading thread
self.__reading_thread = threading.Thread(target=self.__read)
@@ -174,33 +177,23 @@ class Movie(ArFeatures.PostProcessingContext):
@DataFeatures.PipelineStepExit
def __exit__(self, exception_type, exception_value, exception_traceback):
- logging.info('OpenCV movie context stops...')
+ logging.info('OpenCV.Movie context stops...')
+
+ # Exit from Cursor context
+ super().__exit__(exception_type, exception_value, exception_traceback)
# Close data stream
- self._stop_event.set()
+ self.stop()
# Stop reading thread
threading.Thread.join(self.__reading_thread)
- # Delete window
- cv2.destroyAllWindows()
-
- def __on_mouse_event(self, event, x, y, flags, param):
- """Process pointer position."""
-
- logging.debug('Window.on_mouse_event %i %i', x, y)
-
- if not self.is_paused():
-
- # Process timestamped gaze position
- self._process_gaze_position(timestamp = int((time.time() - self.__start_time) * 1e3), x = x, y = y)
-
def refresh(self):
"""Refresh current frame."""
self.__refresh = True
def previous(self):
-
+ """Go to previous frame."""
self.__next_image_index -= 1
# Clip image index
@@ -208,6 +201,7 @@ class Movie(ArFeatures.PostProcessingContext):
self.__next_image_index = 0
def next(self):
+ """Go to next frame."""
self.__next_image_index += 1
@@ -217,13 +211,13 @@ class Movie(ArFeatures.PostProcessingContext):
@property
def duration(self) -> int|float:
- """Get data duration."""
+ """Get movie duration."""
return self.__movie_length / self.__movie_fps
@property
def progression(self) -> float:
- """Get data processing progression between 0 and 1."""
+ """Get movie processing progression between 0 and 1."""
if self.__current_image_index is not None:
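Outside of ArGaze, the core of the new Cursor context is a plain OpenCV mouse callback that timestamps cursor moves relative to the moment the window was opened. A standalone illustration of that pattern (window name, frame size and print output are arbitrary):

```python
# Standalone illustration (not the ArGaze API): an OpenCV mouse callback that
# timestamps cursor moves in milliseconds relative to the window opening time,
# which is the pattern the Cursor context relies on.
import time

import cv2
import numpy

start_time = time.time()

def on_mouse_event(event, x, y, flags, param):

    # Millisecond timestamp since the window was opened
    timestamp = int((time.time() - start_time) * 1e3)
    print(f'{timestamp} ms: cursor at ({x}, {y})')

cv2.namedWindow('demo', cv2.WINDOW_AUTOSIZE)
cv2.setMouseCallback('demo', on_mouse_event)

# Press Escape to quit
while cv2.waitKey(40) != 27:

    cv2.imshow('demo', numpy.zeros((480, 640, 3), dtype=numpy.uint8))

cv2.destroyAllWindows()
```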
diff --git a/src/argaze/utils/contexts/Random.py b/src/argaze/utils/contexts/Random.py
index 29b9830..c7b2187 100644
--- a/src/argaze/utils/contexts/Random.py
+++ b/src/argaze/utils/contexts/Random.py
@@ -71,8 +71,29 @@ class GazePositionGenerator(ArFeatures.ArContext):
# Edit millisecond timestamp
timestamp = int((time.time() - start_time) * 1e3)
- self.__x += random.randint(-10, 10)
- self.__y += random.randint(-10, 10)
+ # Random saccade
+ if random.randint(0, 100) == 0:
+
+ rand_x = random.randint(0, int(self.__range[0] / 2))
+ rand_y = random.randint(0, int(self.__range[1] / 2))
+
+ self.__x += random.randint(-rand_x, rand_x)
+ self.__y += random.randint(-rand_y, rand_y)
+
+ # Random fixation
+ else:
+
+ self.__x += random.randint(-1, 1)
+ self.__y += random.randint(-1, 1)
+
+ # Clip position
+ if self.__x < 0 or self.__x > self.__range[0]:
+
+ self.__x = int(self.__range[0] / 2)
+
+ if self.__y < 0 or self.__y > self.__range[1]:
+
+ self.__y = int(self.__range[1] / 2)
logging.debug('> timestamp=%i, x=%i, y=%i', timestamp, self.__x, self.__y)
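A standalone sketch of the random-walk model introduced above, slightly simplified: roughly one sample in a hundred triggers a large "saccade" jump of up to half the range, every other sample adds small "fixation" jitter, and the position snaps back to the centre whenever it leaves the range (the screen size below is an assumption).

```python
# Standalone sketch of the random walk above, slightly simplified: rare large
# "saccades", small "fixation" jitter otherwise, and a reset to the centre of
# the (assumed) range when the position drifts out of bounds.
import random

range_x, range_y = 1920, 1080
x, y = range_x // 2, range_y // 2

for _ in range(1000):

    if random.randint(0, 100) == 0:

        # Random saccade: jump by up to half of the range
        x += random.randint(-range_x // 2, range_x // 2)
        y += random.randint(-range_y // 2, range_y // 2)

    else:

        # Random fixation: small jitter
        x += random.randint(-1, 1)
        y += random.randint(-1, 1)

    # Reset position to the centre when it leaves the range
    if x < 0 or x > range_x:
        x = range_x // 2

    if y < 0 or y > range_y:
        y = range_y // 2
```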
diff --git a/src/argaze/utils/contexts/TobiiProGlasses2.py b/src/argaze/utils/contexts/TobiiProGlasses2.py
index 80487f4..7f45f32 100644
--- a/src/argaze/utils/contexts/TobiiProGlasses2.py
+++ b/src/argaze/utils/contexts/TobiiProGlasses2.py
@@ -395,7 +395,7 @@ class LiveStream(ArFeatures.LiveProcessingContext):
@property
def configuration(self) -> dict:
- """Patch system configuration dictionary."""
+ """Edit system configuration dictionary."""
return self.__configuration
@configuration.setter
@@ -471,7 +471,7 @@ class LiveStream(ArFeatures.LiveProcessingContext):
"""Bind to a participant or create one if it doesn't exist.
!!! warning
- Bind to a project before.
+ Bind to a project before.
"""
if self.__participant_name is None:
diff --git a/src/argaze/utils/demo/opencv_cursor_context.json b/src/argaze/utils/demo/opencv_cursor_context.json
new file mode 100644
index 0000000..659ffd6
--- /dev/null
+++ b/src/argaze/utils/demo/opencv_cursor_context.json
@@ -0,0 +1,6 @@
+{
+ "argaze.utils.contexts.OpenCV.Cursor" : {
+ "name": "OpenCV cursor",
+ "pipeline": "gaze_analysis_pipeline.json"
+ }
+} \ No newline at end of file
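One hypothetical way to run this new demo configuration from Python rather than the argaze command line; argaze.load() and the context-manager protocol come from the __init__.py and OpenCV.py changes in this commit, while the file path and display loop are assumptions.

```python
# Hypothetical way to run this demo configuration from Python instead of the
# argaze command line; the file path and display loop are assumptions.
import cv2

import argaze

with argaze.load('src/argaze/utils/demo/opencv_cursor_context.json') as context:

    # Keep pumping OpenCV events so the context's mouse callback fires;
    # press Escape to stop.
    while cv2.waitKey(40) != 27:
        pass
```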
diff --git a/src/argaze/utils/demo/opencv_window_context.json b/src/argaze/utils/demo/opencv_window_context.json
deleted file mode 100644
index d589665..0000000
--- a/src/argaze/utils/demo/opencv_window_context.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
- "argaze.utils.contexts.OpenCV.Window" : {
- "name": "OpenCV Window",
- "pipeline": "gaze_analysis_pipeline.json"
- }
-} \ No newline at end of file