author    Théo de la Hogue  2024-07-03 17:14:43 +0200
committer Théo de la Hogue  2024-07-03 17:14:43 +0200
commit    8fc18a434da400f0fe82707e23838d6cc40a787d (patch)
tree      9e42c9f7edb9364e9a0afedab30194820987a907
parent    7b82b09e87d1475acf5040c67323421699a3ad06 (diff)
Rewriting eye tracking context and gaze analysis sections.
-rw-r--r--  docs/img/argaze_load_gui.png  bin 0 -> 168761 bytes
-rw-r--r--  docs/img/argaze_load_gui_random.png  bin 0 -> 33593 bytes
-rw-r--r--  docs/img/argaze_load_gui_random_pipeline.png  bin 0 -> 74788 bytes
-rw-r--r--  docs/img/eye_tracker_context.png  bin 0 -> 41128 bytes
-rw-r--r--  docs/img/pipeline_input_context.png  bin 49064 -> 0 bytes
-rw-r--r--  docs/index.md  12
-rw-r--r--  docs/user_guide/eye_tracking_context/advanced_topics/context_definition.md  193
-rw-r--r--  docs/user_guide/eye_tracking_context/advanced_topics/scripting.md  106
-rw-r--r--  docs/user_guide/eye_tracking_context/advanced_topics/timestamped_gaze_positions_edition.md (renamed from docs/user_guide/gaze_analysis_pipeline/timestamped_gaze_positions_edition.md)  13
-rw-r--r--  docs/user_guide/eye_tracking_context/configuration_and_execution.md  65
-rw-r--r--  docs/user_guide/eye_tracking_context/context_modules/opencv.md  47
-rw-r--r--  docs/user_guide/eye_tracking_context/context_modules/pupil_labs.md  32
-rw-r--r--  docs/user_guide/eye_tracking_context/context_modules/random.md  32
-rw-r--r--  docs/user_guide/eye_tracking_context/context_modules/tobii_pro_glasses_2.md  59
-rw-r--r--  docs/user_guide/eye_tracking_context/introduction.md  18
-rw-r--r--  docs/user_guide/gaze_analysis_pipeline/advanced_topics/scripting.md  54
-rw-r--r--  docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md  2
-rw-r--r--  docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md  58
-rw-r--r--  docs/user_guide/gaze_analysis_pipeline/introduction.md  9
-rw-r--r--  docs/user_guide/gaze_analysis_pipeline/visualization.md  33
-rw-r--r--  docs/user_guide/pipeline_input_context/configuration_and_connection.md  35
-rw-r--r--  docs/user_guide/pipeline_input_context/context_definition.md  57
-rw-r--r--  docs/user_guide/pipeline_input_context/introduction.md  24
-rw-r--r--  docs/user_guide/utils/demonstrations_scripts.md  16
-rw-r--r--  docs/user_guide/utils/estimate_aruco_markers_pose.md  60
-rw-r--r--  docs/user_guide/utils/main_commands.md (renamed from docs/user_guide/utils/ready-made_scripts.md)  31
-rw-r--r--  mkdocs.yml  25
-rw-r--r--  src/argaze/ArFeatures.py  4
-rw-r--r--  src/argaze/utils/contexts/OpenCV.py  66
-rw-r--r--  src/argaze/utils/contexts/Random.py  25
-rw-r--r--  src/argaze/utils/demo/opencv_cursor_context.json  6
-rw-r--r--  src/argaze/utils/demo/opencv_window_context.json  6
32 files changed, 810 insertions, 278 deletions
diff --git a/docs/img/argaze_load_gui.png b/docs/img/argaze_load_gui.png
new file mode 100644
index 0000000..b8874b2
--- /dev/null
+++ b/docs/img/argaze_load_gui.png
Binary files differ
diff --git a/docs/img/argaze_load_gui_random.png b/docs/img/argaze_load_gui_random.png
new file mode 100644
index 0000000..c95a9f5
--- /dev/null
+++ b/docs/img/argaze_load_gui_random.png
Binary files differ
diff --git a/docs/img/argaze_load_gui_random_pipeline.png b/docs/img/argaze_load_gui_random_pipeline.png
new file mode 100644
index 0000000..210d410
--- /dev/null
+++ b/docs/img/argaze_load_gui_random_pipeline.png
Binary files differ
diff --git a/docs/img/eye_tracker_context.png b/docs/img/eye_tracker_context.png
new file mode 100644
index 0000000..638e9a6
--- /dev/null
+++ b/docs/img/eye_tracker_context.png
Binary files differ
diff --git a/docs/img/pipeline_input_context.png b/docs/img/pipeline_input_context.png
deleted file mode 100644
index 8c195ea..0000000
--- a/docs/img/pipeline_input_context.png
+++ /dev/null
Binary files differ
diff --git a/docs/index.md b/docs/index.md
index 2d00d16..00b8ed7 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -7,20 +7,26 @@ title: What is ArGaze?
**Useful links**: [Installation](installation.md) | [Source Repository](https://gitpub.recherche.enac.fr/argaze) | [Issue Tracker](https://git.recherche.enac.fr/projects/argaze/issues) | [Contact](mailto:argaze-contact@recherche.enac.fr)
**ArGaze** is an open and flexible Python software library designed to provide a unified and modular approach to gaze analysis or gaze interaction.
-**ArGaze** facilitates **real-time and/or post-processing analysis** for both **screen-based and head-mounted** eye tracking systems.
+
By offering a wide array of gaze metrics and supporting easy extension to incorporate additional metrics, **ArGaze** empowers researchers and practitioners to explore novel analytical approaches efficiently.
![ArGaze pipeline](img/argaze_pipeline.png)
+## Eye tracking context
+
+**ArGaze** facilitates the integration of both **screen-based and head-mounted** eye tracking systems for **real-time and/or post-processing analysis**.
+
+[Learn how to handle various eye tracking contexts by reading the dedicated user guide section](./user_guide/eye_tracking_context/introduction.md).
+
## Gaze analysis pipeline
-**ArGaze** provides an extensible modules library, allowing to select application-specific algorithms at each pipeline step:
+Once incoming eye tracking data is available, **ArGaze** provides an extensible modules library, allowing the selection of application-specific algorithms at each pipeline step:
* **Fixation/Saccade identification**: dispersion threshold identification, velocity threshold identification, etc.
* **Area Of Interest (AOI) matching**: focus point inside, deviation circle coverage, etc.
* **Scan path analysis**: transition matrix, entropy, explore/exploit ratio, etc.
-Once the incoming data is formatted as required, all those gaze analysis features can be used with any screen-based eye tracker devices.
+All those gaze analysis features can be used with any screen-based eye tracker device.
[Learn how to build gaze analysis pipelines for various use cases by reading the dedicated user guide section](./user_guide/gaze_analysis_pipeline/introduction.md).
diff --git a/docs/user_guide/eye_tracking_context/advanced_topics/context_definition.md b/docs/user_guide/eye_tracking_context/advanced_topics/context_definition.md
new file mode 100644
index 0000000..99b6c7a
--- /dev/null
+++ b/docs/user_guide/eye_tracking_context/advanced_topics/context_definition.md
@@ -0,0 +1,193 @@
+Define a context class
+======================
+
+The [ArContext](../../../argaze.md/#argaze.ArFeatures.ArContext) class defines a generic base class interface to handle incoming eye tracker data before passing them to a processing pipeline, according to the [Python context manager feature](https://docs.python.org/3/reference/datamodel.html#context-managers).
+
+The [ArContext](../../../argaze.md/#argaze.ArFeatures.ArContext) class interface provides playback features to stop or pause processing, and performance assessment features to measure how many times processing steps are called and the time spent by the process.
+
+Besides, there is also a [LiveProcessingContext](../../../argaze.md/#argaze.ArFeatures.LiveProcessingContext) class that inherits from [ArContext](../../../argaze.md/#argaze.ArFeatures.ArContext) and that defines an abstract *calibrate* method to implement a device-specific calibration process.
+
+In the same way, there is a [PostProcessingContext](../../../argaze.md/#argaze.ArFeatures.PostProcessingContext) class that inherits from [ArContext](../../../argaze.md/#argaze.ArFeatures.ArContext) and that defines abstract *previous* and *next* playback methods to move through a record's frames, and also defines *duration* and *progression* properties to get information about the record length and processing advancement.
+
+Finally, a specific eye tracking context can be defined in a Python file by writing a class that inherits from either the [ArContext](../../../argaze.md/#argaze.ArFeatures.ArContext), [LiveProcessingContext](../../../argaze.md/#argaze.ArFeatures.LiveProcessingContext) or [PostProcessingContext](../../../argaze.md/#argaze.ArFeatures.PostProcessingContext) class.
+
+## Write live processing context
+
+Here is a live processing context example that processes gaze positions and camera images in two separate threads:
+
+```python
+import threading
+
+from argaze import ArFeatures, DataFeatures
+
+class LiveProcessingExample(ArFeatures.LiveProcessingContext):
+
+ @DataFeatures.PipelineStepInit
+ def __init__(self, **kwargs):
+
+ # Init LiveProcessingContext class
+ super().__init__()
+
+ # Init private attribute
+ self.__parameter = ...
+
+ @property
+ def parameter(self):
+ """Any context specific parameter."""
+ return self.__parameter
+
+ @parameter.setter
+ def parameter(self, parameter):
+ self.__parameter = parameter
+
+ @DataFeatures.PipelineStepEnter
+ def __enter__(self):
+ """Start context."""
+
+ # Start context according to any specific parameter
+ ... self.parameter
+
+ # Start a gaze position processing thread
+ self.__gaze_thread = threading.Thread(target = self.__gaze_position_processing)
+ self.__gaze_thread.start()
+
+ # Start a camera image processing thread if applicable
+ self.__camera_thread = threading.Thread(target = self.__camera_image_processing)
+ self.__camera_thread.start()
+
+ return self
+
+ def __gaze_position_processing(self):
+ """Process gaze position."""
+
+ # Processing loop
+ while self.is_running():
+
+ # Process only when not paused
+ if not self.is_paused():
+
+ # Assuming that timestamp, x and y values are available
+ ...
+
+ # Process timestamped gaze position
+ self._process_gaze_position(timestamp = timestamp, x = x, y = y)
+
+ # Wait some time if needed
+ ...
+
+ def __camera_image_processing(self):
+ """Process camera image if applicable."""
+
+ # Processing loop
+ while self.is_running():
+
+ # Process only when not paused
+ if not self.is_paused():
+
+ # Assuming that timestamp and camera_image are available
+ ...
+
+ # Process timestamped camera image
+ self._process_camera_image(timestamp = timestamp, image = camera_image)
+
+ # Wait some time if needed
+ ...
+
+ @DataFeatures.PipelineStepExit
+ def __exit__(self, exception_type, exception_value, exception_traceback):
+ """End context."""
+
+ # Stop processing loops
+ self.stop()
+
+ # Wait for processing threads to end
+ threading.Thread.join(self.__gaze_thread)
+ threading.Thread.join(self.__camera_thread)
+
+ def calibrate(self):
+ """Handle device calibration process."""
+
+ ...
+```
+
+## Write post processing context
+
+Here is a post processing context example that processes gaze positions and camera images in the same thread:
+
+```python
+import threading
+
+from argaze import ArFeatures, DataFeatures
+
+class PostProcessingExample(ArFeatures.PostProcessingContext):
+
+ @DataFeatures.PipelineStepInit
+ def __init__(self, **kwargs):
+
+ # Init PostProcessingContext class
+ super().__init__()
+
+ # Init private attribute
+ self.__parameter = ...
+
+ @property
+ def parameter(self):
+ """Any context specific parameter."""
+ return self.__parameter
+
+ @parameter.setter
+ def parameter(self, parameter):
+ self.__parameter = parameter
+
+ @DataFeatures.PipelineStepEnter
+ def __enter__(self):
+ """Start context."""
+
+ # Start context according to any specific parameter
+ ... self.parameter
+
+ # Start a reading data thread
+ self.__read_thread = threading.Thread(target = self.__data_reading)
+ self.__read_thread.start()
+
+ return self
+
+ def __data_reading(self):
+ """Process gaze position and camera image if applicable."""
+
+ # Processing loop
+ while self.is_running():
+
+ # Process only when not paused
+ if not self.is_paused():
+
+ # Assuming that timestamp and camera_image are available
+ ...
+
+ # Process timestamped camera image
+ self._process_camera_image(timestamp = timestamp, image = camera_image)
+
+ # Assuming that timestamp, x and y values are available
+ ...
+
+ # Process timestamped gaze position
+ self._process_gaze_position(timestamp = timestamp, x = x, y = y)
+
+ # Wait some time if needed
+ ...
+
+ @DataFeatures.PipelineStepExit
+ def __exit__(self, exception_type, exception_value, exception_traceback):
+ """End context."""
+
+ # Stop processing loops
+ self.stop()
+
+ # Wait for processing threads to end
+ threading.Thread.join(self.__read_thread)
+
+ def previous(self):
+ """Go to previous camera image frame."""
+ ...
+
+ def next(self):
+ """Go to next camera image frame."""
+ ...
+```
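+
+## Load a defined context
+
+Assuming one of the classes above is saved into a *my_context.py* file, it can be referenced from a JSON configuration like any ready-made context. Here is a minimal sketch where the module name, the *parameter* entry and the *pipeline.json* file are illustrative:
+
+```json
+{
+    "my_context.PostProcessingExample": {
+        "name": "My example context",
+        "parameter": ...,
+        "pipeline": "pipeline.json"
+    }
+}
+```
+
+Such a configuration can then be executed with the *load* command like any other context configuration.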
+
diff --git a/docs/user_guide/eye_tracking_context/advanced_topics/scripting.md b/docs/user_guide/eye_tracking_context/advanced_topics/scripting.md
new file mode 100644
index 0000000..8753eb6
--- /dev/null
+++ b/docs/user_guide/eye_tracking_context/advanced_topics/scripting.md
@@ -0,0 +1,106 @@
+Script the context
+==================
+
+Context objects are accessible from a Python script.
+
+## Load configuration from JSON file
+
+A context configuration can be loaded from a JSON file using the [*load*](../../../argaze.md/#argaze.load) function.
+
+```python
+from argaze import load
+
+# Load a context
+with load(configuration_filepath) as context:
+
+ while context.is_running():
+
+ # Do something with context
+ ...
+
+ # Wait some time if needed
+ ...
+```
+
+!!! note
+ The **with** statement enters the context by calling its **enter** method, then ensures that its **exit** method is always called at the end.
+
+## Load configuration from dictionary
+
+A context configuration can be loaded from a Python dictionary using the [*from_dict*](../../../argaze.md/#argaze.DataFeatures.from_dict) function.
+
+```python
+from argaze import DataFeatures
+
+import my_package
+
+# Set working directory to enable relative file path loading
+DataFeatures.set_working_directory('path/to/folder')
+
+# Edit a dict with context configuration
+configuration = {
+ "name": "My context",
+ "parameter": ...,
+ "pipeline": ...
+}
+
+# Load a context from a package
+with DataFeatures.from_dict(my_package.MyContext, configuration) as context:
+
+ while context.is_running():
+
+ # Do something with context
+ ...
+
+ # Wait some time if needed
+ ...
+```
+
+## Manage context
+
+Check the context or the pipeline type to adapt features.
+
+```python
+from argaze import ArFeatures
+
+# Assuming the context is loaded and is running
+...
+
+ # Check context type
+
+ # Live processing case: calibration method is available
+ if issubclass(type(context), ArFeatures.LiveProcessingContext):
+ ...
+
+ # Post processing case: more playback methods are available
+ if issubclass(type(context), ArFeatures.PostProcessingContext):
+ ...
+
+ # Check pipeline type
+
+ # Screen-based case: only gaze positions are processed
+ if issubclass(type(context.pipeline), ArFeatures.ArFrame):
+ ...
+
+ # Head-mounted case: camera images are also processed
+ if issubclass(type(context.pipeline), ArFeatures.ArCamera):
+ ...
+```
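+
+For instance, the playback features can be used as follows (a sketch, assuming a [PostProcessingContext](../../../argaze.md/#argaze.ArFeatures.PostProcessingContext) is loaded and running):
+
+```python
+# Assuming a post processing context is loaded and is running
+...
+
+    # Pause or resume the data processing
+    if context.is_paused():
+
+        context.resume()
+
+    else:
+
+        context.pause()
+
+    # Move into the record's frames
+    context.previous()
+    context.next()
+
+    # Get the record duration and the processing progression
+    print(context.duration, context.progression)
+```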
+
+## Display context
+
+The context image can be displayed at low priority so as not to block pipeline processing.
+
+```python
+from argaze import DataFeatures
+
+# Assuming the context is loaded and is running
+...
+
+ # Display context if the pipeline is available
+ try:
+
+ ... = context.image(wait = False)
+
+ except DataFeatures.SharedObjectBusy:
+
+ pass
+```
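+
+For instance, the image can be displayed in a window thanks to the [OpenCV package](https://pypi.org/project/opencv-python/). This is a minimal sketch, assuming the context is loaded as shown above:
+
+```python
+import cv2
+
+from argaze import DataFeatures
+
+# Assuming the context is loaded and is running
+...
+
+    # Create a window named after the context
+    cv2.namedWindow(context.name, cv2.WINDOW_AUTOSIZE)
+
+    while context.is_running():
+
+        # Display the context image in low priority
+        try:
+
+            cv2.imshow(context.name, context.image(wait = False))
+
+        except DataFeatures.SharedObjectBusy:
+
+            pass
+
+        # Wait 10 ms
+        cv2.waitKey(10)
+```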
diff --git a/docs/user_guide/gaze_analysis_pipeline/timestamped_gaze_positions_edition.md b/docs/user_guide/eye_tracking_context/advanced_topics/timestamped_gaze_positions_edition.md
index 026d287..340dbaf 100644
--- a/docs/user_guide/gaze_analysis_pipeline/timestamped_gaze_positions_edition.md
+++ b/docs/user_guide/eye_tracking_context/advanced_topics/timestamped_gaze_positions_edition.md
@@ -3,7 +3,7 @@ Edit timestamped gaze positions
Whatever eye data comes from a file on disk or from a live stream, timestamped gaze positions are required before going further.
-![Timestamped gaze positions](../../img/timestamped_gaze_positions.png)
+![Timestamped gaze positions](../../../img/timestamped_gaze_positions.png)
## Import timestamped gaze positions from CSV file
@@ -28,7 +28,7 @@ for timestamped_gaze_position in ts_gaze_positions:
## Edit timestamped gaze positions from live stream
-Real-time gaze positions can be edited thanks to the [GazePosition](../../argaze.md/#argaze.GazeFeatures.GazePosition) class.
+Real-time gaze positions can be edited thanks to the [GazePosition](../../../argaze.md/#argaze.GazeFeatures.GazePosition) class.
Besides, timestamps can be edited from the incoming data stream or, if not available, they can be edited thanks to the Python [time package](https://docs.python.org/3/library/time.html).
```python
@@ -64,12 +64,3 @@ start_time = time.time()
!!! warning "Free time unit"
Timestamps can either be integers or floats, seconds, milliseconds or what ever you need. The only concern is that all time values used in further configurations have to be in the same unit.
-
-<!--
-!!! note "Eyetracker connectors"
-
- [Read the use cases section to discover examples using specific eyetrackers](./user_cases/introduction.md).
-!-->
-
-!!! note ""
- Now we have timestamped gaze positions at expected format, read the next chapter to start learning [how to analyze them](./configuration_and_execution.md). \ No newline at end of file
diff --git a/docs/user_guide/eye_tracking_context/configuration_and_execution.md b/docs/user_guide/eye_tracking_context/configuration_and_execution.md
new file mode 100644
index 0000000..f13c6a2
--- /dev/null
+++ b/docs/user_guide/eye_tracking_context/configuration_and_execution.md
@@ -0,0 +1,65 @@
+Edit and execute context
+========================
+
+The [utils.contexts module](../../argaze.md/#argaze.utils.contexts) provides ready-made contexts like:
+
+* [Tobii Pro Glasses 2](context_modules/tobii_pro_glasses_2.md) live stream and post processing contexts,
+* [Pupil Labs](context_modules/pupil_labs.md) live stream context,
+* [OpenCV](context_modules/opencv.md) window cursor position and movie processing,
+* [Random](context_modules/random.md) gaze position generator.
+
+## Edit JSON configuration
+
+Here is a JSON configuration that loads a [Random.GazePositionGenerator](../../argaze.md/#argaze.utils.contexts.Random.GazePositionGenerator) context:
+
+```json
+{
+ "argaze.utils.contexts.Random.GazePositionGenerator": {
+ "name": "Random gaze position generator",
+ "range": [1280, 720],
+ "pipeline": {
+ "argaze.ArFeatures.ArFrame": {
+ "size": [1280, 720]
+ }
+ }
+ }
+}
+```
+
+Let's understand the meaning of each JSON entry.
+
+### argaze.utils.contexts.Random.GazePositionGenerator
+
+The class name of the object being loaded from the [utils.contexts module](../../argaze.md/#argaze.utils.contexts).
+
+### *name*
+
+The name of the [ArContext](../../argaze.md/#argaze.ArFeatures.ArContext). Basically useful for visualization purposes.
+
+### *range*
+
+The range of the gaze position being generated. This property is specific to the [Random.GazePositionGenerator](../../argaze.md/#argaze.utils.contexts.Random.GazePositionGenerator) class.
+
+### *pipeline*
+
+A minimal gaze processing pipeline that only draws the last gaze position.
+
+## Context execution
+
+A context can be loaded from a JSON configuration file using the [*load* command](../utils/main_commands.md).
+
+```shell
+python -m argaze load CONFIGURATION
+```
+
+This command should open a GUI window with a random yellow dot inside.
+
+![ArGaze load GUI](../../img/argaze_load_gui_random.png)
+
+!!! note ""
+
+ At this point, it is possible to load any ready-made context from the [utils.contexts](../../argaze.md/#argaze.utils.contexts) module.
+
+ However, the incoming gaze positions are not processed, and gaze mapping would not be available for a head-mounted eye tracker context.
+
+ Read the [gaze analysis pipeline section](../gaze_analysis_pipeline/introduction.md) to learn how to process gaze positions, then the [ArUco markers pipeline section](../aruco_marker_pipeline/introduction.md) to learn how to enable gaze mapping with an ArUco markers setup.
diff --git a/docs/user_guide/eye_tracking_context/context_modules/opencv.md b/docs/user_guide/eye_tracking_context/context_modules/opencv.md
new file mode 100644
index 0000000..7244cd4
--- /dev/null
+++ b/docs/user_guide/eye_tracking_context/context_modules/opencv.md
@@ -0,0 +1,47 @@
+OpenCV
+======
+
+ArGaze provides ready-made contexts to process cursor position over an OpenCV window and to process movie images.
+
+To select a desired context, the JSON samples have to be edited and saved inside an [ArContext configuration](../configuration_and_execution.md) file.
+Notice that the *pipeline* entry is mandatory.
+
+```json
+{
+ JSON sample
+ "pipeline": ...
+}
+```
+
+Read more about [ArContext base class in code reference](../../../argaze.md/#argaze.ArFeatures.ArContext).
+
+## Cursor
+
+::: argaze.utils.contexts.OpenCV.Cursor
+
+### JSON sample
+
+```json
+{
+ "argaze.utils.contexts.OpenCV.Cursor": {
+ "name": "Open CV cursor",
+ "pipeline": ...
+ }
+}
+```
+
+## Movie
+
+::: argaze.utils.contexts.OpenCV.Movie
+
+### JSON sample
+
+```json
+{
+ "argaze.utils.contexts.OpenCV.Movie": {
+ "name": "Open CV cursor",
+ "path": "./src/argaze/utils/demo/tobii_record/segments/1/fullstream.mp4",
+ "pipeline": ...
+ }
+}
+```
diff --git a/docs/user_guide/eye_tracking_context/context_modules/pupil_labs.md b/docs/user_guide/eye_tracking_context/context_modules/pupil_labs.md
new file mode 100644
index 0000000..d2ec336
--- /dev/null
+++ b/docs/user_guide/eye_tracking_context/context_modules/pupil_labs.md
@@ -0,0 +1,32 @@
+Pupil Labs
+==========
+
+ArGaze provides a ready-made context to work with Pupil Labs devices.
+
+To select a desired context, the JSON samples have to be edited and saved inside an [ArContext configuration](../configuration_and_execution.md) file.
+Notice that the *pipeline* entry is mandatory.
+
+```json
+{
+ JSON sample
+ "pipeline": ...
+}
+```
+
+Read more about [ArContext base class in code reference](../../../argaze.md/#argaze.ArFeatures.ArContext).
+
+## Live Stream
+
+::: argaze.utils.contexts.PupilLabs.LiveStream
+
+### JSON sample
+
+```json
+{
+ "argaze.utils.contexts.PupilLabs.LiveStream": {
+ "name": "Pupil Labs live stream",
+ "project": "my_experiment",
+ "pipeline": ...
+ }
+}
+```
diff --git a/docs/user_guide/eye_tracking_context/context_modules/random.md b/docs/user_guide/eye_tracking_context/context_modules/random.md
new file mode 100644
index 0000000..89d7501
--- /dev/null
+++ b/docs/user_guide/eye_tracking_context/context_modules/random.md
@@ -0,0 +1,32 @@
+Random
+======
+
+ArGaze provides a ready-made context to generate random gaze positions.
+
+To select a desired context, the JSON samples have to be edited and saved inside an [ArContext configuration](../configuration_and_execution.md) file.
+Notice that the *pipeline* entry is mandatory.
+
+```json
+{
+ JSON sample
+ "pipeline": ...
+}
+```
+
+Read more about [ArContext base class in code reference](../../../argaze.md/#argaze.ArFeatures.ArContext).
+
+## Gaze Position Generator
+
+::: argaze.utils.contexts.Random.GazePositionGenerator
+
+### JSON sample
+
+```json
+{
+ "argaze.utils.contexts.Random.GazePositionGenerator": {
+ "name": "Random gaze position generator",
+ "range": [1280, 720],
+ "pipeline": ...
+ }
+}
+```
diff --git a/docs/user_guide/eye_tracking_context/context_modules/tobii_pro_glasses_2.md b/docs/user_guide/eye_tracking_context/context_modules/tobii_pro_glasses_2.md
new file mode 100644
index 0000000..fba6931
--- /dev/null
+++ b/docs/user_guide/eye_tracking_context/context_modules/tobii_pro_glasses_2.md
@@ -0,0 +1,59 @@
+Tobii Pro Glasses 2
+===================
+
+ArGaze provides a ready-made context to work with Tobii Pro Glasses 2 devices.
+
+To select a desired context, the JSON samples have to be edited and saved inside an [ArContext configuration](../configuration_and_execution.md) file.
+Notice that the *pipeline* entry is mandatory.
+
+```json
+{
+ JSON sample
+ "pipeline": ...
+}
+```
+
+Read more about [ArContext base class in code reference](../../../argaze.md/#argaze.ArFeatures.ArContext).
+
+## Live Stream
+
+::: argaze.utils.contexts.TobiiProGlasses2.LiveStream
+
+### JSON sample
+
+```json
+{
+ "argaze.utils.contexts.TobiiProGlasses2.LiveStream": {
+ "name": "Tobii Pro Glasses 2 live stream",
+ "address": "10.34.0.17",
+ "project": "my_experiment",
+ "participant": "subject-A",
+ "configuration": {
+ "sys_ec_preset": "Indoor",
+ "sys_sc_width": 1920,
+ "sys_sc_height": 1080,
+ "sys_sc_fps": 25,
+ "sys_sc_preset": "Auto",
+ "sys_et_freq": 50,
+ "sys_mems_freq": 100
+ },
+ "pipeline": ...
+ }
+}
+```
+
+## Post Processing
+
+::: argaze.utils.contexts.TobiiProGlasses2.PostProcessing
+
+### JSON sample
+
+```json
+{
+ "argaze.utils.contexts.TobiiProGlasses2.PostProcessing" : {
+ "name": "Tobii Pro Glasses 2 post-processing",
+ "segment": "./src/argaze/utils/demo/tobii_record/segments/1",
+ "pipeline": ...
+ }
+}
+```
diff --git a/docs/user_guide/eye_tracking_context/introduction.md b/docs/user_guide/eye_tracking_context/introduction.md
new file mode 100644
index 0000000..8fe6c81
--- /dev/null
+++ b/docs/user_guide/eye_tracking_context/introduction.md
@@ -0,0 +1,18 @@
+Overview
+========
+
+This section explains how to handle eye tracker data from various sources, such as live streams or archived files, before passing them to a processing pipeline. These various usages are covered by the notion of **eye tracking context**.
+
+To use a ready-made eye tracking context, you only need to know:
+
+* [How to edit and execute a context](configuration_and_execution.md)
+
+More advanced features are also explained like:
+
+* [How to script a context](./advanced_topics/scripting.md),
+* [How to define a context](./advanced_topics/context_definition.md)
+
+To better understand how a context works, the schema below mentions the *enter* and *exit* methods, which are related to the notion of a [Python context manager](https://docs.python.org/3/reference/datamodel.html#context-managers).
+
+![ArContext class](../../img/eye_tracker_context.png)
+
diff --git a/docs/user_guide/gaze_analysis_pipeline/advanced_topics/scripting.md b/docs/user_guide/gaze_analysis_pipeline/advanced_topics/scripting.md
index 026cb3f..f3ec6cd 100644
--- a/docs/user_guide/gaze_analysis_pipeline/advanced_topics/scripting.md
+++ b/docs/user_guide/gaze_analysis_pipeline/advanced_topics/scripting.md
@@ -66,7 +66,28 @@ from argaze import ArFeatures
...
```
-## Pipeline execution updates
+## Pipeline execution
+
+Timestamped [GazePositions](../../../argaze.md/#argaze.GazeFeatures.GazePosition) have to be passed one by one to the [ArFrame.look](../../../argaze.md/#argaze.ArFeatures.ArFrame.look) method to execute the whole instantiated pipeline.
+
+!!! warning "Mandatory"
+
+ The [ArFrame.look](../../../argaze.md/#argaze.ArFeatures.ArFrame.look) method must be called from a *try* block to catch pipeline exceptions.
+
+```python
+# Assuming that timestamped gaze positions are available
+...
+
+ try:
+
+ # Look ArFrame at a timestamped gaze position
+ ar_frame.look(timestamped_gaze_position)
+
+ # Do something with pipeline exception
+ except Exception as e:
+
+ ...
+```
Calling [ArFrame.look](../../../argaze.md/#argaze.ArFeatures.ArFrame.look) method leads to update many data into the pipeline.
@@ -186,3 +207,34 @@ ar_frame_image = ar_frame.image(**image_parameters)
# Do something with ArFrame image
...
```
+
+Then, the [ArFrame.image](../../../argaze.md/#argaze.ArFeatures.ArFrame.image) method can be called in various situations.
+
+### Live window display
+
+While timestamped gaze positions are processed by the [ArFrame.look](../../../argaze.md/#argaze.ArFeatures.ArFrame.look) method, it is possible to display the [ArFrame](../../../argaze.md/#argaze.ArFeatures.ArFrame) image thanks to the [OpenCV package](https://pypi.org/project/opencv-python/).
+
+```python
+import cv2
+
+def main():
+
+ # Assuming ArFrame is loaded
+ ...
+
+ # Create a window to display ArFrame
+ cv2.namedWindow(ar_frame.name, cv2.WINDOW_AUTOSIZE)
+
+ # Assuming that timestamped gaze positions are being processed by ArFrame.look method
+ ...
+
+ # Update ArFrame image display
+ cv2.imshow(ar_frame.name, ar_frame.image())
+
+ # Wait 10 ms
+ cv2.waitKey(10)
+
+if __name__ == '__main__':
+
+ main()
+``` \ No newline at end of file
diff --git a/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md b/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md
index be27c69..2b64091 100644
--- a/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md
+++ b/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md
@@ -5,7 +5,7 @@ Once [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) is [configured](confi
![Layer](../../img/ar_layer.png)
-## Add ArLayer to ArFrame JSON configuration file
+## Add ArLayer to ArFrame JSON configuration
The [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) class defines a space where to match fixations with AOI and inside which those matches need to be analyzed.
diff --git a/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md b/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md
index 57a9d71..58919e5 100644
--- a/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md
+++ b/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md
@@ -1,15 +1,15 @@
-Load and execute pipeline
+Edit and execute pipeline
=========================
The [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) class defines a rectangular area where timestamped gaze positions are projected in and inside which they need to be analyzed.
-![Frame](../../img/ar_frame.png)
+Once defined, a gaze analysis pipeline needs to be embedded inside a context that will provide it gaze positions to process.
-## Load JSON configuration file
+![Frame](../../img/ar_frame.png)
-An [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) pipeline can be loaded from a JSON configuration file thanks to the [argaze.load](../../argaze.md/#argaze.load) package method.
+## Edit JSON configuration
-Here is a simple JSON [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) configuration file example:
+Here is a simple JSON [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) configuration example:
```json
{
@@ -35,19 +35,7 @@ Here is a simple JSON [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) conf
}
```
-Then, here is how to load the JSON file:
-
-```python
-import argaze
-
-# Load ArFrame
-with argaze.load('./configuration.json') as ar_frame:
-
- # Do something with ArFrame
- ...
-```
-
-Now, let's understand the meaning of each JSON entry.
+Let's understand the meaning of each JSON entry.
### argaze.ArFeatures.ArFrame
@@ -103,28 +91,32 @@ In the example file, the chosen analysis algorithms are the [Basic](../../argaze
## Pipeline execution
-Timestamped [GazePositions](../../argaze.md/#argaze.GazeFeatures.GazePosition) have to be passed one by one to the [ArFrame.look](../../argaze.md/#argaze.ArFeatures.ArFrame.look) method to execute the whole instantiated pipeline.
+A pipeline needs to be embedded into a context to be executed.
-!!! warning "Mandatory"
+Copy the gaze analysis pipeline configuration defined above inside the following context configuration.
- The [ArFrame.look](../../argaze.md/#argaze.ArFeatures.ArFrame.look) method must be called from a *try* block to catch pipeline exceptions.
+```json
+{
+ "argaze.utils.contexts.Random.GazePositionGenerator": {
+ "name": "Random gaze position generator",
+ "range": [1920, 1080],
+ "pipeline": JSON CONFIGURATION
+ }
+}
+```
-```python
-# Assuming that timestamped gaze positions are available
-...
+Then, use the [*load* command](../utils/main_commands.md) to execute the context.
- try:
+```shell
+python -m argaze load CONFIGURATION
+```
- # Look ArFrame at a timestamped gaze position
- ar_frame.look(timestamped_gaze_position)
+This command should open a GUI window with a random yellow dot and identified fixation circles.
+
+![ArGaze load GUI](../../img/argaze_load_gui_random_pipeline.png)
- # Do something with pipeline exception
- except Exception as e:
-
- ...
-```
!!! note ""
- At this point, the [ArFrame.look](../../argaze.md/#argaze.ArFeatures.ArFrame.look) method only processes gaze movement identification and scan path analysis without any AOI neither any recording or visualization supports.
+ At this point, the pipeline only processes gaze movement identification and scan path analysis, without any AOI analysis nor any recording or visualization support.
Read the next chapters to learn how to [describe AOI](aoi_2d_description.md), [add AOI analysis](aoi_analysis.md), [record gaze analysis](recording.md) and [visualize pipeline steps](visualization.md). \ No newline at end of file
diff --git a/docs/user_guide/gaze_analysis_pipeline/introduction.md b/docs/user_guide/gaze_analysis_pipeline/introduction.md
index c12f669..29eeed5 100644
--- a/docs/user_guide/gaze_analysis_pipeline/introduction.md
+++ b/docs/user_guide/gaze_analysis_pipeline/introduction.md
@@ -1,7 +1,10 @@
Overview
========
-This section explains how to create gaze analysis pipelines for various use cases.
+This section explains how to process incoming gaze positions through a **gaze analysis pipeline**.
+
+!!! warning "Read eye tracking context section before"
+ This section assumes that the incoming gaze positions are provided by an [eye tracking context](../eye_tracking_context/introduction.md).
First, let's look at the schema below: it gives an overview of the main notions involved in the following chapters.
@@ -9,8 +12,7 @@ First, let's look at the schema below: it gives an overview of the main notions
To build your own gaze analysis pipeline, you need to know:
-* [How to edit timestamped gaze positions](timestamped_gaze_positions_edition.md),
-* [How to load and execute gaze analysis pipeline](configuration_and_execution.md),
+* [How to edit and execute a pipeline](configuration_and_execution.md),
* [How to describe AOI](aoi_2d_description.md),
* [How to enable AOI analysis](aoi_analysis.md),
* [How to visualize pipeline steps outputs](visualization.md),
@@ -20,6 +22,7 @@ To build your own gaze analysis pipeline, you need to know:
More advanced features are also explained like:
+* [How to edit timestamped gaze positions](advanced_topics/timestamped_gaze_positions_edition.md),
* [How to script gaze analysis pipeline](advanced_topics/scripting.md),
* [How to load module from another package](advanced_topics/module_loading.md).
* [How to calibrate gaze position](advanced_topics/gaze_position_calibration.md).
diff --git a/docs/user_guide/gaze_analysis_pipeline/visualization.md b/docs/user_guide/gaze_analysis_pipeline/visualization.md
index 6b9805c..32395c3 100644
--- a/docs/user_guide/gaze_analysis_pipeline/visualization.md
+++ b/docs/user_guide/gaze_analysis_pipeline/visualization.md
@@ -5,7 +5,7 @@ Visualization is not a pipeline step, but each [ArFrame](../../argaze.md/#argaze
![ArFrame visualization](../../img/visualization.png)
-## Add image parameters to ArFrame JSON configuration file
+## Add image parameters to ArFrame JSON configuration
[ArFrame.image](../../argaze.md/#argaze.ArFeatures.ArFrame.image) method parameters can be configured thanks to a dedicated JSON entry.
@@ -82,37 +82,6 @@ Here is an extract from the JSON ArFrame configuration file with a sample where
Most of *image_parameters* entries work if related ArFrame/ArLayer pipeline steps are enabled.
For example, a JSON *draw_scan_path* entry needs GazeMovementIdentifier and ScanPath steps to be enabled.
-Then, [ArFrame.image](../../argaze.md/#argaze.ArFeatures.ArFrame.image) method can be called in various situations.
-
-## Live window display
-
-While timestamped gaze positions are processed by [ArFrame.look](../../argaze.md/#argaze.ArFeatures.ArFrame.look) method, it is possible to display the [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) image thanks to the [OpenCV package](https://pypi.org/project/opencv-python/).
-
-```python
-import cv2
-
-def main():
-
- # Assuming ArFrame is loaded
- ...
-
- # Create a window to display ArFrame
- cv2.namedWindow(ar_frame.name, cv2.WINDOW_AUTOSIZE)
-
- # Assuming that timestamped gaze positions are being processed by ArFrame.look method
- ...
-
- # Update ArFrame image display
- cv2.imshow(ar_frame.name, ar_frame.image())
-
- # Wait 10 ms
- cv2.waitKey(10)
-
-if __name__ == '__main__':
-
- main()
-```
-
!!! note "Export to video file"
Video exportation is detailed in [gaze analysis recording chapter](recording.md). \ No newline at end of file
diff --git a/docs/user_guide/pipeline_input_context/configuration_and_connection.md b/docs/user_guide/pipeline_input_context/configuration_and_connection.md
deleted file mode 100644
index 4aac88a..0000000
--- a/docs/user_guide/pipeline_input_context/configuration_and_connection.md
+++ /dev/null
@@ -1,35 +0,0 @@
-Load and connect a context
-==========================
-
-Once an [ArContext is defined](context_definition.md), it have to be connected to a pipeline.
-
-# Load JSON configuration file
-
-An [ArContext](../../argaze.md/#argaze.ArFeatures.ArContext) can be loaded from a JSON configuration file thanks to the [argaze.load](../../argaze.md/#argaze.load) package method.
-
-Here is a JSON configuration file related to the [previously defined Example context](context_definition.md):
-
-```json
-{
- "my_context.Example": {
- "name": "My example context",
- "parameter": ...,
- "pipeline": "pipeline.json"
- }
-}
-```
-
-Then, here is how to load the JSON file:
-
-```python
-import argaze
-
-# Load ArContext
-with argaze.load('./configuration.json') as ar_context:
-
- # Do something with ArContext
- ...
-```
-
-!!! note
- There is nothing to do to execute a loaded context as it is handled inside its own **__enter__** method.
diff --git a/docs/user_guide/pipeline_input_context/context_definition.md b/docs/user_guide/pipeline_input_context/context_definition.md
deleted file mode 100644
index 7d30438..0000000
--- a/docs/user_guide/pipeline_input_context/context_definition.md
+++ /dev/null
@@ -1,57 +0,0 @@
-Define a context class
-======================
-
-The [ArContext](../../argaze.md/#argaze.ArFeatures.ArContext) class defines a generic class interface to handle pipeline inputs according to [Python context manager feature](https://docs.python.org/3/reference/datamodel.html#context-managers).
-
-# Write Python context file
-
-A specific [ArContext](../../argaze.md/#argaze.ArFeatures.ArContext) can be defined into a Python file.
-
-Here is an example context defined into *my_context.py* file:
-
-```python
-from argaze import ArFeatures, DataFeatures
-
-class Example(ArFeatures.ArContext):
-
- @DataFeatures.PipelineStepInit
- def __init__(self, **kwargs):
-
- # Init ArContext class
- super().__init__()
-
- # Init private attribute
- self.__parameter = ...
-
- @property
- def parameter(self):
- """Any context specific parameter."""
- return self.__parameter
-
- @parameter.setter
- def parameter(self, parameter):
- self.__parameter = parameter
-
- @DataFeatures.PipelineStepEnter
- def __enter__(self):
-
- # Start context according any specific parameter
- ... self.parameter
-
- # Assuming that timestamp, x and y values are available
- ...
-
- # Process timestamped gaze position
- self._process_gaze_position(timestamp = timestamp, x = x, y = y)
-
- @DataFeatures.PipelineStepExit
- def __exit__(self, exception_type, exception_value, exception_traceback):
-
- # End context
- ...
-```
-
-!!! note ""
-
- The next chapter explains how to [load a context to connect it with a pipeline](configuration_and_connection.md).
- \ No newline at end of file
diff --git a/docs/user_guide/pipeline_input_context/introduction.md b/docs/user_guide/pipeline_input_context/introduction.md
deleted file mode 100644
index e31ad54..0000000
--- a/docs/user_guide/pipeline_input_context/introduction.md
+++ /dev/null
@@ -1,24 +0,0 @@
-Overview
-========
-
-This section explains how to connect [gaze analysis](../gaze_analysis_pipeline/introduction.md) or [augmented reality](../aruco_marker_pipeline/introduction.md) pipelines with various input contexts.
-
-First, let's look at the schema below: it gives an overview of the main notions involved in the following chapters.
-
-![Pipeline input context](../../img/pipeline_input_context.png)
-
-To build your own input context, you need to know:
-
-* [How to define a context class](context_definition.md),
-* [How to load a context to connect with a pipeline](configuration_and_connection.md),
-
-!!! warning "Documentation in progress"
-
- This section is not yet fully done. Please look at the [demonstrations scripts chapter](../utils/demonstrations_scripts.md) to know more about this notion.
-
-<!--
-* [How to stop a context](stop.md),
-* [How to pause and resume a context](pause_and_resume.md),
-* [How to visualize a context](visualization.md),
-* [How to handle pipeline exceptions](exceptions.md)
-!-->
diff --git a/docs/user_guide/utils/demonstrations_scripts.md b/docs/user_guide/utils/demonstrations_scripts.md
index f293980..dd1b8e0 100644
--- a/docs/user_guide/utils/demonstrations_scripts.md
+++ b/docs/user_guide/utils/demonstrations_scripts.md
@@ -11,18 +11,26 @@ Collection of command-line scripts for demonstration purpose.
## Random context
-Load **random_context.json** file to analyze random gaze positions:
+Load **random_context.json** file to process random gaze positions:
```shell
python -m argaze load ./src/argaze/utils/demo/random_context.json
```
-## OpenCV window context
+## OpenCV cursor context
-Load **opencv_window_context.json** file to analyze mouse pointer positions over OpenCV window:
+Load **opencv_cursor_context.json** file to process cursor positions over an OpenCV window:
```shell
-python -m argaze load ./src/argaze/utils/demo/opencv_window_context.json
+python -m argaze load ./src/argaze/utils/demo/opencv_cursor_context.json
+```
+
+## OpenCV movie context
+
+Load **opencv_movie_context.json** file to process movie images and cursor positions over an OpenCV window:
+
+```shell
+python -m argaze load ./src/argaze/utils/demo/opencv_movie_context.json
```
## Tobii Pro Glasses 2
diff --git a/docs/user_guide/utils/estimate_aruco_markers_pose.md b/docs/user_guide/utils/estimate_aruco_markers_pose.md
new file mode 100644
index 0000000..3d34972
--- /dev/null
+++ b/docs/user_guide/utils/estimate_aruco_markers_pose.md
@@ -0,0 +1,60 @@
+Estimate ArUco markers pose
+===========================
+
+This **ArGaze** application detects ArUco markers inside movie frames then exports pose estimations as .obj files into a folder.
+
+Firstly, edit the **utils/estimate_markers_pose/context.json** file to select a movie *path*.
+
+```json
+{
+ "argaze.utils.contexts.OpenCV.Movie" : {
+ "name": "ArUco markers pose estimator",
+ "path": "./src/argaze/utils/demo/tobii_record/segments/1/fullstream.mp4",
+ "pipeline": "pipeline.json"
+ }
+}
+```
+
+Secondly, edit the **utils/estimate_markers_pose/pipeline.json** file to set up the ArUco camera *size* and the ArUco detector *dictionary*, *pose_size* and *pose_ids* attributes.
+
+```json
+{
+ "argaze.ArUcoMarker.ArUcoCamera.ArUcoCamera": {
+ "name": "Full HD Camera",
+ "size": [1920, 1080],
+ "aruco_detector": {
+ "dictionary": "DICT_APRILTAG_16h5",
+ "pose_size": 4,
+ "pose_ids": [],
+ "parameters": {
+ "useAruco3Detection": 1
+ },
+ "observers":{
+ "observers.ArUcoMarkersPoseRecorder": {
+ "output_folder": "_export/records/aruco_markers_group"
+ }
+ }
+ },
+ "sides_mask": 420,
+ "image_parameters": {
+ "background_weight": 1,
+ "draw_gaze_positions": {
+ "color": [0, 255, 255],
+ "size": 4
+ },
+ "draw_detected_markers": {
+ "color": [255, 255, 255],
+ "draw_axes": {
+ "thickness": 4
+ }
+ }
+ }
+ }
+}
+```
+
+Then, launch the application.
+
+```shell
+python -m argaze load ./src/argaze/utils/estimate_markers_pose/context.json
+``` \ No newline at end of file
diff --git a/docs/user_guide/utils/ready-made_scripts.md b/docs/user_guide/utils/main_commands.md
index 892fef8..4dd3434 100644
--- a/docs/user_guide/utils/ready-made_scripts.md
+++ b/docs/user_guide/utils/main_commands.md
@@ -1,15 +1,12 @@
-Ready-made scripts
-==================
+Main commands
+=============
-Collection of command-line scripts to provide useful features.
-
-!!! note
- *Consider that all inline commands below have to be executed at the root of ArGaze package folder.*
+The **ArGaze** package comes with top-level commands.
!!! note
*Use -h option to get command arguments documentation.*
-## Load ArContext JSON configuration
+## Load
Load and execute any [ArContext](../../argaze.md/#argaze.ArFeatures.ArContext) from a JSON CONFIGURATION file
@@ -17,6 +14,10 @@ Load and execute any [ArContext](../../argaze.md/#argaze.ArFeatures.ArContext) f
python -m argaze load CONFIGURATION
```
+This command should open a GUI window to display the image of the context's pipeline.
+
+![ArGaze load GUI](../../img/argaze_load_gui.png)
+
### Send command
Use -p option to enable pipe communication at given address:
@@ -46,24 +47,10 @@ echo "context.pause()" > /tmp/argaze
echo "context.resume()" > /tmp/argaze
```
-## Edit JSON configuration
+## Edit
Modify the content of JSON CONFIGURATION file with another JSON CHANGES file then, save the result into an OUTPUT file
```shell
python -m argaze edit CONFIGURATION CHANGES OUTPUT
```
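+
+For instance, a CHANGES file may contain only the entries to modify. This is a hypothetical sketch, assuming the command merges matching JSON entries into the CONFIGURATION content:
+
+```json
+{
+    "argaze.utils.contexts.Random.GazePositionGenerator": {
+        "range": [1920, 1080]
+    }
+}
+```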
-
-## Estimate ArUco markers pose
-
-This application detects ArUco markers inside a movie frame then, export pose estimation as .obj file into a folder.
-
-Firstly, edit **utils/estimate_markers_pose/context.json** file as to select a movie *path*.
-
-Sencondly, edit **utils/estimate_markers_pose/pipeline.json** file to setup ArUco detector *dictionary*, *pose_size* and *pose_ids* attributes.
-
-Then, launch the application.
-
-```shell
-python -m argaze load ./src/argaze/utils/estimate_markers_pose/context.json
-``` \ No newline at end of file
diff --git a/mkdocs.yml b/mkdocs.yml
index 2ec7046..17fc65a 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -4,9 +4,20 @@ nav:
- installation.md
- license.md
- User Guide:
+ - Eye Tracking Context:
+ - user_guide/eye_tracking_context/introduction.md
+ - user_guide/eye_tracking_context/configuration_and_execution.md
+ - Context Modules:
+ - user_guide/eye_tracking_context/context_modules/tobii_pro_glasses_2.md
+ - user_guide/eye_tracking_context/context_modules/pupil_labs.md
+ - user_guide/eye_tracking_context/context_modules/opencv.md
+ - user_guide/eye_tracking_context/context_modules/random.md
+ - Advanced Topics:
+ - user_guide/eye_tracking_context/advanced_topics/scripting.md
+ - user_guide/eye_tracking_context/advanced_topics/context_definition.md
+ - user_guide/eye_tracking_context/advanced_topics/timestamped_gaze_positions_edition.md
- Gaze Analysis Pipeline:
- user_guide/gaze_analysis_pipeline/introduction.md
- - user_guide/gaze_analysis_pipeline/timestamped_gaze_positions_edition.md
- user_guide/gaze_analysis_pipeline/configuration_and_execution.md
- user_guide/gaze_analysis_pipeline/aoi_2d_description.md
- user_guide/gaze_analysis_pipeline/aoi_analysis.md
@@ -36,13 +47,10 @@ nav:
- user_guide/aruco_marker_pipeline/advanced_topics/scripting.md
- user_guide/aruco_marker_pipeline/advanced_topics/optic_parameters_calibration.md
- user_guide/aruco_marker_pipeline/advanced_topics/aruco_detector_configuration.md
- - Pipeline Input Context:
- - user_guide/pipeline_input_context/introduction.md
- - user_guide/pipeline_input_context/context_definition.md
- - user_guide/pipeline_input_context/configuration_and_connection.md
- utils:
- - user_guide/utils/ready-made_scripts.md
+ - user_guide/utils/main_commands.md
- user_guide/utils/demonstrations_scripts.md
+ - user_guide/utils/estimate_aruco_markers_pose.md
- Use Cases:
- Pilot gaze monitoring:
- use_cases/pilot_gaze_monitoring/introduction.md
@@ -80,6 +88,11 @@ plugins:
annotations_path: brief
show_submodules: true
show_root_toc_entry: false
+ show_if_no_docstring: false
+ modernize_annotations: true
+ filters:
+ - "!^__"
+
watch:
- src/argaze
markdown_extensions:
diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py
index aaac6ed..8d9eceb 100644
--- a/src/argaze/ArFeatures.py
+++ b/src/argaze/ArFeatures.py
@@ -1231,7 +1231,7 @@ class ArCamera(ArFrame):
self.__projection_cache_writer.write( (timestamp, exception) )
- def _read_projection_cache(self, timestamp: int|float):
+ def _read_projection_cache(self, timestamp: int|float) -> bool:
"""Read layers aoi scene from the projection cache.
Parameters:
@@ -1504,7 +1504,7 @@ DEFAULT_ARCONTEXT_IMAGE_PARAMETERS = {
class ArContext(DataFeatures.PipelineStepObject):
"""
- Defines abstract Python context manager to handle incoming gaze data before passing them to a processing pipeline.
+ Defines abstract Python context manager to handle eye tracker data before passing them to a processing pipeline.
"""
# noinspection PyMissingConstructor
diff --git a/src/argaze/utils/contexts/OpenCV.py b/src/argaze/utils/contexts/OpenCV.py
index 111ed8e..273705a 100644
--- a/src/argaze/utils/contexts/OpenCV.py
+++ b/src/argaze/utils/contexts/OpenCV.py
@@ -26,7 +26,12 @@ import cv2
from argaze import ArFeatures, DataFeatures
-class Window(ArFeatures.LiveProcessingContext):
+class Cursor(ArFeatures.ArContext):
+ """Process cursor position over OpenCV window.
+
+ !!! warning
+ It is assumed that an OpenCV window with the same name as the context is used to display the context's pipeline image.
+ """
@DataFeatures.PipelineStepInit
def __init__(self, **kwargs):
@@ -37,13 +42,13 @@ class Window(ArFeatures.LiveProcessingContext):
@DataFeatures.PipelineStepEnter
def __enter__(self):
- logging.info('OpenCV window context starts...')
+ logging.info('OpenCV.Cursor context starts...')
- # Create a window to display context
+ # Create a window
cv2.namedWindow(self.name, cv2.WINDOW_AUTOSIZE)
# Init timestamp
- self.__start_time = time.time()
+ self._start_time = time.time()
# Attach mouse event callback to window
cv2.setMouseCallback(self.name, self.__on_mouse_event)
@@ -53,7 +58,7 @@ class Window(ArFeatures.LiveProcessingContext):
@DataFeatures.PipelineStepExit
def __exit__(self, exception_type, exception_value, exception_traceback):
- logging.info('OpenCV window context stops...')
+ logging.info('OpenCV.Cursor context stops...')
# Delete window
cv2.destroyAllWindows()
@@ -61,20 +66,24 @@ class Window(ArFeatures.LiveProcessingContext):
def __on_mouse_event(self, event, x, y, flags, param):
"""Process pointer position."""
- logging.debug('Window.on_mouse_event %i %i', x, y)
+ logging.debug('OpenCV.Cursor.on_mouse_event %i %i', x, y)
if not self.is_paused():
# Process timestamped gaze position
- self._process_gaze_position(timestamp = int((time.time() - self.__start_time) * 1e3), x = x, y = y)
+ self._process_gaze_position(timestamp = int((time.time() - self._start_time) * 1e3), x = x, y = y)
-class Movie(ArFeatures.PostProcessingContext):
+class Movie(Cursor):
+ """Process movie images and cursor position over OpenCV window.
+ !!! warning
+ It is assumed that an OpenCV window with the same name as the context is used to display the context's pipeline image.
+ """
@DataFeatures.PipelineStepInit
def __init__(self, **kwargs):
- # Init PostProcessingContext class
+ # Init Cursor class
super().__init__()
# Init private attributes
@@ -109,16 +118,10 @@ class Movie(ArFeatures.PostProcessingContext):
@DataFeatures.PipelineStepEnter
def __enter__(self):
- logging.info('OpenCV movie context starts...')
-
- # Create a window to display context
- cv2.namedWindow(self.name, cv2.WINDOW_AUTOSIZE)
-
- # Init timestamp
- self.__start_time = time.time()
+ logging.info('OpenCV.Movie context starts...')
- # Attach mouse event callback to window
- cv2.setMouseCallback(self.name, self.__on_mouse_event)
+ # Enter in Cursor context
+ super().__enter__()
# Open reading thread
self.__reading_thread = threading.Thread(target=self.__read)
@@ -174,33 +177,23 @@ class Movie(ArFeatures.PostProcessingContext):
@DataFeatures.PipelineStepExit
def __exit__(self, exception_type, exception_value, exception_traceback):
- logging.info('OpenCV movie context stops...')
+ logging.info('OpenCV.Movie context stops...')
+
+ # Exit from Cursor context
+ super().__exit__(exception_type, exception_value, exception_traceback)
# Close data stream
- self._stop_event.set()
+ self.stop()
# Stop reading thread
threading.Thread.join(self.__reading_thread)
- # Delete window
- cv2.destroyAllWindows()
-
- def __on_mouse_event(self, event, x, y, flags, param):
- """Process pointer position."""
-
- logging.debug('Window.on_mouse_event %i %i', x, y)
-
- if not self.is_paused():
-
- # Process timestamped gaze position
- self._process_gaze_position(timestamp = int((time.time() - self.__start_time) * 1e3), x = x, y = y)
-
def refresh(self):
"""Refresh current frame."""
self.__refresh = True
def previous(self):
-
+ """Go to previous frame."""
self.__next_image_index -= 1
# Clip image index
@@ -208,6 +201,7 @@ class Movie(ArFeatures.PostProcessingContext):
self.__next_image_index = 0
def next(self):
+ """Go to next frame."""
self.__next_image_index += 1
@@ -217,13 +211,13 @@ class Movie(ArFeatures.PostProcessingContext):
@property
def duration(self) -> int|float:
- """Get data duration."""
+ """Get movie duration."""
return self.__movie_length / self.__movie_fps
@property
def progression(self) -> float:
- """Get data processing progression between 0 and 1."""
+ """Get movie processing progression between 0 and 1."""
if self.__current_image_index is not None:
diff --git a/src/argaze/utils/contexts/Random.py b/src/argaze/utils/contexts/Random.py
index 29b9830..c7b2187 100644
--- a/src/argaze/utils/contexts/Random.py
+++ b/src/argaze/utils/contexts/Random.py
@@ -71,8 +71,29 @@ class GazePositionGenerator(ArFeatures.ArContext):
# Edit millisecond timestamp
timestamp = int((time.time() - start_time) * 1e3)
- self.__x += random.randint(-10, 10)
- self.__y += random.randint(-10, 10)
+ # Random saccade
+ if random.randint(0, 100) == 0:
+
+ rand_x = random.randint(0, int(self.__range[0] / 2))
+ rand_y = random.randint(0, int(self.__range[1] / 2))
+
+ self.__x += random.randint(-rand_x, rand_x)
+ self.__y += random.randint(-rand_y, rand_y)
+
+ # Random fixation
+ else:
+
+ self.__x += random.randint(-1, 1)
+ self.__y += random.randint(-1, 1)
+
+ # Recenter position when it goes out of range
+ if self.__x < 0 or self.__x > self.__range[0]:
+
+ self.__x = int(self.range[0] / 2)
+
+ if self.__y < 0 or self.__y > self.__range[1]:
+
+ self.__y = int(self.range[1] / 2)
logging.debug('> timestamp=%i, x=%i, y=%i', timestamp, self.__x, self.__y)
diff --git a/src/argaze/utils/demo/opencv_cursor_context.json b/src/argaze/utils/demo/opencv_cursor_context.json
new file mode 100644
index 0000000..659ffd6
--- /dev/null
+++ b/src/argaze/utils/demo/opencv_cursor_context.json
@@ -0,0 +1,6 @@
+{
+ "argaze.utils.contexts.OpenCV.Cursor" : {
+ "name": "OpenCV cursor",
+ "pipeline": "gaze_analysis_pipeline.json"
+ }
+} \ No newline at end of file
diff --git a/src/argaze/utils/demo/opencv_window_context.json b/src/argaze/utils/demo/opencv_window_context.json
deleted file mode 100644
index d589665..0000000
--- a/src/argaze/utils/demo/opencv_window_context.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
- "argaze.utils.contexts.OpenCV.Window" : {
- "name": "OpenCV Window",
- "pipeline": "gaze_analysis_pipeline.json"
- }
-} \ No newline at end of file