Diffstat (limited to 'docs/user_guide/eye_tracking_context/advanced_topics')
-rw-r--r--  docs/user_guide/eye_tracking_context/advanced_topics/context_definition.md  193
-rw-r--r--  docs/user_guide/eye_tracking_context/advanced_topics/scripting.md  106
-rw-r--r--  docs/user_guide/eye_tracking_context/advanced_topics/timestamped_gaze_positions_edition.md  66
3 files changed, 365 insertions, 0 deletions
diff --git a/docs/user_guide/eye_tracking_context/advanced_topics/context_definition.md b/docs/user_guide/eye_tracking_context/advanced_topics/context_definition.md
new file mode 100644
index 0000000..99b6c7a
--- /dev/null
+++ b/docs/user_guide/eye_tracking_context/advanced_topics/context_definition.md
@@ -0,0 +1,193 @@
+Define a context class
+======================
+
+The [ArContext](../../../argaze.md/#argaze.ArFeatures.ArContext) class defines a generic base class interface to handle incoming eye tracker data before passing it to a processing pipeline, following the [Python context manager protocol](https://docs.python.org/3/reference/datamodel.html#context-managers).
+
+The [ArContext](../../../argaze.md/#argaze.ArFeatures.ArContext) class interface provides playback features to stop or pause processing, and performance assessment features to measure how many times processing steps are called and how much time they take.
+
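+For instance, a script can rely on these playback features to control a running context. Here is a minimal, illustrative sketch assuming a *context* object has already been loaded and entered (see the scripting page); the *work_is_done* flag is hypothetical and only the *is_running*, *is_paused* and *stop* methods used elsewhere in this guide are called.
+
+```python
+# Assuming the context is loaded and is running
+...
+
+    # Work as long as the context is running
+    while context.is_running():
+
+        # Skip processing while the context is paused
+        if not context.is_paused():
+
+            # Do something with context
+            ...
+
+        # Stop the context processing loops when the work is done
+        if work_is_done:
+
+            context.stop()
+```
+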
+In addition, the [LiveProcessingContext](../../../argaze.md/#argaze.ArFeatures.LiveProcessingContext) class inherits from [ArContext](../../../argaze.md/#argaze.ArFeatures.ArContext) and defines an abstract *calibrate* method to implement a device-specific calibration process.
+
+In the same way, the [PostProcessingContext](../../../argaze.md/#argaze.ArFeatures.PostProcessingContext) class inherits from [ArContext](../../../argaze.md/#argaze.ArFeatures.ArContext) and defines abstract *previous* and *next* playback methods to move through a record's frames, as well as *duration* and *progression* properties that give information about the record length and the processing progress.
+
+Finally, a specific eye tracking context can be defined in a Python file by writing a class that inherits from either the [ArContext](../../../argaze.md/#argaze.ArFeatures.ArContext), [LiveProcessingContext](../../../argaze.md/#argaze.ArFeatures.LiveProcessingContext) or [PostProcessingContext](../../../argaze.md/#argaze.ArFeatures.PostProcessingContext) class.
+
+## Write live processing context
+
+Here is a live processing context example that processes gaze positions and camera images in two separate threads:
+
+```python
+import threading
+
+from argaze import ArFeatures, DataFeatures
+
+class LiveProcessingExample(ArFeatures.LiveProcessingContext):
+
+ @DataFeatures.PipelineStepInit
+ def __init__(self, **kwargs):
+
+ # Init LiveProcessingContext class
+ super().__init__()
+
+ # Init private attribute
+ self.__parameter = ...
+
+ @property
+ def parameter(self):
+ """Any context specific parameter."""
+ return self.__parameter
+
+ @parameter.setter
+ def parameter(self, parameter):
+ self.__parameter = parameter
+
+ @DataFeatures.PipelineStepEnter
+ def __enter__(self):
+ """Start context."""
+
+        # Start context according to any specific parameter, e.g. self.parameter
+        ...
+
+ # Start a gaze position processing thread
+ self.__gaze_thread = threading.Thread(target = self.__gaze_position_processing)
+ self.__gaze_thread.start()
+
+ # Start a camera image processing thread if applicable
+ self.__camera_thread = threading.Thread(target = self.__camera_image_processing)
+ self.__camera_thread.start()
+
+ return self
+
+ def __gaze_position_processing(self):
+ """Process gaze position."""
+
+ # Processing loop
+ while self.is_running():
+
+            # Skip processing while paused
+ if not self.is_paused():
+
+ # Assuming that timestamp, x and y values are available
+ ...
+
+ # Process timestamped gaze position
+ self._process_gaze_position(timestamp = timestamp, x = x, y = y)
+
+            # Wait some time if needed
+ ...
+
+ def __camera_image_processing(self):
+ """Process camera image if applicable."""
+
+ # Processing loop
+ while self.is_running():
+
+            # Skip processing while paused
+ if not self.is_paused():
+
+ # Assuming that timestamp, camera_image are available
+ ...
+
+ # Process timestamped camera image
+ self._process_camera_image(timestamp = timestamp, image = camera_image)
+
+            # Wait some time if needed
+ ...
+
+ @DataFeatures.PipelineStepExit
+ def __exit__(self, exception_type, exception_value, exception_traceback):
+ """End context."""
+
+ # Stop processing loops
+ self.stop()
+
+        # Wait for processing threads to finish
+        self.__gaze_thread.join()
+        self.__camera_thread.join()
+
+ def calibrate(self):
+ """Handle device calibration process."""
+
+ ...
+```
+
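+A context class defined this way can be used like any built-in context. For instance, here is an illustrative sketch that loads the class above through the [*from_dict*](../../../argaze.md/#argaze.DataFeatures.from_dict) function described in the scripting page, assuming the class is exposed by a hypothetical *my_package* module and that the configuration keys match its attributes:
+
+```python
+from argaze import DataFeatures
+
+import my_package
+
+# Load the custom live processing context with its specific parameter
+with DataFeatures.from_dict(my_package.LiveProcessingExample, {"name": "My live context", "parameter": ...}) as context:
+
+    # Handle the device calibration process
+    context.calibrate()
+
+    while context.is_running():
+
+        # Do something with context
+        ...
+```
+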
+## Write post processing context
+
+Here is a post processing context example that processes gaze positions and camera images in the same thread:
+
+```python
+import threading
+
+from argaze import ArFeatures, DataFeatures
+
+class PostProcessingExample(ArFeatures.PostProcessingContext):
+
+ @DataFeatures.PipelineStepInit
+ def __init__(self, **kwargs):
+
+        # Init PostProcessingContext class
+ super().__init__()
+
+ # Init private attribute
+ self.__parameter = ...
+
+ @property
+ def parameter(self):
+ """Any context specific parameter."""
+ return self.__parameter
+
+ @parameter.setter
+ def parameter(self, parameter):
+ self.__parameter = parameter
+
+ @DataFeatures.PipelineStepEnter
+ def __enter__(self):
+ """Start context."""
+
+        # Start context according to any specific parameter, e.g. self.parameter
+        ...
+
+        # Start a data reading thread
+ self.__read_thread = threading.Thread(target = self.__data_reading)
+ self.__read_thread.start()
+
+ return self
+
+ def __data_reading(self):
+ """Process gaze position and camera image if applicable."""
+
+ # Processing loop
+ while self.is_running():
+
+            # Skip processing while paused
+ if not self.is_paused():
+
+ # Assuming that timestamp, camera_image are available
+ ...
+
+ # Process timestamped camera image
+ self._process_camera_image(timestamp = timestamp, image = camera_image)
+
+ # Assuming that timestamp, x and y values are available
+ ...
+
+ # Process timestamped gaze position
+ self._process_gaze_position(timestamp = timestamp, x = x, y = y)
+
+            # Wait some time if needed
+ ...
+
+ @DataFeatures.PipelineStepExit
+ def __exit__(self, exception_type, exception_value, exception_traceback):
+ """End context."""
+
+ # Stop processing loops
+ self.stop()
+
+        # Wait for the reading thread to finish
+        self.__read_thread.join()
+
+ def previous(self):
+ """Go to previous camera image frame."""
+ ...
+
+ def next(self):
+ """Go to next camera image frame."""
+ ...
+```
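+
+As mentioned earlier, the [PostProcessingContext](../../../argaze.md/#argaze.ArFeatures.PostProcessingContext) class also exposes *duration* and *progression* properties. Here is an illustrative sketch of how they could be overridden in the example above; how the record length and the current reading position are obtained is left as an assumption:
+
+```python
+    @property
+    def duration(self):
+        """Get record duration."""
+
+        # Assuming the record length is known
+        return ...
+
+    @property
+    def progression(self):
+        """Get processing progression as a ratio of the record duration."""
+
+        # Assuming the current reading position is known
+        return ...
+```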
+
diff --git a/docs/user_guide/eye_tracking_context/advanced_topics/scripting.md b/docs/user_guide/eye_tracking_context/advanced_topics/scripting.md
new file mode 100644
index 0000000..8753eb6
--- /dev/null
+++ b/docs/user_guide/eye_tracking_context/advanced_topics/scripting.md
@@ -0,0 +1,106 @@
+Script the context
+==================
+
+Context objects are accessible from a Python script.
+
+## Load configuration from JSON file
+
+A context configuration can be loaded from a JSON file using the [*load*](../../../argaze.md/#argaze.load) function.
+
+```python
+from argaze import load
+
+# Load a context
+with load(configuration_filepath) as context:
+
+ while context.is_running():
+
+ # Do something with context
+ ...
+
+        # Wait some time if needed
+ ...
+```
+
+!!! note
+    The **with** statement enters the context by calling its **enter** method, then ensures that its **exit** method is always called at the end.
+
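+When the **with** statement cannot be used, for instance when the context lifetime is managed by a larger application, the same behavior can be reproduced by calling these methods explicitly. This is only an illustrative sketch:
+
+```python
+from argaze import load
+
+# Load a context without entering it yet
+context = load(configuration_filepath)
+
+# Enter the context to start it
+context.__enter__()
+
+try:
+
+    while context.is_running():
+
+        # Do something with context
+        ...
+
+finally:
+
+    # Always exit the context to end it properly
+    context.__exit__(None, None, None)
+```
+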
+## Load configuration from dictionary
+
+A context configuration can be loaded from a Python dictionary using the [*from_dict*](../../../argaze.md/#argaze.DataFeatures.from_dict) function.
+
+```python
+from argaze import DataFeatures
+
+import my_package
+
+# Set working directory to enable relative file path loading
+DataFeatures.set_working_directory('path/to/folder')
+
+# Edit a dict with context configuration
+configuration = {
+ "name": "My context",
+ "parameter": ...,
+ "pipeline": ...
+}
+
+# Load a context from a package
+with DataFeatures.from_dict(my_package.MyContext, configuration) as context:
+
+ while context.is_running():
+
+ # Do something with context
+ ...
+
+        # Wait some time if needed
+ ...
+```
+
+## Manage context
+
+The context or the pipeline type can be checked in order to adapt the script's behavior.
+
+```python
+from argaze import ArFeatures
+
+# Assuming the context is loaded and is running
+...
+
+ # Check context type
+
+ # Live processing case: calibration method is available
+ if issubclass(type(context), ArFeatures.LiveProcessingContext):
+ ...
+
+ # Post processing case: more playback methods are available
+ if issubclass(type(context), ArFeatures.PostProcessingContext):
+ ...
+
+ # Check pipeline type
+
+    # Screen-based case: only gaze positions are processed
+ if issubclass(type(context.pipeline), ArFeatures.ArFrame):
+ ...
+
+    # Head-mounted case: camera images are also processed
+ if issubclass(type(context.pipeline), ArFeatures.ArCamera):
+ ...
+```
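+
+In the post processing case, the extra playback methods and properties described in the context definition page can be used to drive the reading of the record. This is an illustrative sketch based on the *previous*, *next*, *duration* and *progression* members:
+
+```python
+from argaze import ArFeatures
+
+# Assuming the context is loaded and is running
+...
+
+    # Post processing case: drive the record playback
+    if issubclass(type(context), ArFeatures.PostProcessingContext):
+
+        # Go to the next camera image frame
+        context.next()
+
+        # Go back to the previous camera image frame
+        context.previous()
+
+        # Read the record length and the processing progression
+        print(context.duration, context.progression)
+```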
+
+## Display context
+
+The context image can be displayed at low priority so as not to block the pipeline processing.
+
+```python
+from argaze import DataFeatures
+
+# Assuming the context is loaded and is running
+...
+
+ # Display context if the pipeline is available
+ try:
+
+ ... = context.image(wait = False)
+
+ except DataFeatures.SharedObjectBusy:
+
+ pass
+```
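+
+Put together, a simple display loop could look like the following sketch. OpenCV is assumed here for rendering only as an illustration; any other image display library would do.
+
+```python
+import cv2
+
+from argaze import load, DataFeatures
+
+# Load a context
+with load(configuration_filepath) as context:
+
+    while context.is_running():
+
+        # Display the context image if the pipeline is available
+        try:
+
+            cv2.imshow("Context image", context.image(wait = False))
+
+        except DataFeatures.SharedObjectBusy:
+
+            pass
+
+        # Refresh the display every 40 ms and leave when the Esc key is pressed
+        if cv2.waitKey(40) == 27:
+
+            break
+
+# Close the display window
+cv2.destroyAllWindows()
+```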
diff --git a/docs/user_guide/eye_tracking_context/advanced_topics/timestamped_gaze_positions_edition.md b/docs/user_guide/eye_tracking_context/advanced_topics/timestamped_gaze_positions_edition.md
new file mode 100644
index 0000000..340dbaf
--- /dev/null
+++ b/docs/user_guide/eye_tracking_context/advanced_topics/timestamped_gaze_positions_edition.md
@@ -0,0 +1,66 @@
+Edit timestamped gaze positions
+===============================
+
+Whether eye data comes from a file on disk or from a live stream, timestamped gaze positions are required before going further.
+
+![Timestamped gaze positions](../../../img/timestamped_gaze_positions.png)
+
+## Import timestamped gaze positions from CSV file
+
+Timestamped gaze positions can be loaded from a [Pandas DataFrame](https://pandas.pydata.org/docs/getting_started/intro_tutorials/01_table_oriented.html#min-tut-01-tableoriented) object, which can itself be read from a CSV file.
+
+```python
+from argaze import GazeFeatures
+import pandas
+
+# Load gaze positions from a CSV file into a Pandas DataFrame
+dataframe = pandas.read_csv('gaze_positions.csv', delimiter=",", low_memory=False)
+
+# Convert the Pandas DataFrame into timestamped gaze positions, specifying which column label to use for each field
+ts_gaze_positions = GazeFeatures.TimeStampedGazePositions.from_dataframe(dataframe, timestamp = 'Recording timestamp [ms]', x = 'Gaze point X [px]', y = 'Gaze point Y [px]')
+
+# Iterate over timestamped gaze positions
+for timestamped_gaze_position in ts_gaze_positions:
+
+ # Do something with each timestamped gaze position
+ ...
+```
+
+## Edit timestamped gaze positions from live stream
+
+Real-time gaze positions can be edited thanks to the [GazePosition](../../../argaze.md/#argaze.GazeFeatures.GazePosition) class.
+Timestamps can be taken from the incoming data stream or, if not available, generated thanks to the Python [time package](https://docs.python.org/3/library/time.html).
+
+```python
+from argaze import GazeFeatures
+
+# Assuming this code is inside the function where timestamp_µs, gaze_x and gaze_y values are received
+...
+
+    # Define a timestamped gaze position, converting the microsecond timestamp into seconds
+ timestamped_gaze_position = GazeFeatures.GazePosition((gaze_x, gaze_y), timestamp=timestamp_µs * 1e-6)
+
+ # Do something with each timestamped gaze position
+ ...
+```
+
+```python
+from argaze import GazeFeatures
+
+import time
+
+# Initialize timestamp
+start_time = time.time()
+
+# Assuming this code is inside the function where only gaze_x and gaze_y values are received (no timestamp)
+...
+
+    # Define a timestamped gaze position with a millisecond timestamp
+ timestamped_gaze_position = GazeFeatures.GazePosition((gaze_x, gaze_y), timestamp=int((time.time() - start_time) * 1e3))
+
+ # Do something with each timestamped gaze position
+ ...
+```
+
+!!! warning "Free time unit"
+    Timestamps can be integers or floats, expressed in seconds, milliseconds or whatever unit you need. The only constraint is that all time values used in further configurations have to be expressed in the same unit.