aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorThéo de la Hogue2022-04-20 20:45:37 +0200
committerThéo de la Hogue2022-04-20 20:45:37 +0200
commit0251deb5d7117f4fa4eb0d4ecf681c924eaf63ec (patch)
tree65c029625b933134c6d4d63c8568fd9a7e8948d3
parent2e0f8f4200da7bba4667e4bc098a3c5d964ae40e (diff)
downloadargaze-0251deb5d7117f4fa4eb0d4ecf681c924eaf63ec.zip
argaze-0251deb5d7117f4fa4eb0d4ecf681c924eaf63ec.tar.gz
argaze-0251deb5d7117f4fa4eb0d4ecf681c924eaf63ec.tar.bz2
argaze-0251deb5d7117f4fa4eb0d4ecf681c924eaf63ec.tar.xz
Removing former DataStructures.DictObject inheritance
-rw-r--r--src/argaze/GazeFeatures.py6
-rw-r--r--src/argaze/utils/export_tobii_segment_aruco_visual_scan.py2
2 files changed, 4 insertions, 4 deletions
diff --git a/src/argaze/GazeFeatures.py b/src/argaze/GazeFeatures.py
index a382ddc..8f16b14 100644
--- a/src/argaze/GazeFeatures.py
+++ b/src/argaze/GazeFeatures.py
@@ -11,7 +11,7 @@ import numpy
FIXATION_MAX_DURATION = 1000
@dataclass
-class GazePosition(DataStructures.DictObject):
+class GazePosition():
"""Define gaze position."""
x: float
@@ -28,7 +28,7 @@ class TimeStampedGazePositions(DataStructures.TimeStampedBuffer):
super().__setitem__(key, value)
@dataclass
-class Fixation(DataStructures.DictObject):
+class Fixation():
"""Define fixation"""
duration: float
@@ -192,7 +192,7 @@ class DispersionBasedFixationIdentifier(FixationIdentifier):
return -1, None
@dataclass
-class VisualScanStep(DataStructures.DictObject):
+class VisualScanStep():
"""Define a visual scan step as a duration, the name of the area of interest and all its frames during the step."""
duration: float
diff --git a/src/argaze/utils/export_tobii_segment_aruco_visual_scan.py b/src/argaze/utils/export_tobii_segment_aruco_visual_scan.py
index 312e10d..c292967 100644
--- a/src/argaze/utils/export_tobii_segment_aruco_visual_scan.py
+++ b/src/argaze/utils/export_tobii_segment_aruco_visual_scan.py
@@ -111,7 +111,7 @@ def main():
# Draw video synchronized gaze position
gaze_position = GazeFeatures.GazePosition(int(closest_gaze_position.gp[0] * video_frame.width), int(closest_gaze_position.gp[1] * video_frame.height))
- cv.circle(video_frame.matrix, tuple(gaze_position), 4, (0, 255, 255), -1)
+ cv.circle(video_frame.matrix, gaze_position.as_tuple(), 4, (0, 255, 255), -1)
# Store gaze position at this time in millisecond
ts_gaze_positions[video_ts] = gaze_position