author      Théo de la Hogue    2022-12-13 10:57:28 +0100
committer   Théo de la Hogue    2022-12-13 10:57:28 +0100
commit      6839eebf20eb8563c10f4a8d794dba0ff6036457 (patch)
tree        a1c8a6da6d3703c97c9db8329a33a70a3e28bf47
parent      76273c33348e1e84cbffb8bd0a7cea6d1aee1e74 (diff)
Renaming dispersion to deviation_max. Removing the UnknownGazeMovement class to use the generic GazeMovement class instead. Displaying the gaze position related to each frame and writing invalid position messages.
-rw-r--r--  src/argaze.test/GazeAnalysis/DispersionBasedGazeMovementIdentifier.py |  10
-rw-r--r--  src/argaze/GazeAnalysis/DispersionBasedGazeMovementIdentifier.py      | 112
-rw-r--r--  src/argaze/utils/tobii_segment_gaze_metrics_export.py                 |  91
-rw-r--r--  src/argaze/utils/tobii_segment_gaze_movements_export.py               | 210
4 files changed, 249 insertions, 174 deletions
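
For context, a minimal usage sketch of the renamed identifier API shown in the diffs below. Import paths and values are illustrative (taken from the test and utils hunks), and the threshold unit follows the timestamp unit of the gaze position buffer (microseconds in the Tobii utils scripts):

    from argaze import GazeFeatures
    from argaze.GazeAnalysis import DispersionBasedGazeMovementIdentifier

    # Hypothetical buffer of timestamped gaze positions, filled elsewhere
    ts_gaze_positions = GazeFeatures.TimeStampedGazePositions()

    # dispersion_threshold is now deviation_max_threshold (px) and
    # duration_threshold is now duration_min_threshold (200 ms expressed in µs here)
    gaze_movement_identifier = DispersionBasedGazeMovementIdentifier.GazeMovementIdentifier(
        deviation_max_threshold=50,
        duration_min_threshold=200*1e3)

    # identify() still returns fixations, saccades, other (formerly unknown) movements and status
    ts_fixations, ts_saccades, ts_movements, ts_status = gaze_movement_identifier.identify(ts_gaze_positions)
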
diff --git a/src/argaze.test/GazeAnalysis/DispersionBasedGazeMovementIdentifier.py b/src/argaze.test/GazeAnalysis/DispersionBasedGazeMovementIdentifier.py
index c731cef..55fab29 100644
--- a/src/argaze.test/GazeAnalysis/DispersionBasedGazeMovementIdentifier.py
+++ b/src/argaze.test/GazeAnalysis/DispersionBasedGazeMovementIdentifier.py
@@ -95,7 +95,7 @@ class TestDispersionBasedGazeMovementIdentifierClass(unittest.TestCase):
max_time = 0.1
ts_gaze_positions = build_gaze_fixation(size, center, dispersion, start_time, min_time, max_time)
- gaze_movement_identifier = DispersionBasedGazeMovementIdentifier.GazeMovementIdentifier(dispersion_threshold=dispersion, duration_threshold=max_time*2)
+ gaze_movement_identifier = DispersionBasedGazeMovementIdentifier.GazeMovementIdentifier(deviation_max_threshold=dispersion, duration_min_threshold=max_time*2)
ts_fixations, ts_saccades, ts_unknown, ts_status = gaze_movement_identifier.identify(ts_gaze_positions)
# Check result size
@@ -128,7 +128,7 @@ class TestDispersionBasedGazeMovementIdentifierClass(unittest.TestCase):
ts_gaze_positions = ts_gaze_positions_A.append(ts_gaze_positions_B)
- gaze_movement_identifier = DispersionBasedGazeMovementIdentifier.GazeMovementIdentifier(dispersion_threshold=dispersion, duration_threshold=max_time*2)
+ gaze_movement_identifier = DispersionBasedGazeMovementIdentifier.GazeMovementIdentifier(deviation_max_threshold=dispersion, duration_min_threshold=max_time*2)
ts_fixations, ts_saccades, ts_unknown, ts_status = gaze_movement_identifier.identify(ts_gaze_positions)
# Check result size
@@ -181,7 +181,7 @@ class TestDispersionBasedGazeMovementIdentifierClass(unittest.TestCase):
ts_gaze_positions = ts_gaze_positions_A.append(ts_move_positions).append(ts_gaze_positions_B)
- gaze_movement_identifier = DispersionBasedGazeMovementIdentifier.GazeMovementIdentifier(dispersion_threshold=dispersion, duration_threshold=max_time*2)
+ gaze_movement_identifier = DispersionBasedGazeMovementIdentifier.GazeMovementIdentifier(deviation_max_threshold=dispersion, duration_min_threshold=max_time*2)
ts_fixations, ts_saccades, ts_unknown, ts_status = gaze_movement_identifier.identify(ts_gaze_positions)
# Check result size
@@ -226,7 +226,7 @@ class TestDispersionBasedGazeMovementIdentifierClass(unittest.TestCase):
ts_gaze_positions = build_gaze_fixation(size, center, dispersion, start_time, min_time, max_time, validity)
- gaze_movement_identifier = DispersionBasedGazeMovementIdentifier.GazeMovementIdentifier(dispersion_threshold=dispersion, duration_threshold=max_time*2)
+ gaze_movement_identifier = DispersionBasedGazeMovementIdentifier.GazeMovementIdentifier(deviation_max_threshold=dispersion, duration_min_threshold=max_time*2)
ts_fixations, ts_saccades, ts_unknown, ts_status = gaze_movement_identifier.identify(ts_gaze_positions)
# Check result size
@@ -287,7 +287,7 @@ class TestDispersionBasedGazeMovementIdentifierClass(unittest.TestCase):
ts_gaze_positions = ts_gaze_positions_A.append(ts_gaze_positions_B)
- gaze_movement_identifier = DispersionBasedGazeMovementIdentifier.GazeMovementIdentifier(dispersion_threshold=dispersion, duration_threshold=max_time*2)
+ gaze_movement_identifier = DispersionBasedGazeMovementIdentifier.GazeMovementIdentifier(deviation_max_threshold=dispersion, duration_min_threshold=max_time*2)
ts_fixations, ts_saccades, ts_unknown, ts_status = gaze_movement_identifier.identify(ts_gaze_positions)
# Check result size
diff --git a/src/argaze/GazeAnalysis/DispersionBasedGazeMovementIdentifier.py b/src/argaze/GazeAnalysis/DispersionBasedGazeMovementIdentifier.py
index e205189..94b6357 100644
--- a/src/argaze/GazeAnalysis/DispersionBasedGazeMovementIdentifier.py
+++ b/src/argaze/GazeAnalysis/DispersionBasedGazeMovementIdentifier.py
@@ -11,15 +11,18 @@ import numpy
class Fixation(GazeFeatures.Fixation):
"""Define dispersion based fixation."""
- dispersion: float = field(init=False)
- """Dispersion of all gaze positions belonging to the fixation."""
+ centroid: tuple = field(init=False)
+ """Centroïd of all gaze positions belonging to the fixation."""
+
+ deviation_max: float = field(init=False)
+ """Maximal gaze position distance to the centroïd."""
+
+ deviation_mean: float = field(init=False)
+ """Average of gaze position distances to the centroïd."""
euclidian: bool = field(default=True)
"""Does the distance is calculated in euclidian way."""
- centroid: tuple = field(init=False)
- """Centroïd of all gaze positions belonging to the fixation."""
-
def __post_init__(self):
super().__post_init__()
@@ -44,25 +47,29 @@ class Fixation(GazeFeatures.Fixation):
dist = numpy.sum(dist, axis=1)
dist = numpy.sqrt(dist)
- __dispersion = max(dist)
+ __deviation_max = max(dist)
+ __deviation_mean = numpy.mean(dist)
else:
- __dispersion = (max(x_list) - min(x_list)) + (max(y_list) - min(y_list))
-
- # Update frozen dispersion attribute
- object.__setattr__(self, 'dispersion', __dispersion)
+ __deviation_max = (max(x_list) - min(x_list)) + (max(y_list) - min(y_list))
# Update frozen centroid attribute
object.__setattr__(self, 'centroid', (cx, cy))
+ # Update frozen deviation_max attribute
+ object.__setattr__(self, 'deviation_max', __deviation_max)
+
+ # Update frozen deviation_mean attribute
+ object.__setattr__(self, 'deviation_mean', __deviation_mean)
+
def overlap(self, fixation) -> float:
"""Does this fixation overlap another fixation?"""
dist = (self.centroid[0] - fixation.centroid[0])**2 + (self.centroid[1] - fixation.centroid[1])**2
dist = numpy.sqrt(dist)
- return dist < (self.dispersion + fixation.dispersion)
+ return dist <= (self.deviation_max + fixation.deviation_max)
def contains_point(self, gaze_position) -> bool:
"""Is a point inside fixation?"""
@@ -70,7 +77,7 @@ class Fixation(GazeFeatures.Fixation):
dist = (self.centroid[0] - gaze_position[0])**2 + (self.centroid[1] - gaze_position[1])**2
dist = numpy.sqrt(dist)
- return dist < self.dispersion
+ return dist <= self.deviation_max
def merge(self, fixation) -> float:
"""Merge another fixation into this fixation."""
@@ -85,13 +92,6 @@ class Saccade(GazeFeatures.Saccade):
def __post_init__(self):
super().__post_init__()
-@dataclass(frozen=True)
-class UnknownGazeMovement(GazeFeatures.UnknownGazeMovement):
- """Define dispersion based unknown movement."""
-
- def __post_init__(self):
- super().__post_init__()
-
@dataclass
class GazeMovementIdentifier(GazeFeatures.GazeMovementIdentifier):
"""Implementation of the I-DT algorithm as described in:
@@ -102,11 +102,12 @@ class GazeMovementIdentifier(GazeFeatures.GazeMovementIdentifier):
71-78. [DOI=http://dx.doi.org/10.1145/355017.355028](DOI=http://dx.doi.org/10.1145/355017.355028)
"""
- dispersion_threshold: int|float
- """Maximal distance allowed to consider several gaze positions as a fixation."""
+ deviation_max_threshold: int|float
+ """Maximal distance allowed to consider a gaze movement as a fixation."""
- duration_threshold: int|float
- """Minimal duration allowed to consider several gaze positions as a fixation."""
+ duration_min_threshold: int|float
+ """Minimal duration allowed to consider a gaze movement as a fixation.
+ It is also used as maximal duration allowed to consider a gaze movement as a saccade."""
def __iter__(self) -> GazeFeatures.GazeMovementType:
"""GazeMovement identification generator."""
@@ -123,6 +124,15 @@ class GazeMovementIdentifier(GazeFeatures.GazeMovementIdentifier):
while not gaze_position_current.valid and len(self.__ts_gaze_positions) > 0:
ts_current, gaze_position_current = self.__ts_gaze_positions.pop_first()
+ # Output last fixation after too many invalid positions
+ if self.__last_fixation != None:
+
+ ts_last, gaze_position_last = self.__last_fixation.positions.last
+
+ if (ts_current - ts_last) > self.duration_min_threshold:
+
+ yield self.__last_fixation
+
# Select current and next valid gaze positions until a duration threshold
valid_gaze_positions = GazeFeatures.TimeStampedGazePositions()
valid_gaze_positions[ts_current] = gaze_position_current
@@ -132,7 +142,7 @@ class GazeMovementIdentifier(GazeFeatures.GazeMovementIdentifier):
for ts_next, gaze_position_next in self.__ts_gaze_positions.items():
- if (ts_next - ts_current) < self.duration_threshold:
+ if (ts_next - ts_current) < self.duration_min_threshold:
# Store valid position
if gaze_position_next.valid:
@@ -155,7 +165,7 @@ class GazeMovementIdentifier(GazeFeatures.GazeMovementIdentifier):
new_fixation = Fixation(valid_gaze_positions)
# Dispersion small enough: it is a fixation ! Try to extend it
- if new_fixation.dispersion <= self.dispersion_threshold:
+ if new_fixation.deviation_max <= self.deviation_max_threshold:
# Remove valid and unvalid gaze positions as there as now stored in new fixation
# -1 as current gaze position have already been poped
@@ -181,11 +191,11 @@ class GazeMovementIdentifier(GazeFeatures.GazeMovementIdentifier):
extended_fixation = Fixation(extended_gaze_positions)
# Dispersion is too wide : break
- if extended_fixation.dispersion > self.dispersion_threshold:
+ if extended_fixation.deviation_max > self.deviation_max_threshold:
break
- # NOTE : The last extended position is out of the fixation : this position will be popped later
+ # NOTE: The last extended position is out of the fixation : this position will be popped later
# Update new fixation
new_fixation = extended_fixation
@@ -207,7 +217,7 @@ class GazeMovementIdentifier(GazeFeatures.GazeMovementIdentifier):
self.__last_fixation.update()
new_fixation.update()
- # Edit inter movement gaze positions
+ # Edit inter-fixation movement gaze positions
movement_gaze_positions = GazeFeatures.TimeStampedGazePositions()
# Edit first movement gaze position
@@ -234,34 +244,23 @@ class GazeMovementIdentifier(GazeFeatures.GazeMovementIdentifier):
# Edit last movement gaze position
movement_gaze_positions[stop_movement_ts] = stop_position
- # Short time between fixations :
- # this movement is a saccade unless last and new fixations overlap
- if stop_movement_ts - start_movement_ts <= self.duration_threshold:
-
- # Does new fixation overlap last fixation?
- if self.__last_fixation.overlap(new_fixation):
+ # End of inter-fixation movement editing
- merged_positions = self.__last_fixation.positions
- merged_positions.append(movement_gaze_positions)
- merged_positions.append(new_fixation.positions)
+ # Does new fixation overlap last fixation?
+ if self.__last_fixation.overlap(new_fixation):
- self.__last_fixation = Fixation(merged_positions)
+ # Merge new fixation into last fixation
+ self.__last_fixation.merge(new_fixation)
- # Forget new fixation
- new_fixation = None
+ # QUESTION: What to do if the time between the two fixations is very long?
+ # It would be dangerous to set a timeout value as a fixation duration has no limit.
- else:
-
- # Output last fixation
- yield self.__last_fixation
+ # Forget new fixation
+ new_fixation = None
- # New fixation becomes the last fixation to allow further merging
- self.__last_fixation = new_fixation
+ # NOTE: Ignore inter-fixation gaze positions: they were probably noisy positions.
- # Output saccade
- yield Saccade(movement_gaze_positions)
-
- # Too much time between fixations: this movement is unknown
+ # Otherwise,
else:
# Output last fixation
@@ -270,8 +269,17 @@ class GazeMovementIdentifier(GazeFeatures.GazeMovementIdentifier):
# New fixation becomes the last fixation to allow further merging
self.__last_fixation = new_fixation
- # Output unknown movement
- yield UnknownGazeMovement(movement_gaze_positions)
+ # Short time between fixations : this movement is a saccade
+ if stop_movement_ts - start_movement_ts <= self.duration_min_threshold:
+
+ # Output saccade
+ yield Saccade(movement_gaze_positions)
+
+ # Too much time between fixations: this movement is unknown
+ else:
+
+ # Output unknown movement
+ yield GazeFeatures.GazeMovement(movement_gaze_positions)
# In any case, forget former unmatched gaze positions
unmatched_gaze_positions = GazeFeatures.TimeStampedGazePositions()
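
As a reading aid, the centroid and deviation computation introduced in Fixation.__post_init__ above boils down to the following numpy sketch (a standalone rough equivalent, not the actual class):

    import numpy

    def centroid_and_deviations(points):
        # points: list of (x, y) gaze positions belonging to a fixation
        points = numpy.array(points, dtype=float)

        # Centroid of all gaze positions
        cx, cy = points.mean(axis=0)

        # Euclidean distance of each gaze position to the centroid
        dist = numpy.sqrt(numpy.sum((points - (cx, cy))**2, axis=1))

        # deviation_max replaces the former dispersion value; deviation_mean is new
        return (cx, cy), dist.max(), dist.mean()

    # A tight cluster of positions yields a small deviation_max
    print(centroid_and_deviations([(100, 100), (102, 101), (98, 99), (101, 100)]))
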
diff --git a/src/argaze/utils/tobii_segment_gaze_metrics_export.py b/src/argaze/utils/tobii_segment_gaze_metrics_export.py
index c17fa7a..2935442 100644
--- a/src/argaze/utils/tobii_segment_gaze_metrics_export.py
+++ b/src/argaze/utils/tobii_segment_gaze_metrics_export.py
@@ -58,7 +58,7 @@ def main():
fixations_json_filepath = f'{destination_path}/gaze_fixations.json'
saccades_json_filepath = f'{destination_path}/gaze_saccades.json'
- unknown_json_filepath = f'{destination_path}/gaze_unknown.json'
+ movements_json_filepath = f'{destination_path}/gaze_movements.json'
gaze_status_json_filepath = f'{destination_path}/gaze_status.json'
gaze_metrics_filepath = f'{destination_path}/gaze_metrics.csv'
@@ -66,13 +66,13 @@ def main():
# Load gaze movements
ts_fixations = GazeFeatures.TimeStampedGazeMovements.from_json(fixations_json_filepath)
ts_saccades = GazeFeatures.TimeStampedGazeMovements.from_json(saccades_json_filepath)
- ts_unknown = GazeFeatures.TimeStampedGazeMovements.from_json(unknown_json_filepath)
+ ts_movements = GazeFeatures.TimeStampedGazeMovements.from_json(movements_json_filepath)
ts_status = GazeFeatures.TimeStampedGazeStatus.from_json(gaze_status_json_filepath)
print(f'\nLoaded gaze movements count:')
print(f'\tFixations: {len(ts_fixations)}')
print(f'\tSaccades: {len(ts_saccades)}')
- print(f'\tUnknown movements: {len(ts_unknown)}')
+ print(f'\tMovements: {len(ts_movements)}')
# Load tobii segment
tobii_segment = TobiiEntities.TobiiSegment(args.segment_path, int(args.time_range[0] * 1e6), int(args.time_range[1] * 1e6) if args.time_range[1] != None else None)
@@ -91,7 +91,7 @@ def main():
fixations_exist = len(ts_fixations) > 0
saccades_exist = len(ts_saccades) > 0
- unknown_exist = len(ts_unknown) > 0
+ movements_exist = len(ts_movements) > 0
status_exist = len(ts_status) > 0
if fixations_exist:
@@ -116,16 +116,16 @@ def main():
# Add 'end' column
saccades_dataframe['end'] = saccades_dataframe.index + saccades_dataframe.duration
- if unknown_exist:
+ if movements_exist:
# Create pandas dataframe
- unknown_dataframe = ts_unknown.as_dataframe()
+ movements_dataframe = ts_movements.as_dataframe()
# Reset time range offset
- unknown_dataframe.index = unknown_dataframe.index - unknown_dataframe.index[0]
+ movements_dataframe.index = movements_dataframe.index - movements_dataframe.index[0]
# Add 'end' column
- unknown_dataframe['end'] = unknown_dataframe.index + unknown_dataframe.duration
+ movements_dataframe['end'] = movements_dataframe.index + movements_dataframe.duration
# Work with period of time in microseconds instead of seconds
period_duration = args.period * 1e6
@@ -141,6 +141,14 @@ def main():
# Store period duration
period_metrics['duration (ms)'] = period_duration * 1e-3
+ # Default fixation analysis
+ fixations_duration_sum = 0.0
+ period_metrics['fixations_number'] = 0
+ period_metrics['fixations_duration_mean (ms)'] = None
+ period_metrics['fixations_duration_sum (ms)'] = None
+ period_metrics['fixations_duration_ratio (%)'] = None
+ period_metrics['fixations_deviation_mean (px)'] = None
+
# Analyse fixations
if fixations_exist:
@@ -152,18 +160,19 @@ def main():
#print('\n* Fixations:\n', fixations_period_dataframe)
fixations_duration_sum = fixations_period_dataframe.duration.sum()
-
period_metrics['fixations_number'] = fixations_period_dataframe.shape[0]
-
period_metrics['fixations_duration_mean (ms)'] = fixations_period_dataframe.duration.mean() * 1e-3
period_metrics['fixations_duration_sum (ms)'] = fixations_duration_sum * 1e-3
period_metrics['fixations_duration_ratio (%)'] = fixations_duration_sum / period_duration * 100
+ period_metrics['fixations_deviation_mean (px)'] = fixations_period_dataframe.deviation_max.mean()
- period_metrics['fixations_dispersion_mean (px)'] = fixations_period_dataframe.dispersion.mean()
-
- else:
-
- period_metrics['fixations_number'] = 0
+ # Default saccades analysis
+ saccades_duration_sum = 0.0
+ period_metrics['saccades_number'] = 0
+ period_metrics['saccades_duration_mean (ms)'] = None
+ period_metrics['saccades_duration_sum (ms)'] = None
+ period_metrics['saccades_duration_ratio (%)'] = None
+ period_metrics['saccades_distance_mean (px)'] = None
# Analyse saccades
if saccades_exist:
@@ -176,53 +185,43 @@ def main():
#print('\n* Saccades:\n', saccades_period_dataframe)
saccades_duration_sum = saccades_period_dataframe.duration.sum()
-
period_metrics['saccades_number'] = saccades_period_dataframe.shape[0]
-
period_metrics['saccades_duration_mean (ms)'] = saccades_period_dataframe.duration.mean() * 1e-3
period_metrics['saccades_duration_sum (ms)'] = saccades_duration_sum * 1e-3
period_metrics['saccades_duration_ratio (%)'] = saccades_duration_sum / period_duration * 100
-
period_metrics['saccades_distance_mean (px)'] = saccades_period_dataframe.distance.mean()
- else:
+ # Default movements analysis
+ movements_duration_sum = 0.0
+ period_metrics['movements_number'] = 0
+ period_metrics['movements_duration_mean (ms)'] = None
+ period_metrics['movements_duration_sum (ms)'] = None
+ period_metrics['movements_duration_ratio (%)'] = None
- period_metrics['saccades_number'] = 0
-
- # Export unknown movements analysis
- if unknown_exist:
+ # Analyse movements
+ if movements_exist:
# Select period
- unknown_period_dataframe = unknown_dataframe[(unknown_dataframe.index >= period_start_ts) & (unknown_dataframe.end < period_end_ts)]
+ movements_period_dataframe = movements_dataframe[(movements_dataframe.index >= period_start_ts) & (movements_dataframe.end < period_end_ts)]
- if not unknown_period_dataframe.empty:
+ if not movements_period_dataframe.empty:
- #print('\n* Unknown movements:\n', unknown_period_dataframe)
-
- unknown_duration_sum = unknown_period_dataframe.duration.sum()
-
- #period_metrics['unknown_number'] = unknown_period_dataframe.shape[0]
- #period_metrics['unknown_duration_mean (ms)'] = unknown_period_dataframe.duration.mean() * 1e-3
- #period_metrics['unknown_duration_sum (ms)'] = unknown_duration_sum * 1e-3
- #period_metrics['unknown_duration_ratio (%)'] = unknown_duration_sum / period_duration * 100
-
- #else:
-
- #period_metrics['unknown_number'] = 0
-
- if fixations_exist and saccades_exist:
-
- if not fixations_period_dataframe.empty and not saccades_period_dataframe.empty:
+ #print('\n* Unknown movements:\n', movements_period_dataframe)
- period_metrics['exploit_explore_ratio'] = fixations_duration_sum / saccades_duration_sum
+ movements_duration_sum = movements_period_dataframe.duration.sum()
+ period_metrics['movements_number'] = movements_period_dataframe.shape[0]
+ period_metrics['movements_duration_mean (ms)'] = movements_period_dataframe.duration.mean() * 1e-3
+ period_metrics['movements_duration_sum (ms)'] = movements_duration_sum * 1e-3
+ period_metrics['movements_duration_ratio (%)'] = movements_duration_sum / period_duration * 100
- if unknown_exist and not unknown_period_dataframe.empty:
+ # Analyse exploit/explore
+ if not saccades_exist and not movements_exist:
- period_metrics['unknown_movements_ratio'] = unknown_duration_sum / (fixations_duration_sum + saccades_duration_sum)
+ period_metrics['exploit_explore_ratio'] = None
- else:
+ else:
- period_metrics['unknown_movements_ratio'] = 0.0
+ period_metrics['exploit_explore_ratio'] = fixations_duration_sum / (saccades_duration_sum + movements_duration_sum)
# Append period metrics
ts_metrics[int(period_start_ts * 1e-3)] = period_metrics
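
As a side note, the reworked exploit/explore metric above reduces to this small sketch. Duration sums default to 0.0 for empty periods, mirroring the defaults above; the zero guard is an assumption added here to keep the sketch self-contained:

    def exploit_explore_ratio(fixations_duration_sum, saccades_duration_sum, movements_duration_sum):
        # Ratio of time spent fixating (exploiting) over time spent moving (exploring)
        explore_duration = saccades_duration_sum + movements_duration_sum

        # Undefined when the period contains nothing to explore
        if explore_duration == 0.0:
            return None

        return fixations_duration_sum / explore_duration

    # Example with durations in microseconds: 700 ms of fixations vs 250 + 50 ms of movements
    print(exploit_explore_ratio(700e3, 250e3, 50e3))  # 2.33...
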
diff --git a/src/argaze/utils/tobii_segment_gaze_movements_export.py b/src/argaze/utils/tobii_segment_gaze_movements_export.py
index 9fe6c36..3cd6dff 100644
--- a/src/argaze/utils/tobii_segment_gaze_movements_export.py
+++ b/src/argaze/utils/tobii_segment_gaze_movements_export.py
@@ -24,8 +24,8 @@ def main():
parser.add_argument('-s', '--segment_path', metavar='SEGMENT_PATH', type=str, default=None, help='path to a tobii segment folder', required=True)
parser.add_argument('-a', '--aoi', metavar='AOI_NAME', type=str, default=None, help='aoi name where to project gaze', required=True)
parser.add_argument('-t', '--time_range', metavar=('START_TIME', 'END_TIME'), nargs=2, type=float, default=(0., None), help='start and end time (in second)')
- parser.add_argument('-di', '--dispersion_threshold', metavar='DISPERSION_THRESHOLD', type=int, default=50, help='dispersion threshold in pixel')
- parser.add_argument('-du', '--duration_threshold', metavar='DURATION_THRESHOLD', type=int, default=200, help='duration threshold in millisecond')
+ parser.add_argument('-dev', '--deviation_max_threshold', metavar='DEVIATION_MAX_THRESHOLD', type=int, default=50, help='maximal distance for fixation identification in pixels')
+ parser.add_argument('-dmin', '--duration_min_threshold', metavar='DURATION_MIN_THRESHOLD', type=int, default=200, help='minimal duration for fixation identification in milliseconds')
parser.add_argument('-o', '--output', metavar='OUT', type=str, default=None, help='destination folder path (segment folder by default)')
parser.add_argument('-w', '--window', metavar='DISPLAY', type=bool, default=True, help='enable window display', action=argparse.BooleanOptionalAction)
args = parser.parse_args()
@@ -62,7 +62,7 @@ def main():
fixations_json_filepath = f'{destination_path}/gaze_fixations.json'
saccades_json_filepath = f'{destination_path}/gaze_saccades.json'
- unknown_json_filepath = f'{destination_path}/gaze_unknown.json'
+ movements_json_filepath = f'{destination_path}/gaze_movements.json'
gaze_status_json_filepath = f'{destination_path}/gaze_status.json'
gaze_status_video_filepath = f'{destination_path}/gaze_status.mp4'
@@ -106,6 +106,7 @@ def main():
# Gaze projection metrics
ts_projection_metrics = DataStructures.TimeStampedBuffer()
+ invalid_gaze_position_count = 0
# Starting with no AOI projection
ts_current_aoi = 0
@@ -120,6 +121,9 @@ def main():
progress = ts - int(args.time_range[0] * 1e6)
MiscFeatures.printProgressBar(progress, tobii_segment_video.duration, prefix = 'GazePositions projection:', suffix = 'Complete', length = 100)
+ # Edit default aoi error
+ current_aoi_error = 'No available AOI projection'
+
try:
# Get the last aoi projection until the current gaze position timestamp
@@ -130,8 +134,8 @@ def main():
# Catch aoi error to not update current aoi
if 'error' in current_aois.keys():
- # TODO: display error
- current_aoi_error = current_aois.pop('error')
+ # Remove extra error info after ':'
+ current_aoi_error = current_aois.pop('error').split(':')[0]
# Or update current aoi
elif args.aoi in current_aois.keys():
@@ -139,14 +143,17 @@ def main():
ts_current_aoi = ts_current_aois
current_aoi = AOIFeatures.AreaOfInterest(current_aois.pop(args.aoi))
+ current_aoi_error = ''
+
# No aoi projection at the beginning
except KeyError as e:
pass
# Wait for available aoi
if current_aoi.empty:
-
- ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition()
+
+ ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition(current_aoi_error)
+ invalid_gaze_position_count += 1
continue
# QUESTION: What todo if the current aoi is too old ?
@@ -155,10 +162,11 @@ def main():
ts_difference = ts - ts_current_aoi
# If aoi is not updated after the
- if ts_difference >= args.duration_threshold*1e3:
+ if ts_difference >= args.duration_min_threshold*1e3:
current_aoi = AOIFeatures.AreaOfInterest()
- ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition()
+ ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition('AOI projection is too old (> 1s)')
+ invalid_gaze_position_count += 1
continue
ts_projection_metrics[ts] = {'frame': ts_current_aois, 'age': ts_difference}
@@ -190,27 +198,43 @@ def main():
# Store inner gaze position for further movement processing
# TEMP: 1920x1080 are Screen_Plan dimensions
- ts_gaze_positions[ts] = GazeFeatures.GazePosition((round(inner_x*1920), round(inner_y*1080)))#, precision=inner_precision_px)
+ ts_gaze_positions[ts] = GazeFeatures.GazePosition((round(inner_x*1920), round((1.0 - inner_y)*1080)))#, precision=inner_precision_px)
+
+ else:
+
+ ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition(f'GazePosition not inside {args.aoi}')
+ invalid_gaze_position_count += 1
+
+ else:
+
+ ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition(f'Invalid Tobii GazePosition3D')
+ invalid_gaze_position_count += 1
- # Store unvalid gaze position for further movement processing
else:
- ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition()
+ ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition(f'Invalid Tobii GazePosition')
+ invalid_gaze_position_count += 1
print(f'\nGazePositions projection metrics:')
- projection_metrics_dataframe = ts_projection_metrics.as_dataframe()
- print(f'\t AOI age mean (ms) = {projection_metrics_dataframe.age.mean() * 1e-3}')
- print(f'\t AOI age max (ms) = {projection_metrics_dataframe.age.max() * 1e-3}')
+
+ if len(ts_projection_metrics):
+ projection_metrics_dataframe = ts_projection_metrics.as_dataframe()
+ print(f'\t AOI age mean (ms): {projection_metrics_dataframe.age.mean() * 1e-3}')
+ print(f'\t AOI age max (ms): {projection_metrics_dataframe.age.max() * 1e-3}')
+ else:
+ print(f'\t no AOI projected')
+
+ print(f'\t Invalid gaze positions: {invalid_gaze_position_count}')
print(f'\nGazeMovement identifier parameters:')
- print(f'\tDispersion threshold = {args.dispersion_threshold}')
- print(f'\tDuration threshold = {args.duration_threshold}')
+ print(f'\tDeviation max threshold = {args.deviation_max_threshold}')
+ print(f'\tDuration min threshold = {args.duration_min_threshold}')
# Start movement identification
- movement_identifier = DispersionBasedGazeMovementIdentifier.GazeMovementIdentifier(args.dispersion_threshold, args.duration_threshold*1e3)
+ movement_identifier = DispersionBasedGazeMovementIdentifier.GazeMovementIdentifier(args.deviation_max_threshold, args.duration_min_threshold*1e3)
ts_fixations = GazeFeatures.TimeStampedGazeMovements()
ts_saccades = GazeFeatures.TimeStampedGazeMovements()
- ts_unknown = GazeFeatures.TimeStampedGazeMovements()
+ ts_movements = GazeFeatures.TimeStampedGazeMovements()
ts_status = GazeFeatures.TimeStampedGazeStatus()
# Initialise progress bar
@@ -242,29 +266,29 @@ def main():
start_ts, start_position = gaze_movement.positions.first
- ts_unknown[start_ts] = gaze_movement
+ ts_movements[start_ts] = gaze_movement
for ts, position in gaze_movement.positions.items():
- ts_status[ts] = GazeFeatures.GazeStatus.from_position(position, 'UnknownGazeMovement', len(ts_unknown))
+ ts_status[ts] = GazeFeatures.GazeStatus.from_position(position, 'GazeMovement', len(ts_movements))
# Update Progress Bar
progress = start_ts - int(args.time_range[0] * 1e6)
- MiscFeatures.printProgressBar(progress, tobii_segment_video.duration, prefix = 'GazeMovements identification:', suffix = 'Complete', length = 100)
+ MiscFeatures.printProgressBar(progress, tobii_segment_video.duration, prefix = 'Gaze movements identification:', suffix = 'Complete', length = 100)
print(f'\nGazeMovements identification metrics:')
print(f'\t{len(ts_fixations)} fixations found')
print(f'\t{len(ts_saccades)} saccades found')
- print(f'\t{len(ts_unknown)} unknown movements found')
+ print(f'\t{len(ts_movements)} movements found')
ts_fixations.to_json(fixations_json_filepath)
- print(f'\nFixations saved into {fixations_json_filepath}')
+ print(f'\nGaze fixations saved into {fixations_json_filepath}')
ts_saccades.to_json(saccades_json_filepath)
- print(f'Saccades saved into {saccades_json_filepath}')
+ print(f'Gaze saccades saved into {saccades_json_filepath}')
- ts_unknown.to_json(unknown_json_filepath)
- print(f'Unknown movements saved into {unknown_json_filepath}')
+ ts_movements.to_json(movements_json_filepath)
+ print(f'Gaze movements saved into {movements_json_filepath}')
ts_status.to_json(gaze_status_json_filepath)
print(f'Gaze status saved into {gaze_status_json_filepath}')
@@ -289,7 +313,7 @@ def main():
fixations_exist = len(ts_fixations) > 0
saccades_exist = len(ts_saccades) > 0
- unknown_exist = len(ts_unknown) > 0
+ movements_exist = len(ts_movements) > 0
status_exist = len(ts_status) > 0
if fixations_exist:
@@ -299,27 +323,51 @@ def main():
if saccades_exist:
current_saccade_ts, current_saccade = ts_saccades.pop_first()
- if unknown_exist:
- current_unknown_ts, current_unknown = ts_unknown.pop_first()
+ if movements_exist:
+ current_movements_ts, current_movements = ts_movements.pop_first()
# Iterate on video frames
for video_ts, video_frame in tobii_segment_video.frames():
+ # This video frame is the reference until the next frame
+ # Here next frame is at + 40ms (25 fps)
+ # TODO: Get video fps to adapt
+ next_video_ts = video_ts + 40000
+
visu_matrix = numpy.zeros((1080, 1920, 3), numpy.uint8)
try:
- # Get next aoi projection at video frame time
+ # Get current aoi projection at video frame time
ts_current_aois, current_aois = ts_aois_projections.pop_first()
assert(ts_current_aois == video_ts)
- current_aoi = AOIFeatures.AreaOfInterest(current_aois.pop(args.aoi))
+ # Catch aoi error to not update current aoi
+ if 'error' in current_aois.keys():
+
+ # Display error (remove extra info after ':')
+ current_aoi_error = current_aois.pop('error').split(':')[0]
+
+ # Select error color
+ if current_aoi_error == 'VideoTimeStamp missing':
+ color_error = (0, 0, 255)
+ else:
+ color_error = (0, 255, 255)
+
+ cv.rectangle(visu_matrix, (0, 100), (550, 150), (127, 127, 127), -1)
+ cv.putText(visu_matrix, current_aoi_error, (20, 130), cv.FONT_HERSHEY_SIMPLEX, 1, color_error, 1, cv.LINE_AA)
+
+ # Or update current aoi
+ elif args.aoi in current_aois.keys():
+
+ ts_current_aoi = ts_current_aois
+ current_aoi = AOIFeatures.AreaOfInterest(current_aois.pop(args.aoi))
- # Apply perspective transform algorithm
- destination = numpy.float32([[0, 0],[1920, 0],[1920, 1080],[0, 1080]])
- aoi_matrix = cv.getPerspectiveTransform(current_aoi.astype(numpy.float32), destination)
- visu_matrix = cv.warpPerspective(video_frame.matrix, aoi_matrix, (1920, 1080))
+ # Apply perspective transform algorithm
+ destination = numpy.float32([[0, 1080],[1920, 1080],[1920, 0],[0, 0]])
+ aoi_matrix = cv.getPerspectiveTransform(current_aoi.astype(numpy.float32), destination)
+ visu_matrix = cv.warpPerspective(video_frame.matrix, aoi_matrix, (1920, 1080))
# Wait for aois projection
except KeyError:
@@ -328,56 +376,58 @@ def main():
if fixations_exist:
# Check next fixation
- if video_ts > current_fixation_ts + current_fixation.duration and len(ts_fixations) > 0:
+ if video_ts >= current_fixation_ts + current_fixation.duration and len(ts_fixations) > 0:
current_fixation_ts, current_fixation = ts_fixations.pop_first()
current_fixation_time_counter = 0
# While current time belongs to the current fixation
- if video_ts >= current_fixation_ts and video_ts <= current_fixation_ts + current_fixation.duration:
+ if video_ts >= current_fixation_ts and video_ts < current_fixation_ts + current_fixation.duration:
current_fixation_time_counter += 1
# Draw current fixation
- cv.circle(visu_matrix, (int(current_fixation.centroid[0]), int(current_fixation.centroid[1])), int(current_fixation.dispersion), (0, 255, 0), current_fixation_time_counter)
- cv.circle(gaze_status_matrix, (int(current_fixation.centroid[0]), int(current_fixation.centroid[1])), int(current_fixation.dispersion), (0, 255, 0))
+ cv.circle(visu_matrix, (int(current_fixation.centroid[0]), int(current_fixation.centroid[1])), int(current_fixation.deviation_max), (0, 255, 0), current_fixation_time_counter)
+ cv.circle(gaze_status_matrix, (int(current_fixation.centroid[0]), int(current_fixation.centroid[1])), int(current_fixation.deviation_max), (0, 255, 0))
if saccades_exist:
# Check next saccade
- if video_ts > current_saccade_ts + current_saccade.duration and len(ts_saccades) > 0:
+ if video_ts >= current_saccade_ts + current_saccade.duration and len(ts_saccades) > 0:
current_saccade_ts, current_saccade = ts_saccades.pop_first()
# While current time belongs to the current saccade
- if video_ts >= current_saccade_ts and video_ts <= current_saccade_ts + current_saccade.duration:
+ if video_ts >= current_saccade_ts and video_ts < current_saccade_ts + current_saccade.duration:
pass
- if unknown_exist:
+ if movements_exist:
- # Check next unknown movement
- if video_ts > current_unknown_ts + current_unknown.duration and len(ts_unknown) > 0:
+ # Check next movement
+ if video_ts >= current_movements_ts + current_movements.duration and len(ts_movements) > 0:
- current_unknown_ts, current_unknown = ts_unknown.pop_first()
+ current_movements_ts, current_movements = ts_movements.pop_first()
- # While current time belongs to the current unknown movement
- if video_ts >= current_unknown_ts and video_ts <= current_unknown_ts + current_unknown.duration:
+ # While current time belongs to the current movement
+ if video_ts >= current_movements_ts and video_ts < current_movements_ts + current_movements.duration:
pass
- # Draw all next gaze status
+ # Draw gaze status until next frame
try:
# Get next gaze status
+ ts_start, start_gaze_status = ts_status.first
ts_next, next_gaze_status = ts_status.first
- # Check next gaze status is not after current time
- while ts_next <= video_ts:
+ # Check next gaze status is not after next frame time
+ while ts_next < next_video_ts:
ts_start, start_gaze_status = ts_status.pop_first()
ts_next, next_gaze_status = ts_status.first
# Draw movement type
- if start_gaze_status.movement_index == next_gaze_status.movement_index \
+ if start_gaze_status.valid and next_gaze_status.valid \
+ and start_gaze_status.movement_index == next_gaze_status.movement_index \
and start_gaze_status.movement_type == next_gaze_status.movement_type:
if next_gaze_status.movement_type == 'Fixation':
@@ -394,53 +444,71 @@ def main():
except IndexError:
pass
- # Draw all next gaze positions
+ # Draw gaze positions until next frame
try:
# Get next gaze position
+ ts_start, start_gaze_position = ts_gaze_positions.first
ts_next, next_gaze_position = ts_gaze_positions.first
- # Check next gaze status is not after current time
- while ts_next <= video_ts:
+ # Gaze position count
+ gaze_position_count = 0
+
+ # Check next gaze position is not after next frame time
+ while ts_next < next_video_ts:
ts_start, start_gaze_position = ts_gaze_positions.pop_first()
ts_next, next_gaze_position = ts_gaze_positions.first
+ if not start_gaze_position.valid:
+
+ # Select error color
+ if start_gaze_position.message == 'VideoTimeStamp missing':
+ color_error = (0, 0, 255)
+ else:
+ color_error = (0, 255, 255)
+
+ # Write invalid gaze position error message
+ cv.putText(visu_matrix, f'{ts_start*1e-3:.3f} ms: {start_gaze_position.message}', (20, 1060 - (gaze_position_count)*50), cv.FONT_HERSHEY_SIMPLEX, 1, color_error, 1, cv.LINE_AA)
+
+ # Draw start gaze
+ start_gaze_position.draw(visu_matrix)
+ start_gaze_position.draw(gaze_status_matrix)
+
if start_gaze_position.valid and next_gaze_position.valid:
- # Draw movement
+ # Draw movement from start to next
cv.line(visu_matrix, start_gaze_position, next_gaze_position, (0, 255, 255), 1)
cv.line(gaze_status_matrix, start_gaze_position, next_gaze_position, (0, 255, 255), 1)
- # Draw gaze
- next_gaze_position.draw(visu_matrix)
- next_gaze_position.draw(gaze_status_matrix)
+ gaze_position_count += 1
- # Write last next gaze status
- if next_gaze_position.valid:
+ if start_gaze_position.valid:
- cv.putText(visu_matrix, str(next_gaze_status.value), next_gaze_status.value, cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
+ # Write last start gaze position
+ cv.putText(visu_matrix, str(start_gaze_position.value), start_gaze_position.value, cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
+ cv.putText(gaze_status_matrix, str(start_gaze_position.value), start_gaze_position.value, cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
+ # Write last start gaze position timing
+ cv.rectangle(visu_matrix, (0, 50), (550, 100), (31, 31, 31), -1)
+ cv.putText(visu_matrix, f'Gaze time: {ts_start*1e-3:.3f} ms', (20, 85), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
+
# Empty gaze position
except IndexError:
pass
- # Write last gaze position timing
- cv.rectangle(visu_matrix, (0, 50), (550, 100), (31, 31, 31), -1)
- cv.putText(visu_matrix, f'Gaze time: {ts_next*1e-3:.3f} ms', (20, 85), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
-
# Write segment timing
cv.rectangle(visu_matrix, (0, 0), (550, 50), (63, 63, 63), -1)
cv.putText(visu_matrix, f'Video time: {video_ts*1e-3:.3f} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
# Write movement identification parameters
- cv.rectangle(visu_matrix, (0, 100), (550, 260), (63, 63, 63), -1)
- cv.putText(visu_matrix, f'Dispersion max: {args.dispersion_threshold} px', (20, 160), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
- cv.putText(visu_matrix, f'Duration min: {args.duration_threshold} ms', (20, 220), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
+ cv.rectangle(visu_matrix, (0, 150), (550, 310), (63, 63, 63), -1)
+ cv.putText(visu_matrix, f'Deviation max: {args.deviation_max_threshold} px', (20, 210), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
+ cv.putText(visu_matrix, f'Duration min: {args.duration_min_threshold} ms', (20, 270), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
# Draw dispersion threshold circle
- cv.circle(visu_matrix, (args.dispersion_threshold + 400, 180), 2, (0, 255, 255), -1)
- cv.circle(visu_matrix, (args.dispersion_threshold + 400, 180), args.dispersion_threshold, (255, 150, 150), 1)
+ cv.circle(visu_matrix, (args.deviation_max_threshold + 400, 230), 2, (0, 255, 255), -1)
+ cv.circle(visu_matrix, (args.deviation_max_threshold + 400, 230), args.deviation_max_threshold, (255, 150, 150), 1)
if args.window:
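
Finally, a small sketch of the gaze position mapping used in the projection loop above, assuming inner_x/inner_y are normalised coordinates inside the AOI with a bottom-up y axis while the 1920x1080 visualisation matrix uses a top-left origin (hence the (1.0 - inner_y) flip introduced by this commit):

    def aoi_inner_to_pixel(inner_x, inner_y, width=1920, height=1080):
        # Map normalised AOI-inner coordinates to pixel coordinates, flipping the y axis
        return (round(inner_x * width), round((1.0 - inner_y) * height))

    # A gaze at the AOI centre lands at the image centre either way
    print(aoi_inner_to_pixel(0.5, 0.5))   # (960, 540)

    # A gaze near the AOI top edge (inner_y close to 1) lands near the image top (small pixel y)
    print(aoi_inner_to_pixel(0.25, 0.9))  # (480, 108)
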