Diffstat (limited to 'src/argaze/utils/tobii_segment_gaze_movements_export.py')
-rw-r--r--  src/argaze/utils/tobii_segment_gaze_movements_export.py  210
1 file changed, 139 insertions(+), 71 deletions(-)
diff --git a/src/argaze/utils/tobii_segment_gaze_movements_export.py b/src/argaze/utils/tobii_segment_gaze_movements_export.py
index 9fe6c36..3cd6dff 100644
--- a/src/argaze/utils/tobii_segment_gaze_movements_export.py
+++ b/src/argaze/utils/tobii_segment_gaze_movements_export.py
@@ -24,8 +24,8 @@ def main():
parser.add_argument('-s', '--segment_path', metavar='SEGMENT_PATH', type=str, default=None, help='path to a tobii segment folder', required=True)
parser.add_argument('-a', '--aoi', metavar='AOI_NAME', type=str, default=None, help='aoi name where to project gaze', required=True)
parser.add_argument('-t', '--time_range', metavar=('START_TIME', 'END_TIME'), nargs=2, type=float, default=(0., None), help='start and end time (in second)')
- parser.add_argument('-di', '--dispersion_threshold', metavar='DISPERSION_THRESHOLD', type=int, default=50, help='dispersion threshold in pixel')
- parser.add_argument('-du', '--duration_threshold', metavar='DURATION_THRESHOLD', type=int, default=200, help='duration threshold in millisecond')
+ parser.add_argument('-dev', '--deviation_max_threshold', metavar='DEVIATION_MAX_THRESHOLD', type=int, default=50, help='maximal distance for fixation identification in pixel')
+ parser.add_argument('-dmin', '--duration_min_threshold', metavar='DURATION_MIN_THRESHOLD', type=int, default=200, help='minimal duration for fixation identification in millisecond')
parser.add_argument('-o', '--output', metavar='OUT', type=str, default=None, help='destination folder path (segment folder by default)')
parser.add_argument('-w', '--window', metavar='DISPLAY', type=bool, default=True, help='enable window display', action=argparse.BooleanOptionalAction)
args = parser.parse_args()
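The renamed options map directly onto the two parameters of dispersion-based (I-DT style) fixation identification: a fixation is a run of gaze points whose maximal deviation from their centroid stays under deviation_max_threshold for at least duration_min_threshold. A minimal illustrative test of that criterion, independent of the argaze classes used below (the helper is a sketch, not part of the library):

    import math

    def is_fixation(points, deviation_max_px, duration_min_us):
        # points: list of (timestamp_us, x, y) samples
        if points[-1][0] - points[0][0] < duration_min_us:
            return False  # run is too short to qualify
        cx = sum(p[1] for p in points) / len(points)
        cy = sum(p[2] for p in points) / len(points)
        # Every sample must stay within deviation_max_px of the centroid
        return all(math.dist((cx, cy), (p[1], p[2])) <= deviation_max_px for p in points)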
@@ -62,7 +62,7 @@ def main():
fixations_json_filepath = f'{destination_path}/gaze_fixations.json'
saccades_json_filepath = f'{destination_path}/gaze_saccades.json'
- unknown_json_filepath = f'{destination_path}/gaze_unknown.json'
+ movements_json_filepath = f'{destination_path}/gaze_movements.json'
gaze_status_json_filepath = f'{destination_path}/gaze_status.json'
gaze_status_video_filepath = f'{destination_path}/gaze_status.mp4'
@@ -106,6 +106,7 @@ def main():
# Gaze projection metrics
ts_projection_metrics = DataStructures.TimeStampedBuffer()
+ invalid_gaze_position_count = 0
# Starting with no AOI projection
ts_current_aoi = 0
@@ -120,6 +121,9 @@ def main():
progress = ts - int(args.time_range[0] * 1e6)
MiscFeatures.printProgressBar(progress, tobii_segment_video.duration, prefix = 'GazePositions projection:', suffix = 'Complete', length = 100)
+ # Set default aoi error
+ current_aoi_error = 'No available AOI projection'
+
try:
# Get the last aoi projection until the current gaze position timestamp
@@ -130,8 +134,8 @@ def main():
# Catch aoi error to not update current aoi
if 'error' in current_aois.keys():
- # TODO: display error
- current_aoi_error = current_aois.pop('error')
+ # Remove extra error info after ':'
+ current_aoi_error = current_aois.pop('error').split(':')[0]
# Or update current aoi
elif args.aoi in current_aois.keys():
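The `.split(':')[0]` trim keeps only the error category and drops any detail after the colon; for instance (the suffix here is illustrative):

    >>> 'VideoTimeStamp missing: no video frame at this timestamp'.split(':')[0]
    'VideoTimeStamp missing'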
@@ -139,14 +143,17 @@ def main():
ts_current_aoi = ts_current_aois
current_aoi = AOIFeatures.AreaOfInterest(current_aois.pop(args.aoi))
+ current_aoi_error = ''
+
# No aoi projection at the beginning
except KeyError as e:
pass
# Wait for available aoi
if current_aoi.empty:
-
- ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition()
+
+ ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition(current_aoi_error)
+ invalid_gaze_position_count += 1
continue
# QUESTION: What to do if the current aoi is too old?
@@ -155,10 +162,11 @@ def main():
ts_difference = ts - ts_current_aoi
# If aoi has not been updated within the minimal duration threshold
- if ts_difference >= args.duration_threshold*1e3:
+ if ts_difference >= args.duration_min_threshold*1e3:
current_aoi = AOIFeatures.AreaOfInterest()
- ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition()
+ ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition(f'AOI projection is too old (> {args.duration_min_threshold} ms)')
+ invalid_gaze_position_count += 1
continue
ts_projection_metrics[ts] = {'frame': ts_current_aois, 'age': ts_difference}
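Note the unit mix in this comparison: timestamps are in microseconds while the CLI threshold is in milliseconds, hence the *1e3 factor. Restated with explicit units (same logic as above):

    duration_min_us = args.duration_min_threshold * 1e3  # 200 ms -> 200000 µs by default
    aoi_age_us = ts - ts_current_aoi                     # both timestamps in µs
    too_old = aoi_age_us >= duration_min_us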
@@ -190,27 +198,43 @@ def main():
# Store inner gaze position for further movement processing
# TEMP: 1920x1080 are Screen_Plan dimensions
- ts_gaze_positions[ts] = GazeFeatures.GazePosition((round(inner_x*1920), round(inner_y*1080)))#, precision=inner_precision_px)
+ ts_gaze_positions[ts] = GazeFeatures.GazePosition((round(inner_x*1920), round((1.0 - inner_y)*1080)))#, precision=inner_precision_px)
+
+ else:
+
+ ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition(f'GazePosition not inside {args.aoi}')
+ invalid_gaze_position_count += 1
+
+ else:
+
+ ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition('Invalid Tobii GazePosition3D')
+ invalid_gaze_position_count += 1
- # Store unvalid gaze position for further movement processing
else:
- ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition()
+ ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition('Invalid Tobii GazePosition')
+ invalid_gaze_position_count += 1
print(f'\nGazePositions projection metrics:')
- projection_metrics_dataframe = ts_projection_metrics.as_dataframe()
- print(f'\t AOI age mean (ms) = {projection_metrics_dataframe.age.mean() * 1e-3}')
- print(f'\t AOI age max (ms) = {projection_metrics_dataframe.age.max() * 1e-3}')
+
+ if len(ts_projection_metrics):
+ projection_metrics_dataframe = ts_projection_metrics.as_dataframe()
+ print(f'\t AOI age mean (ms): {projection_metrics_dataframe.age.mean() * 1e-3}')
+ print(f'\t AOI age max (ms): {projection_metrics_dataframe.age.max() * 1e-3}')
+ else:
+ print(f'\t no AOI projected')
+
+ print(f'\t Invalid gaze positions: {invalid_gaze_position_count}')
print(f'\nGazeMovement identifier parameters:')
- print(f'\tDispersion threshold = {args.dispersion_threshold}')
- print(f'\tDuration threshold = {args.duration_threshold}')
+ print(f'\tDeviation max threshold = {args.deviation_max_threshold} px')
+ print(f'\tDuration min threshold = {args.duration_min_threshold} ms')
# Start movement identification
- movement_identifier = DispersionBasedGazeMovementIdentifier.GazeMovementIdentifier(args.dispersion_threshold, args.duration_threshold*1e3)
+ movement_identifier = DispersionBasedGazeMovementIdentifier.GazeMovementIdentifier(args.deviation_max_threshold, args.duration_min_threshold*1e3)
ts_fixations = GazeFeatures.TimeStampedGazeMovements()
ts_saccades = GazeFeatures.TimeStampedGazeMovements()
- ts_unknown = GazeFeatures.TimeStampedGazeMovements()
+ ts_movements = GazeFeatures.TimeStampedGazeMovements()
ts_status = GazeFeatures.TimeStampedGazeStatus()
# Initialise progress bar
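The (1.0 - inner_y) change above flips the vertical axis: the AOI's normalized coordinates grow upward from the bottom-left corner, while OpenCV pixel rows grow downward from the top-left (this reading of the change matches the flipped destination quad later in this diff). In isolation:

    px = round(inner_x * 1920)           # column: rightward in both systems
    py = round((1.0 - inner_y) * 1080)   # row: flip y-up (AOI) to y-down (image)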
@@ -242,29 +266,29 @@ def main():
start_ts, start_position = gaze_movement.positions.first
- ts_unknown[start_ts] = gaze_movement
+ ts_movements[start_ts] = gaze_movement
for ts, position in gaze_movement.positions.items():
- ts_status[ts] = GazeFeatures.GazeStatus.from_position(position, 'UnknownGazeMovement', len(ts_unknown))
+ ts_status[ts] = GazeFeatures.GazeStatus.from_position(position, 'GazeMovement', len(ts_movements))
# Update Progress Bar
progress = start_ts - int(args.time_range[0] * 1e6)
- MiscFeatures.printProgressBar(progress, tobii_segment_video.duration, prefix = 'GazeMovements identification:', suffix = 'Complete', length = 100)
+ MiscFeatures.printProgressBar(progress, tobii_segment_video.duration, prefix = 'Gaze movements identification:', suffix = 'Complete', length = 100)
print(f'\nGazeMovements identification metrics:')
print(f'\t{len(ts_fixations)} fixations found')
print(f'\t{len(ts_saccades)} saccades found')
- print(f'\t{len(ts_unknown)} unknown movements found')
+ print(f'\t{len(ts_movements)} movements found')
ts_fixations.to_json(fixations_json_filepath)
- print(f'\nFixations saved into {fixations_json_filepath}')
+ print(f'\nGaze fixations saved into {fixations_json_filepath}')
ts_saccades.to_json(saccades_json_filepath)
- print(f'Saccades saved into {saccades_json_filepath}')
+ print(f'Gaze saccades saved into {saccades_json_filepath}')
- ts_unknown.to_json(unknown_json_filepath)
- print(f'Unknown movements saved into {unknown_json_filepath}')
+ ts_movements.to_json(movements_json_filepath)
+ print(f'Gaze movements saved into {movements_json_filepath}')
ts_status.to_json(gaze_status_json_filepath)
print(f'Gaze status saved into {gaze_status_json_filepath}')
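For context, the identification loop elided from this hunk dispatches each identified movement by type; a minimal sketch, assuming the identifier is callable on the timestamped buffer and yields Fixation, Saccade, or generic movement objects (the class names are assumptions inferred from the buffers above):

    for gaze_movement in movement_identifier(ts_gaze_positions):

        start_ts, start_position = gaze_movement.positions.first

        if isinstance(gaze_movement, DispersionBasedGazeMovementIdentifier.Fixation):
            ts_fixations[start_ts] = gaze_movement
        elif isinstance(gaze_movement, DispersionBasedGazeMovementIdentifier.Saccade):
            ts_saccades[start_ts] = gaze_movement
        else:
            ts_movements[start_ts] = gaze_movement  # anything else, as stored above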
@@ -289,7 +313,7 @@ def main():
fixations_exist = len(ts_fixations) > 0
saccades_exist = len(ts_saccades) > 0
- unknown_exist = len(ts_unknown) > 0
+ movements_exist = len(ts_movements) > 0
status_exist = len(ts_status) > 0
if fixations_exist:
@@ -299,27 +323,51 @@ def main():
if saccades_exist:
current_saccade_ts, current_saccade = ts_saccades.pop_first()
- if unknown_exist:
- current_unknown_ts, current_unknown = ts_unknown.pop_first()
+ if movements_exist:
+ current_movements_ts, current_movements = ts_movements.pop_first()
# Iterate on video frames
for video_ts, video_frame in tobii_segment_video.frames():
+ # This video frame is the reference until the next frame
+ # Here next frame is at + 40ms (25 fps)
+ # TODO: Get video fps to adapt
+ next_video_ts = video_ts + 40000
+
visu_matrix = numpy.zeros((1080, 1920, 3), numpy.uint8)
try:
- # Get next aoi projection at video frame time
+ # Get current aoi projection at video frame time
ts_current_aois, current_aois = ts_aois_projections.pop_first()
assert(ts_current_aois == video_ts)
- current_aoi = AOIFeatures.AreaOfInterest(current_aois.pop(args.aoi))
+ # Catch aoi error to not update current aoi
+ if 'error' in current_aois.keys():
+
+ # Display error (remove extra info after ':')
+ current_aoi_error = current_aois.pop('error').split(':')[0]
+
+ # Select color error
+ if current_aoi_error == 'VideoTimeStamp missing':
+ color_error = (0, 0, 255)
+ else:
+ color_error = (0, 255, 255)
+
+ cv.rectangle(visu_matrix, (0, 100), (550, 150), (127, 127, 127), -1)
+ cv.putText(visu_matrix, current_aoi_error, (20, 130), cv.FONT_HERSHEY_SIMPLEX, 1, color_error, 1, cv.LINE_AA)
+
+ # Or update current aoi
+ elif args.aoi in current_aois.keys():
+
+ ts_current_aoi = ts_current_aois
+ current_aoi = AOIFeatures.AreaOfInterest(current_aois.pop(args.aoi))
- # Apply perspective transform algorithm
- destination = numpy.float32([[0, 0],[1920, 0],[1920, 1080],[0, 1080]])
- aoi_matrix = cv.getPerspectiveTransform(current_aoi.astype(numpy.float32), destination)
- visu_matrix = cv.warpPerspective(video_frame.matrix, aoi_matrix, (1920, 1080))
+ # Apply perspective transform algorithm
+ destination = numpy.float32([[0, 1080],[1920, 1080],[1920, 0],[0, 0]])
+ aoi_matrix = cv.getPerspectiveTransform(current_aoi.astype(numpy.float32), destination)
+ visu_matrix = cv.warpPerspective(video_frame.matrix, aoi_matrix, (1920, 1080))
# Wait for aois projection
except KeyError:
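The reordered destination quad pairs with the vertical flip applied to gaze positions: the first AOI corner now maps to the output's bottom-left. The OpenCV calls themselves are standard; a self-contained sketch with a hypothetical source quad:

    import cv2 as cv
    import numpy as np

    # Four AOI corners in the camera frame (hypothetical values), listed in the
    # same order as the destination corners they should map onto
    source = np.float32([[100, 900], [1700, 880], [1680, 120], [120, 140]])
    destination = np.float32([[0, 1080], [1920, 1080], [1920, 0], [0, 0]])

    matrix = cv.getPerspectiveTransform(source, destination)  # 3x3 homography
    # warped = cv.warpPerspective(frame, matrix, (1920, 1080))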
@@ -328,56 +376,58 @@ def main():
if fixations_exist:
# Check next fixation
- if video_ts > current_fixation_ts + current_fixation.duration and len(ts_fixations) > 0:
+ if video_ts >= current_fixation_ts + current_fixation.duration and len(ts_fixations) > 0:
current_fixation_ts, current_fixation = ts_fixations.pop_first()
current_fixation_time_counter = 0
# While current time belongs to the current fixation
- if video_ts >= current_fixation_ts and video_ts <= current_fixation_ts + current_fixation.duration:
+ if video_ts >= current_fixation_ts and video_ts < current_fixation_ts + current_fixation.duration:
current_fixation_time_counter += 1
# Draw current fixation
- cv.circle(visu_matrix, (int(current_fixation.centroid[0]), int(current_fixation.centroid[1])), int(current_fixation.dispersion), (0, 255, 0), current_fixation_time_counter)
- cv.circle(gaze_status_matrix, (int(current_fixation.centroid[0]), int(current_fixation.centroid[1])), int(current_fixation.dispersion), (0, 255, 0))
+ cv.circle(visu_matrix, (int(current_fixation.centroid[0]), int(current_fixation.centroid[1])), int(current_fixation.deviation_max), (0, 255, 0), current_fixation_time_counter)
+ cv.circle(gaze_status_matrix, (int(current_fixation.centroid[0]), int(current_fixation.centroid[1])), int(current_fixation.deviation_max), (0, 255, 0))
if saccades_exist:
# Check next saccade
- if video_ts > current_saccade_ts + current_saccade.duration and len(ts_saccades) > 0:
+ if video_ts >= current_saccade_ts + current_saccade.duration and len(ts_saccades) > 0:
current_saccade_ts, current_saccade = ts_saccades.pop_first()
# While current time belongs to the current saccade
- if video_ts >= current_saccade_ts and video_ts <= current_saccade_ts + current_saccade.duration:
+ if video_ts >= current_saccade_ts and video_ts < current_saccade_ts + current_saccade.duration:
pass
- if unknown_exist:
+ if movements_exist:
- # Check next unknown movement
- if video_ts > current_unknown_ts + current_unknown.duration and len(ts_unknown) > 0:
+ # Check next movement
+ if video_ts >= current_movements_ts + current_movements.duration and len(ts_movements) > 0:
- current_unknown_ts, current_unknown = ts_unknown.pop_first()
+ current_movements_ts, current_movements = ts_movements.pop_first()
- # While current time belongs to the current unknown movement
- if video_ts >= current_unknown_ts and video_ts <= current_unknown_ts + current_unknown.duration:
+ # While current time belongs to the current movement
+ if video_ts >= current_movements_ts and video_ts < current_movements_ts + current_movements.duration:
pass
- # Draw all next gaze status
+ # Draw gaze status until next frame
try:
# Get next gaze status
+ ts_start, start_gaze_status = ts_status.first # Keep start defined even if the loop below never runs
ts_next, next_gaze_status = ts_status.first
- # Check next gaze status is not after current time
- while ts_next <= video_ts:
+ # Check next gaze status is not after next frame time
+ while ts_next < next_video_ts:
ts_start, start_gaze_status = ts_status.pop_first()
ts_next, next_gaze_status = ts_status.first
# Draw movement type
- if start_gaze_status.movement_index == next_gaze_status.movement_index \
+ if start_gaze_status.valid and next_gaze_status.valid \
+ and start_gaze_status.movement_index == next_gaze_status.movement_index \
and start_gaze_status.movement_type == next_gaze_status.movement_type:
if next_gaze_status.movement_type == 'Fixation':
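The boundary changes in this hunk (> to >=, <= to <) give each movement a half-open interval [start, start + duration), so a frame timestamp landing exactly on a boundary is attributed once, to the following movement, instead of twice. The membership test in isolation:

    def belongs_to(video_ts, start_ts, duration):
        # Half-open: the end timestamp already belongs to the next movement
        return start_ts <= video_ts < start_ts + duration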
@@ -394,53 +444,71 @@ def main():
except IndexError:
pass
- # Draw all next gaze positions
+ # Draw gaze positions until next frame
try:
# Get next gaze position
+ ts_start, start_gaze_position = ts_gaze_positions.first # Keep start defined even if the loop below never runs
ts_next, next_gaze_position = ts_gaze_positions.first
- # Check next gaze status is not after current time
- while ts_next <= video_ts:
+ # Gaze position count
+ gaze_position_count = 0
+
+ # Check next gaze position is not after next frame time
+ while ts_next < next_video_ts:
ts_start, start_gaze_position = ts_gaze_positions.pop_first()
ts_next, next_gaze_position = ts_gaze_positions.first
+ if not start_gaze_position.valid:
+
+ # Select color error
+ if start_gaze_position.message == 'VideoTimeStamp missing':
+ color_error = (0, 0, 255)
+ else:
+ color_error = (0, 255, 255)
+
+ # Write unvalid error message
+ cv.putText(visu_matrix, f'{ts_start*1e-3:.3f} ms: {start_gaze_position.message}', (20, 1060 - (gaze_position_count)*50), cv.FONT_HERSHEY_SIMPLEX, 1, color_error, 1, cv.LINE_AA)
+
+ # Draw start gaze
+ start_gaze_position.draw(visu_matrix)
+ start_gaze_position.draw(gaze_status_matrix)
+
if start_gaze_position.valid and next_gaze_position.valid:
- # Draw movement
+ # Draw movement from start to next
cv.line(visu_matrix, start_gaze_position, next_gaze_position, (0, 255, 255), 1)
cv.line(gaze_status_matrix, start_gaze_position, next_gaze_position, (0, 255, 255), 1)
- # Draw gaze
- next_gaze_position.draw(visu_matrix)
- next_gaze_position.draw(gaze_status_matrix)
+ gaze_position_count += 1
- # Write last next gaze status
- if next_gaze_position.valid:
+ if start_gaze_position.valid:
- cv.putText(visu_matrix, str(next_gaze_status.value), next_gaze_status.value, cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
+ # Write last start gaze position
+ cv.putText(visu_matrix, str(start_gaze_position.value), start_gaze_position.value, cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
+ cv.putText(gaze_status_matrix, str(start_gaze_position.value), start_gaze_position.value, cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
+ # Write last start gaze position timing
+ cv.rectangle(visu_matrix, (0, 50), (550, 100), (31, 31, 31), -1)
+ cv.putText(visu_matrix, f'Gaze time: {ts_start*1e-3:.3f} ms', (20, 85), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
+
# Empty gaze position
except IndexError:
pass
- # Write last gaze position timing
- cv.rectangle(visu_matrix, (0, 50), (550, 100), (31, 31, 31), -1)
- cv.putText(visu_matrix, f'Gaze time: {ts_next*1e-3:.3f} ms', (20, 85), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
-
# Write segment timing
cv.rectangle(visu_matrix, (0, 0), (550, 50), (63, 63, 63), -1)
cv.putText(visu_matrix, f'Video time: {video_ts*1e-3:.3f} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
# Write movement identification parameters
- cv.rectangle(visu_matrix, (0, 100), (550, 260), (63, 63, 63), -1)
- cv.putText(visu_matrix, f'Dispersion max: {args.dispersion_threshold} px', (20, 160), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
- cv.putText(visu_matrix, f'Duration min: {args.duration_threshold} ms', (20, 220), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
+ cv.rectangle(visu_matrix, (0, 150), (550, 310), (63, 63, 63), -1)
+ cv.putText(visu_matrix, f'Deviation max: {args.deviation_max_threshold} px', (20, 210), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
+ cv.putText(visu_matrix, f'Duration min: {args.duration_min_threshold} ms', (20, 270), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
# Draw dispersion threshold circle
- cv.circle(visu_matrix, (args.dispersion_threshold + 400, 180), 2, (0, 255, 255), -1)
- cv.circle(visu_matrix, (args.dispersion_threshold + 400, 180), args.dispersion_threshold, (255, 150, 150), 1)
+ cv.circle(visu_matrix, (args.deviation_max_threshold + 400, 230), 2, (0, 255, 255), -1)
+ cv.circle(visu_matrix, (args.deviation_max_threshold + 400, 230), args.deviation_max_threshold, (255, 150, 150), 1)
if args.window:
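Finally, the 40000 µs frame period hard-coded earlier (see the TODO in this diff) could be derived from the segment video's frame rate; a sketch, assuming the video object exposes its frame rate (fps is a hypothetical attribute name):

    # Hypothetical: derive the inter-frame period instead of assuming 25 fps
    frame_period_us = int(1e6 / tobii_segment_video.fps)
    next_video_ts = video_ts + frame_period_us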