author     Théo de la Hogue    2022-12-09 23:06:43 +0100
committer  Théo de la Hogue    2022-12-09 23:06:43 +0100
commit     62ab70908db3ce8216abf6b15531fff33afd3e00 (patch)
tree       04a7fcdadc301d33adc771dc2e173a92bd6eae35
parent     1205cf6cd011e1ecce56759304ea0c9ab5666617 (diff)
Displaying unknown movements.
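
Gaze movements identified as neither fixation nor saccade are now kept as 'unknown' movements: they get their own gaze_unknown.csv export, their own entries in the gaze metrics, and their own (blue) trace in the gaze status video. Fixation, saccade and unknown analyses are also made conditional, so a segment lacking one of the categories no longer breaks the export.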
-rw-r--r--  src/argaze/utils/tobii_segment_gaze_movements_export.py | 328
1 file changed, 222 insertions(+), 106 deletions(-)
diff --git a/src/argaze/utils/tobii_segment_gaze_movements_export.py b/src/argaze/utils/tobii_segment_gaze_movements_export.py
index 88f61d7..7e184ee 100644
--- a/src/argaze/utils/tobii_segment_gaze_movements_export.py
+++ b/src/argaze/utils/tobii_segment_gaze_movements_export.py
@@ -62,6 +62,7 @@ def main():
fixations_filepath = f'{destination_path}/gaze_fixations.csv'
saccades_filepath = f'{destination_path}/gaze_saccades.csv'
+ unknown_filepath = f'{destination_path}/gaze_unknown.csv'
gaze_status_filepath = f'{destination_path}/gaze_status.csv'
gaze_status_video_filepath = f'{destination_path}/gaze_status.mp4'
@@ -109,16 +110,17 @@ def main():
ts_projection_metrics = DataStructures.TimeStampedBuffer()
# Starting with no AOI projection
- selected_aoi = AOIFeatures.AreaOfInterest()
+ ts_current_aoi = 0
+ current_aoi = AOIFeatures.AreaOfInterest()
# Initialise progress bar
- MiscFeatures.printProgressBar(0, tobii_segment_video.duration, prefix = '\nGazePositions projection:', suffix = 'Complete', length = 100)
+ #MiscFeatures.printProgressBar(0, tobii_segment_video.duration, prefix = '\nGazePositions projection:', suffix = 'Complete', length = 100)
for ts, tobii_gaze_position in tobii_ts_gaze_positions.items():
# Update Progress Bar
progress = ts - int(args.time_range[0] * 1e6)
- MiscFeatures.printProgressBar(progress, tobii_segment_video.duration, prefix = 'GazePositions projection:', suffix = 'Complete', length = 100)
+ #MiscFeatures.printProgressBar(progress, tobii_segment_video.duration, prefix = 'GazePositions projection:', suffix = 'Complete', length = 100)
try:
@@ -127,34 +129,42 @@ def main():
assert(ts_current_aois <= ts)
- # Is the AOI projection valid ?
+ # Catch the aoi error and leave the current aoi unchanged
if 'error' in current_aois.keys():
# TODO: display error
current_aoi_error = current_aois.pop('error')
- # Wait for valid aoi projection
- continue
+ # Otherwise, update the current aoi
+ elif args.aoi in current_aois.keys():
- # Is the selected aoi there ?
- if args.aoi in current_aois.keys():
+ ts_current_aoi = ts_current_aois
+ current_aoi = AOIFeatures.AreaOfInterest(current_aois.pop(args.aoi))
- selected_aoi = AOIFeatures.AreaOfInterest(current_aois.pop(args.aoi))
- selected_ts_aoi = ts_current_aois
+ # No aoi projection at the beginning
+ except KeyError as e:
+ pass
- # else use last one if exist
- elif selected_aoi.empty:
- continue
+ # Wait for an available aoi
+ if current_aoi.empty:
+
+ ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition()
+ continue
+
+ # QUESTION: What to do if the current aoi is too old?
+ # If the aoi didn't move, it is not a problem...
+ # For the moment, we discard aois older than the duration threshold and provide a metric to assess the problem
+ ts_difference = ts - ts_current_aoi
- # QUESTION: What todo if the current valid aoi is too old ?
- # if the aoi didn't move it is not a problem...
- # For the moment, we just provide a metric to assess its not too big
- ts_projection_metrics[ts] = {'frame': selected_ts_aoi, 'age': ts - selected_ts_aoi}
+ # If the aoi has not been updated within the duration threshold
+ if ts_difference >= args.duration_threshold*1e3:
- # Wait for a first aoi projection
- except KeyError as e:
+ current_aoi = AOIFeatures.AreaOfInterest()
+ ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition()
continue
+ ts_projection_metrics[ts] = {'frame': ts_current_aois, 'age': ts_difference}
+
# Test gaze position validity
if tobii_gaze_position.validity == 0:
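
The hunk above guards against stale AOI projections: the script keeps the timestamp of the last projection and invalidates gaze positions once that projection is older than the duration threshold. A minimal standalone sketch of the guard (a hypothetical helper, not part of argaze; timestamps in microseconds, threshold in milliseconds, matching the args.duration_threshold * 1e3 conversion above):

    # Hypothetical helper sketching the AOI-age guard above.
    # ts and ts_current_aoi are in microseconds, the threshold in milliseconds.
    def aoi_is_fresh(ts, ts_current_aoi, duration_threshold_ms):
        """Return True while the last AOI projection is recent enough to reuse."""
        return (ts - ts_current_aoi) < duration_threshold_ms * 1e3

    # When the guard fails, the script resets current_aoi and stores an
    # UnvalidGazePosition at that timestamp instead of projecting the gaze.
    assert aoi_is_fresh(2_000_000, 1_900_000, duration_threshold_ms=200)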
@@ -175,20 +185,18 @@ def main():
gaze_position = GazeFeatures.GazePosition(gaze_position_px, precision=gaze_precision_px)
# Project gaze position into selected aois
- if selected_aoi.contains_point(gaze_position.value):
+ if current_aoi.contains_point(gaze_position.value):
- inner_x, inner_y = selected_aoi.inner_axis(gaze_position.value)
- inner_precision_px = gaze_precision_px * tobii_segment_video.width * tobii_segment_video.height / selected_aoi.area
+ inner_x, inner_y = current_aoi.inner_axis(gaze_position.value)
+ inner_precision_px = gaze_precision_px * tobii_segment_video.width * tobii_segment_video.height / current_aoi.area
# Store inner gaze position for further movement processing
# TEMP: 1920x1080 are Screen_Plan dimensions
- # TODO? trunc ts at the millisecond before int(ts*1e-3)*1e3
ts_gaze_positions[ts] = GazeFeatures.GazePosition((round(inner_x*1920), round(inner_y*1080)))#, precision=inner_precision_px)
# Store unvalid gaze position for further movement processing
else:
- # TODO? trunc ts at the millisecond before int(ts*1e-3)*1e3
ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition()
print(f'\nGazePositions projection metrics:')
@@ -204,10 +212,11 @@ def main():
movement_identifier = DispersionBasedGazeMovementIdentifier.GazeMovementIdentifier(args.dispersion_threshold, args.duration_threshold*1e3)
ts_fixations = GazeFeatures.TimeStampedGazeMovements()
ts_saccades = GazeFeatures.TimeStampedGazeMovements()
+ ts_unknown = GazeFeatures.TimeStampedGazeMovements()
ts_status = GazeFeatures.TimeStampedGazeStatus()
# Initialise progress bar
- MiscFeatures.printProgressBar(0, tobii_segment_video.duration, prefix = '\nGazeMovements identification:', suffix = 'Complete', length = 100)
+ #MiscFeatures.printProgressBar(0, tobii_segment_video.duration, prefix = '\nGazeMovements identification:', suffix = 'Complete', length = 100)
for gaze_movement in movement_identifier(ts_gaze_positions):
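
For context, a dispersion-based identifier follows the classic I-DT idea: consecutive positions that stay within a dispersion threshold for at least a duration threshold form a fixation, and what lies between fixations is a saccade candidate; movements fitting neither definition end up as the 'unknown' movements handled below. A generic sketch of the idea (not argaze's DispersionBasedGazeMovementIdentifier):

    # Generic I-DT sketch; argaze's actual identifier may differ.
    def dispersion(points):
        xs, ys = zip(*points)
        return (max(xs) - min(xs)) + (max(ys) - min(ys))

    def idt_fixations(samples, max_dispersion, min_duration):
        """samples: iterable of (ts, (x, y)) pairs; yields fixation windows."""
        window = []
        for ts, point in samples:
            window.append((ts, point))
            if dispersion([p for _, p in window]) > max_dispersion:
                # The window without the newest point was a fixation candidate
                if len(window) > 1 and window[-2][0] - window[0][0] >= min_duration:
                    yield window[:-1]
                window = window[-1:]
        if window and window[-1][0] - window[0][0] >= min_duration:
            yield window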
@@ -232,54 +241,84 @@ def main():
ts_status[end_ts] = GazeFeatures.GazeStatus.from_position(end_position, 'Saccade', len(ts_saccades))
else:
- continue
+
+ start_ts, start_position = gaze_movement.positions.first
+
+ ts_unknown[start_ts] = gaze_movement
+
+ for ts, position in gaze_movement.positions.items():
+
+ ts_status[ts] = GazeFeatures.GazeStatus.from_position(position, 'UnknownGazeMovement', len(ts_unknown))
# Update Progress Bar
progress = start_ts - int(args.time_range[0] * 1e6)
- MiscFeatures.printProgressBar(progress, tobii_segment_video.duration, prefix = 'GazeMovements identification:', suffix = 'Complete', length = 100)
+ #MiscFeatures.printProgressBar(progress, tobii_segment_video.duration, prefix = 'GazeMovements identification:', suffix = 'Complete', length = 100)
print(f'\nGazeMovements identification metrics:')
print(f'\t{len(ts_fixations)} fixations found')
print(f'\t{len(ts_saccades)} saccades found')
+ print(f'\t{len(ts_unknown)} unknown movements found')
- # Export fixations analysis
- fixations_dataframe = ts_fixations.as_dataframe()
- fixations_dataframe.to_csv(fixations_filepath, index=True)
- print(f'\nFixations saved into {fixations_filepath}')
+ # Prepare gaze metrics
+ metrics = {}
+ segment_duration = tobii_segment_video.duration * 1e-3
+ metrics['segment_duration (ms)'] = segment_duration
- # Export saccades analysis
- saccades_dataframe = ts_saccades.as_dataframe()
- saccades_dataframe.to_csv(saccades_filepath, index=True)
- print(f'Saccades saved into {saccades_filepath}')
+ fixations_exist = len(ts_fixations) > 0
+ saccades_exist = len(ts_saccades) > 0
+ unknown_exist = len(ts_unknown) > 0
+ status_exist = len(ts_status) > 0
+
+ # Analyse fixations
+ if fixations_exist:
- # Export gaze status analysis
- ts_status.as_dataframe().to_csv(gaze_status_filepath, index=True)
- print(f'Gaze status saved into {gaze_status_filepath}')
+ fixations_dataframe = ts_fixations.as_dataframe()
+ fixations_dataframe.to_csv(fixations_filepath, index=True)
+ print(f'\nFixations saved into {fixations_filepath}')
- # Export gaze metrics
+ exploitation_time = fixations_dataframe.duration.sum() * 1e-3
- # Consider only fixations > duration threshold and saccades < duration threshold
- # This is mostly useful to filter first and last fixation/saccade as the time range can start anywhere
- filtered_fixations = fixations_dataframe[fixations_dataframe.duration > args.duration_threshold*1e3]
- filtered_saccades = saccades_dataframe[saccades_dataframe.duration < args.duration_threshold*1e3]
+ metrics['fixations_number'] = fixations_dataframe.shape[0]
+ metrics['fixations_duration_mean (ms)'] = fixations_dataframe.duration.mean() * 1e-3
+ metrics['exploitation_ratio (%)'] = exploitation_time / segment_duration * 100
- segment_duration = tobii_segment_video.duration * 1e-3
- exploitation_time = filtered_fixations.duration.sum() * 1e-3
- exploration_time = filtered_saccades.duration.sum() * 1e-3
-
- metrics = {
- 'segment_duration (ms)': segment_duration,
- 'fixations_number': filtered_fixations.shape[0],
- 'fixations_duration_mean (ms)': filtered_fixations.duration.mean() * 1e-3,
- 'saccades_number': filtered_saccades.shape[0],
- 'saccades_duration_mean (ms)': filtered_saccades.duration.mean() * 1e-3,
- 'exploitation_ratio (%)': exploitation_time / segment_duration * 100,
- 'exploration_ratio (%)': exploration_time / segment_duration * 100,
- 'exploit_explore_ratio:': exploitation_time / exploration_time
- }
+ # Analyse saccades
+ if saccades_exist:
- metrics_dataframe = pandas.DataFrame(metrics, index=[participant_name])
+ saccades_dataframe = ts_saccades.as_dataframe()
+ saccades_dataframe.to_csv(saccades_filepath, index=True)
+ print(f'Saccades saved into {saccades_filepath}')
+
+ exploration_time = saccades_dataframe.duration.sum() * 1e-3
+
+ metrics['saccades_number'] = saccades_dataframe.shape[0]
+ metrics['saccades_duration_mean (ms)'] = saccades_dataframe.duration.mean() * 1e-3
+ metrics['exploration_ratio (%)'] = exploration_time / segment_duration * 100
+
+ # Export unknown movements analysis
+ if unknown_exist:
+
+ unknown_dataframe = ts_unknown.as_dataframe()
+ unknown_dataframe.to_csv(unknown_filepath, index=True)
+ print(f'Unknown movements saved into {unknown_filepath}')
+
+ unknown_time = unknown_dataframe.duration.sum() * 1e-3
+
+ metrics['unknown_number'] = unknown_dataframe.shape[0]
+ metrics['unknown_duration_mean (ms)'] = unknown_dataframe.duration.mean() * 1e-3
+ metrics['unknown_ratio (%)'] = unknown_time / segment_duration * 100
+
+ if fixations_exist and saccades_exist:
+ metrics['exploit_explore_ratio'] = exploitation_time / exploration_time
+
+ # Export gaze status analysis
+ if status_exist:
+
+ ts_status.as_dataframe().to_csv(gaze_status_filepath, index=True)
+ print(f'Gaze status saved into {gaze_status_filepath}')
+ # Export gaze metrics
+ metrics_dataframe = pandas.DataFrame(metrics, index=[participant_name])
metrics_dataframe.to_csv(gaze_metrics_filepath, index=True)
print(f'Gaze metrics saved into {gaze_metrics_filepath}')
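
The exported ratios are plain duration shares of the segment; with made-up durations (all in milliseconds) the computation reduces to:

    # Hypothetical numbers illustrating the metrics computed above (ms).
    segment_duration = 60_000.0
    exploitation_time = 42_000.0   # sum of fixation durations
    exploration_time = 9_000.0     # sum of saccade durations
    unknown_time = 3_000.0         # sum of unknown movement durations

    metrics = {
        'exploitation_ratio (%)': exploitation_time / segment_duration * 100,  # 70.0
        'exploration_ratio (%)': exploration_time / segment_duration * 100,    # 15.0
        'unknown_ratio (%)': unknown_time / segment_duration * 100,            # 5.0
        'exploit_explore_ratio': exploitation_time / exploration_time,         # ~4.67
    }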
@@ -298,10 +337,15 @@ def main():
# Initialise progress bar
MiscFeatures.printProgressBar(0, tobii_segment_video.duration, prefix = '\nGazeMovements visualisation:', suffix = 'Complete', length = 100)
- current_fixation_ts, current_fixation = ts_fixations.pop_first()
- current_fixation_time_counter = 0
+ if fixations_exist:
+ current_fixation_ts, current_fixation = ts_fixations.pop_first()
+ current_fixation_time_counter = 0
- current_saccade_ts, current_saccade = ts_saccades.pop_first()
+ if saccades_exist:
+ current_saccade_ts, current_saccade = ts_saccades.pop_first()
+
+ if unknown_exist:
+ current_unknown_ts, current_unknown = ts_unknown.pop_first()
# Iterate on video frames
for video_ts, video_frame in tobii_segment_video.frames():
@@ -315,83 +359,155 @@ def main():
assert(ts_current_aois == video_ts)
- selected_aoi = AOIFeatures.AreaOfInterest(current_aois.pop(args.aoi))
+ current_aoi = AOIFeatures.AreaOfInterest(current_aois.pop(args.aoi))
# Apply perspective transform algorithm
destination = numpy.float32([[0, 0],[1920, 0],[1920, 1080],[0, 1080]])
- aoi_matrix = cv.getPerspectiveTransform(selected_aoi.astype(numpy.float32), destination)
+ aoi_matrix = cv.getPerspectiveTransform(current_aoi.astype(numpy.float32), destination)
visu_matrix = cv.warpPerspective(video_frame.matrix, aoi_matrix, (1920, 1080))
# Wait for aois projection
except KeyError:
pass
- # Check next fixation
- if video_ts > current_fixation_ts + current_fixation.duration and len(ts_fixations) > 0:
+ if fixations_exist:
+
+ # Check next fixation
+ if video_ts > current_fixation_ts + current_fixation.duration and len(ts_fixations) > 0:
+
+ current_fixation_ts, current_fixation = ts_fixations.pop_first()
+ current_fixation_time_counter = 0
+
+ # While current time belongs to the current fixation
+ if video_ts >= current_fixation_ts and video_ts <= current_fixation_ts + current_fixation.duration:
+
+ current_fixation_time_counter += 1
+
+ # Draw current fixation
+ cv.circle(visu_matrix, (int(current_fixation.centroid[0]), int(current_fixation.centroid[1])), int(current_fixation.dispersion), (0, 255, 0), current_fixation_time_counter)
+ cv.circle(heatmap_matrix, (int(current_fixation.centroid[0]), int(current_fixation.centroid[1])), int(current_fixation.dispersion), (0, 255, 0))
+
+ if saccades_exist:
+
+ # Check next saccade
+ if video_ts > current_saccade_ts + current_saccade.duration and len(ts_saccades) > 0:
+
+ current_saccade_ts, current_saccade = ts_saccades.pop_first()
+
+ # While current time belongs to the current saccade
+ if video_ts >= current_saccade_ts and video_ts <= current_saccade_ts + current_saccade.duration:
+
+ # Draw all saccade gaze positions
+ try:
+
+ # Get next saccade position
+ ts_next, _ = current_saccade.positions.first
+
+ # Check that the next saccade position is not after the current time
+ while ts_next < video_ts:
+
+ ts_start, start_gaze_position = current_saccade.positions.pop_first()
+ ts_next, next_gaze_position = current_saccade.positions.first
- current_fixation_ts, current_fixation = ts_fixations.pop_first()
- current_fixation_time_counter = 0
+ # Draw movement
+ if start_gaze_position.valid and next_gaze_position.valid:
- # While current time belongs to the current fixation
- if video_ts >= current_fixation_ts and video_ts <= current_fixation_ts + current_fixation.duration:
+ int_start_position = (int(start_gaze_position[0]), int(start_gaze_position[1]))
+ int_next_position = (int(next_gaze_position[0]), int(next_gaze_position[1]))
- current_fixation_time_counter += 1
+ cv.line(visu_matrix, int_start_position, int_next_position, (0, 0, 255), 3)
+ cv.line(heatmap_matrix, int_start_position, int_next_position, (0, 0, 255), 3)
- # Draw current fixation
- cv.circle(visu_matrix, (int(current_fixation.centroid[0]), int(current_fixation.centroid[1])), int(current_fixation.dispersion), (0, 255, 0), current_fixation_time_counter)
- cv.circle(heatmap_matrix, (int(current_fixation.centroid[0]), int(current_fixation.centroid[1])), int(current_fixation.dispersion), (0, 255, 0))
+ # Gaze position buffer is empty
+ except IndexError:
+ pass
- # Check next saccade
- if video_ts > current_saccade_ts + current_saccade.duration and len(ts_saccades) > 0:
+ if unknown_exist:
- current_saccade_ts, current_saccade = ts_saccades.pop_first()
+ # Check next unknown movement
+ if video_ts > current_unknown_ts + current_unknown.duration and len(ts_unknown) > 0:
- # While current time belongs to the current saccade
- if video_ts >= current_saccade_ts and video_ts <= current_saccade_ts + current_saccade.duration:
+ current_unknown_ts, current_unknown = ts_unknown.pop_first()
- start_ts, start_position = current_saccade.positions.first
- end_ts, end_position = current_saccade.positions.last
+ # While current time belongs to the current unknown movement
+ if video_ts >= current_unknown_ts and video_ts <= current_unknown_ts + current_unknown.duration:
- # Draw saccade
- int_start_position = (int(start_position[0]), int(start_position[1]))
- int_end_position = (int(end_position[0]), int(end_position[1]))
+ # Draw all unknown gaze positions
+ try:
- cv.line(visu_matrix, int_start_position, int_end_position, (0, 0, 255), 2)
- cv.line(heatmap_matrix, int_start_position, int_end_position, (0, 0, 255), 2)
+ # Get next unknown position
+ ts_next, _ = current_unknown.positions.first
- # Write start gaze position
- cv.putText(visu_matrix, str(int_start_position), int_start_position, cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv.LINE_AA)
- cv.putText(visu_matrix, str(int_end_position), int_end_position, cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv.LINE_AA)
+ # Check that the next unknown position is not after the current time
+ while ts_next < video_ts:
- # Check next gaze
+ ts_start, start_gaze_position = current_unknown.positions.pop_first()
+ ts_next, next_gaze_position = current_unknown.positions.first
+
+ # Draw movement
+ if start_gaze_position.valid and next_gaze_position.valid:
+
+ int_start_position = (int(start_gaze_position[0]), int(start_gaze_position[1]))
+ int_next_position = (int(next_gaze_position[0]), int(next_gaze_position[1]))
+
+ cv.line(visu_matrix, int_start_position, int_next_position, (255, 0, 0), 3)
+ cv.line(heatmap_matrix, int_start_position, int_next_position, (255, 0, 0), 3)
+
+ # Gaze position buffer is empty
+ except IndexError:
+ pass
+
+ # Draw all gaze positions up to the current time
try:
- # Get closest gaze position before video timestamp and remove all gaze positions before
- ts_nearest, nearest_gaze_position = ts_gaze_positions.pop_last_until(video_ts)
+ # Get next gaze position
+ ts_next, next_gaze_position = ts_gaze_positions.first
- # Draw gaze
- nearest_gaze_position.draw(visu_matrix)
- nearest_gaze_position.draw(heatmap_matrix)
+ # Check that the next gaze position is not after the current time
+ while ts_next < video_ts:
- # Write gaze position
- cv.putText(visu_matrix, str(nearest_gaze_position.value), nearest_gaze_position.value, cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
-
- # Write gaze timing
- cv.rectangle(visu_matrix, (0, 50), (550, 100), (31, 31, 31), -1)
- cv.putText(visu_matrix, f'Gaze time: {ts_nearest*1e-3:.3f} ms', (20, 85), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
+ ts_start, start_gaze_position = ts_gaze_positions.pop_first()
+ ts_next, next_gaze_position = ts_gaze_positions.first
+
+ # Draw movement
+ if start_gaze_position.valid and next_gaze_position.valid:
+
+ int_start_position = (int(start_gaze_position[0]), int(start_gaze_position[1]))
+ int_next_position = (int(next_gaze_position[0]), int(next_gaze_position[1]))
+
+ cv.line(visu_matrix, int_start_position, int_next_position, (0, 255, 255), 1)
+ cv.line(heatmap_matrix, int_start_position, int_next_position, (0, 255, 255), 1)
+
+ # Draw gaze
+ next_gaze_position.draw(visu_matrix)
+ next_gaze_position.draw(heatmap_matrix)
+
+ # Write last gaze position
+ if next_gaze_position.valid:
+
+ int_next_position = (int(next_gaze_position[0]), int(next_gaze_position[1]))
+ cv.putText(visu_matrix, str(int_next_position), int_next_position, cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
- # Wait for gaze position
- except KeyError:
+ # Gaze position buffer is empty
+ except IndexError:
pass
-
+
+ # Write last gaze position timing
+ cv.rectangle(visu_matrix, (0, 50), (550, 100), (31, 31, 31), -1)
+ cv.putText(visu_matrix, f'Gaze time: {ts_next*1e-3:.3f} ms', (20, 85), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
+
# Write segment timing
cv.rectangle(visu_matrix, (0, 0), (550, 50), (63, 63, 63), -1)
cv.putText(visu_matrix, f'Video time: {video_ts*1e-3:.3f} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
# Write movement identification parameters
- cv.rectangle(visu_matrix, (0, 100), (550, 200), (63, 63, 63), -1)
- cv.putText(visu_matrix, f'Dispersion threshold: {args.dispersion_threshold} px', (20, 140), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
- cv.putText(visu_matrix, f'Duration threshold: {args.duration_threshold} ms', (20, 190), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
+ cv.rectangle(visu_matrix, (0, 100), (550, 260), (63, 63, 63), -1)
+ cv.putText(visu_matrix, f'Dispersion max: {args.dispersion_threshold} px', (20, 160), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
+ cv.putText(visu_matrix, f'Duration min: {args.duration_threshold} ms', (20, 220), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
+
+ # Draw dispersion threshold circle
+ cv.circle(visu_matrix, (args.dispersion_threshold + 400, 180), 2, (0, 255, 255), -1)
+ cv.circle(visu_matrix, (args.dispersion_threshold + 400, 180), args.dispersion_threshold, (255, 150, 150), 1)
if args.window:
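
The saccade, unknown and raw gaze drawing branches above all share one consumption pattern: pop time-stamped positions while the next one is older than the current frame time, drawing a segment between each consecutive pair of valid positions. A standalone sketch of that pattern over a deque (a hypothetical stand-in for argaze's time-stamped buffers, with None marking invalid positions):

    from collections import deque

    def draw_until(positions, video_ts, draw_line):
        """Consume (ts, position) pairs older than video_ts, drawing a line
        between each consecutive pair of valid positions."""
        try:
            while positions[0][0] < video_ts:
                _, start = positions.popleft()   # pop_first()
                _, nxt = positions[0]            # first (peek)
                if start is not None and nxt is not None:
                    draw_line(start, nxt)
        except IndexError:
            pass                                 # buffer exhausted

    segments = []
    buf = deque([(0, (0, 0)), (10, (5, 5)), (20, None), (30, (9, 9))])
    draw_until(buf, 25, lambda a, b: segments.append((a, b)))
    assert segments == [((0, 0), (5, 5))]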