aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--src/argaze/utils/tobii_segment_gaze_movements_export.py89
1 files changed, 60 insertions, 29 deletions
diff --git a/src/argaze/utils/tobii_segment_gaze_movements_export.py b/src/argaze/utils/tobii_segment_gaze_movements_export.py
index 0f7bd64..88f61d7 100644
--- a/src/argaze/utils/tobii_segment_gaze_movements_export.py
+++ b/src/argaze/utils/tobii_segment_gaze_movements_export.py
@@ -73,7 +73,7 @@ def main():
ts_aois_projections = DataStructures.TimeStampedBuffer.from_json(aoi_filepath)
print(f'\nAOI frames: ', len(ts_aois_projections))
- aoi_names = ts_aois_projections.as_dataframe().drop(['offset','comment'], axis=1).columns
+ aoi_names = ts_aois_projections.as_dataframe().drop(['offset','error'], axis=1).columns
for aoi_name in aoi_names:
print(f'\t{aoi_name}')
@@ -108,6 +108,9 @@ def main():
# Gaze projection metrics
ts_projection_metrics = DataStructures.TimeStampedBuffer()
+ # Starting with no AOI projection
+ selected_aoi = AOIFeatures.AreaOfInterest()
+
# Initialise progress bar
MiscFeatures.printProgressBar(0, tobii_segment_video.duration, prefix = '\nGazePositions projection:', suffix = 'Complete', length = 100)
@@ -124,19 +127,32 @@ def main():
assert(ts_current_aois <= ts)
- # QUESTION: What todo if the current aoi is too old ?
- # if the aoi didn't move it is not a problem...
- # For the moment, we just provide a metric to assess its not too big
- ts_projection_metrics[ts] = {'frame': ts_current_aois, 'age': ts - ts_current_aois}
+ # Is the AOI projection valid ?
+ if 'error' in current_aois.keys():
+
+ # TODO: display error
+ current_aoi_error = current_aois.pop('error')
- current_aoi_offset = current_aois.pop('offset')
- current_aoi_comment = current_aois.pop('comment')
+ # Wait for a valid AOI projection
+ continue
+
+ # Is the selected aoi there ?
+ if args.aoi in current_aois.keys():
+
+ selected_aoi = AOIFeatures.AreaOfInterest(current_aois.pop(args.aoi))
+ selected_ts_aoi = ts_current_aois
- selected_aoi = AOIFeatures.AreaOfInterest(current_aois.pop(args.aoi))
+ # Else, use the last one if it exists
+ elif selected_aoi.empty:
+ continue
- # Wait for aois projection
- except KeyError:
+ # QUESTION: What to do if the current valid AOI is too old?
+ # if the aoi didn't move it is not a problem...
+ # For the moment, we just provide a metric to assess that it is not too big
+ ts_projection_metrics[ts] = {'frame': selected_ts_aoi, 'age': ts - selected_ts_aoi}
+ # Wait for a first aoi projection
+ except KeyError as e:
continue
# Test gaze position validity
@@ -162,14 +178,18 @@ def main():
if selected_aoi.contains_point(gaze_position.value):
inner_x, inner_y = selected_aoi.inner_axis(gaze_position.value)
+ inner_precision_px = gaze_precision_px * tobii_segment_video.width * tobii_segment_video.height / selected_aoi.area
# Store inner gaze position for further movement processing
- ts_gaze_positions[ts] = GazeFeatures.GazePosition((round(inner_x*1920), round(inner_y*1080))) # TEMP: This is Screen_Plan dimension
-
- continue
+ # TEMP: 1920x1080 are Screen_Plan dimensions
+ # TODO? truncate ts to the millisecond beforehand: int(ts*1e-3)*1e3
+ ts_gaze_positions[ts] = GazeFeatures.GazePosition((round(inner_x*1920), round(inner_y*1080)))#, precision=inner_precision_px)
# Store unvalid gaze position for further movement processing
- ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition()
+ else:
+
+ # TODO? truncate ts to the millisecond beforehand: int(ts*1e-3)*1e3
+ ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition()
print(f'\nGazePositions projection metrics:')
projection_metrics_dataframe = ts_projection_metrics.as_dataframe()
@@ -297,7 +317,7 @@ def main():
selected_aoi = AOIFeatures.AreaOfInterest(current_aois.pop(args.aoi))
- # Apply Perspective Transform Algorithm
+ # Apply perspective transform algorithm
destination = numpy.float32([[0, 0],[1920, 0],[1920, 1080],[0, 1080]])
aoi_matrix = cv.getPerspectiveTransform(selected_aoi.astype(numpy.float32), destination)
visu_matrix = cv.warpPerspective(video_frame.matrix, aoi_matrix, (1920, 1080))
@@ -306,8 +326,14 @@ def main():
except KeyError:
pass
+ # Check next fixation
+ if video_ts > current_fixation_ts + current_fixation.duration and len(ts_fixations) > 0:
+
+ current_fixation_ts, current_fixation = ts_fixations.pop_first()
+ current_fixation_time_counter = 0
+
# While current time belongs to the current fixation
- if video_ts >= current_fixation_ts and video_ts < current_fixation_ts + current_fixation.duration:
+ if video_ts >= current_fixation_ts and video_ts <= current_fixation_ts + current_fixation.duration:
current_fixation_time_counter += 1
@@ -315,11 +341,10 @@ def main():
cv.circle(visu_matrix, (int(current_fixation.centroid[0]), int(current_fixation.centroid[1])), int(current_fixation.dispersion), (0, 255, 0), current_fixation_time_counter)
cv.circle(heatmap_matrix, (int(current_fixation.centroid[0]), int(current_fixation.centroid[1])), int(current_fixation.dispersion), (0, 255, 0))
- # Check next fixation
- elif video_ts >= current_fixation_ts + current_fixation.duration and len(ts_fixations) > 0:
+ # Check next saccade
+ if video_ts > current_saccade_ts + current_saccade.duration and len(ts_saccades) > 0:
- current_fixation_ts, current_fixation = ts_fixations.pop_first()
- current_fixation_time_counter = 0
+ current_saccade_ts, current_saccade = ts_saccades.pop_first()
# While current time belongs to the current saccade
if video_ts >= current_saccade_ts and video_ts <= current_saccade_ts + current_saccade.duration:
@@ -334,33 +359,39 @@ def main():
cv.line(visu_matrix, int_start_position, int_end_position, (0, 0, 255), 2)
cv.line(heatmap_matrix, int_start_position, int_end_position, (0, 0, 255), 2)
- # Check next saccade
- elif video_ts > current_saccade_ts + current_saccade.duration and len(ts_saccades) > 0:
-
- current_saccade_ts, current_saccade = ts_saccades.pop_first()
+ # Write start gaze position
+ cv.putText(visu_matrix, str(int_start_position), int_start_position, cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv.LINE_AA)
+ cv.putText(visu_matrix, str(int_end_position), int_end_position, cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv.LINE_AA)
# Check next gaze
try:
# Get closest gaze position before video timestamp and remove all gaze positions before
- _, nearest_gaze_position = ts_gaze_positions.pop_last_before(video_ts)
+ ts_nearest, nearest_gaze_position = ts_gaze_positions.pop_last_until(video_ts)
# Draw gaze
nearest_gaze_position.draw(visu_matrix)
nearest_gaze_position.draw(heatmap_matrix)
+ # Write gaze position
+ cv.putText(visu_matrix, str(nearest_gaze_position.value), nearest_gaze_position.value, cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
+
+ # Write gaze timing
+ cv.rectangle(visu_matrix, (0, 50), (550, 100), (31, 31, 31), -1)
+ cv.putText(visu_matrix, f'Gaze time: {ts_nearest*1e-3:.3f} ms', (20, 85), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
+
# Wait for gaze position
except KeyError:
pass
# Write segment timing
cv.rectangle(visu_matrix, (0, 0), (550, 50), (63, 63, 63), -1)
- cv.putText(visu_matrix, f'Segment time: {int(video_ts/1e3)} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
+ cv.putText(visu_matrix, f'Video time: {video_ts*1e-3:.3f} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
# Write movement identification parameters
- cv.rectangle(visu_matrix, (0, 90), (550, 150), (63, 63, 63), -1)
- cv.putText(visu_matrix, f'Dispersion threshold: {args.dispersion_threshold} px', (20, 100), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
- cv.putText(visu_matrix, f'Duration threshold: {args.duration_threshold} ms', (20, 140), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
+ cv.rectangle(visu_matrix, (0, 100), (550, 200), (63, 63, 63), -1)
+ cv.putText(visu_matrix, f'Dispersion threshold: {args.dispersion_threshold} px', (20, 140), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
+ cv.putText(visu_matrix, f'Duration threshold: {args.duration_threshold} ms', (20, 190), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
if args.window: