From a832ff17b68b91404f9a14672f2ba0ab20daa473 Mon Sep 17 00:00:00 2001
From: Théo de la Hogue
Date: Wed, 23 Nov 2022 11:57:01 +0100
Subject: Renaming Movement into GazeMovement. Using microsecond instead of millisecond.

---
 .../utils/tobii_segment_gaze_movements_export.py   | 58 +++++++++++-----------
 1 file changed, 28 insertions(+), 30 deletions(-)

(limited to 'src')

diff --git a/src/argaze/utils/tobii_segment_gaze_movements_export.py b/src/argaze/utils/tobii_segment_gaze_movements_export.py
index 7984eed..0049e15 100644
--- a/src/argaze/utils/tobii_segment_gaze_movements_export.py
+++ b/src/argaze/utils/tobii_segment_gaze_movements_export.py
@@ -55,8 +55,8 @@ def main():
 		os.makedirs(destination_path)
 		print(f'{destination_path} folder created')

-	fixations_filepath = f'{destination_path}/movements_fixations.csv'
-	saccades_filepath = f'{destination_path}/movements_saccades.csv'
+	fixations_filepath = f'{destination_path}/gaze_fixations.csv'
+	saccades_filepath = f'{destination_path}/gaze_saccades.csv'

 	gaze_status_filepath = f'{destination_path}/gaze_status.csv'
 	gaze_status_video_filepath = f'{destination_path}/gaze_status.mp4'
@@ -102,45 +102,45 @@ def main():

 			gaze_accuracy_px = round(tobii_segment_video.width * float(gaze_accuracy_mm) / float(tobii_camera_hfov_mm))

-			# Store gaze position using millisecond unit timestamp
-			ts_gaze_positions[ts/1e3] = GazeFeatures.GazePosition(gaze_position_px, accuracy=gaze_accuracy_px)
+			# Store gaze position
+			ts_gaze_positions[ts] = GazeFeatures.GazePosition(gaze_position_px, accuracy=gaze_accuracy_px)

 			continue

 		# Store unvalid gaze position for further movement processing
-		ts_gaze_positions[ts/1e3] = GazeFeatures.UnvalidGazePosition()
+		ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition()

-	print(f'Movement identifier parameters:')
+	print(f'GazeMovement identifier parameters:')
 	print(f'\tDispersion threshold = {args.dispersion_threshold}')
 	print(f'\tDuration threshold = {args.duration_threshold}')

 	# Start movement identification
-	movement_identifier = GazeFeatures.DispersionBasedMovementIdentifier(ts_gaze_positions, args.dispersion_threshold, args.duration_threshold)
-	fixations = GazeFeatures.TimeStampedMovements()
-	saccades = GazeFeatures.TimeStampedMovements()
+	movement_identifier = GazeFeatures.DispersionBasedGazeMovementIdentifier(ts_gaze_positions, args.dispersion_threshold, args.duration_threshold*1e3)
+	fixations = GazeFeatures.TimeStampedGazeMovements()
+	saccades = GazeFeatures.TimeStampedGazeMovements()
 	gaze_status = GazeFeatures.TimeStampedGazeStatus()

 	# Initialise progress bar
-	MiscFeatures.printProgressBar(0, int(tobii_segment_video.duration/1e3), prefix = 'Movements identification:', suffix = 'Complete', length = 100)
+	MiscFeatures.printProgressBar(0, tobii_segment_video.duration, prefix = 'GazeMovements identification:', suffix = 'Complete', length = 100)

-	for item in movement_identifier:
+	for gaze_movement in movement_identifier:

-		if isinstance(item, GazeFeatures.DispersionBasedMovementIdentifier.DispersionBasedFixation):
+		if isinstance(gaze_movement, GazeFeatures.DispersionBasedGazeMovementIdentifier.DispersionBasedFixation):

-			start_ts, start_position = item.positions.first
+			start_ts, start_position = gaze_movement.positions.first

-			fixations[start_ts] = item
+			fixations[start_ts] = gaze_movement

-			for ts, position in item.positions.items():
+			for ts, position in gaze_movement.positions.items():

 				gaze_status[ts] = GazeFeatures.GazeStatus(position, 'Fixation', len(fixations))

-		elif isinstance(item, GazeFeatures.DispersionBasedMovementIdentifier.DispersionBasedSaccade):
+		elif isinstance(gaze_movement, GazeFeatures.DispersionBasedGazeMovementIdentifier.DispersionBasedSaccade):

-			start_ts, start_position = item.positions.first
-			end_ts, end_position = item.positions.last
+			start_ts, start_position = gaze_movement.positions.first
+			end_ts, end_position = gaze_movement.positions.last

-			saccades[start_ts] = item
+			saccades[start_ts] = gaze_movement

 			gaze_status[start_ts] = GazeFeatures.GazeStatus(start_position, 'Saccade', len(saccades))
 			gaze_status[end_ts] = GazeFeatures.GazeStatus(end_position, 'Saccade', len(saccades))
@@ -149,8 +149,8 @@ def main():
 			continue

 		# Update Progress Bar
-		progress = ts - int(args.time_range[0] * 1e3)
-		MiscFeatures.printProgressBar(progress, int(tobii_segment_video.duration/1e3), prefix = 'Movements identification:', suffix = 'Complete', length = 100)
+		progress = ts - int(args.time_range[0] * 1e6)
+		MiscFeatures.printProgressBar(progress, tobii_segment_video.duration, prefix = 'GazeMovements identification:', suffix = 'Complete', length = 100)

 	print(f'\n{len(fixations)} fixations and {len(saccades)} saccades found')

@@ -173,7 +173,7 @@ def main():
 	try:

 		# Initialise progress bar
-		MiscFeatures.printProgressBar(0, tobii_segment_video.duration/1e3, prefix = 'Video with movements processing:', suffix = 'Complete', length = 100)
+		MiscFeatures.printProgressBar(0, tobii_segment_video.duration, prefix = 'Video with movements processing:', suffix = 'Complete', length = 100)

 		current_fixation_ts, current_fixation = fixations.pop_first()
 		current_fixation_time_counter = 0
@@ -183,12 +183,10 @@ def main():
 		# Iterate on video frames
 		for video_ts, video_frame in tobii_segment_video.frames():

-			video_ts_ms = video_ts / 1e3
-
 			# Draw current fixation
 			if len(fixations) > 0:

-				if video_ts_ms > current_fixation_ts + current_fixation.duration:
+				if video_ts > current_fixation_ts + current_fixation.duration:

 					current_fixation_ts, current_fixation = fixations.pop_first()
 					current_fixation_time_counter = 1
@@ -196,7 +194,7 @@ def main():
 			# Draw saccade
 			if len(saccades) > 0:

-				if video_ts_ms > current_saccade_ts + current_saccade.duration:
+				if video_ts > current_saccade_ts + current_saccade.duration:

 					current_saccade_ts, current_saccade = saccades.pop_first()
 					start_ts, start_position = current_saccade.positions.pop_first()
@@ -213,7 +211,7 @@ def main():
 			try:

 				# Get closest gaze position before video timestamp and remove all gaze positions before
-				_, nearest_gaze_position = ts_gaze_positions.pop_first_until(video_ts_ms)
+				_, nearest_gaze_position = ts_gaze_positions.pop_first_until(video_ts)

 				# Draw gaze
 				nearest_gaze_position.draw(video_frame.matrix)
@@ -224,7 +222,7 @@ def main():

 			# Write segment timing
 			cv.rectangle(video_frame.matrix, (0, 0), (550, 50), (63, 63, 63), -1)
-			cv.putText(video_frame.matrix, f'Segment time: {int(video_ts_ms)} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
+			cv.putText(video_frame.matrix, f'Segment time: {int(video_ts/1e3)} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)

 			# Write movement identification parameters
 			cv.rectangle(video_frame.matrix, (0, 90), (550, 150), (63, 63, 63), -1)
@@ -244,8 +242,8 @@ def main():
 			output_video.write(video_frame.matrix)

 			# Update Progress Bar
-			progress = video_ts_ms - int(args.time_range[0] * 1e3)
-			MiscFeatures.printProgressBar(progress, tobii_segment_video.duration/1e3, prefix = 'Video with movements processing:', suffix = 'Complete', length = 100)
+			progress = video_ts - int(args.time_range[0] * 1e6)
+			MiscFeatures.printProgressBar(progress, tobii_segment_video.duration, prefix = 'Video with movements processing:', suffix = 'Complete', length = 100)

 	# Exit on 'ctrl+C' interruption
 	except KeyboardInterrupt:
-- 
cgit v1.1
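Context for the unit change above: after this patch, gaze and video timestamps are handled in microseconds end to end, args.duration_threshold (apparently given in milliseconds) is scaled by 1e3 before reaching the identifier, args.time_range[0] (presumably seconds) is scaled by 1e6 for progress computation, and the video overlay converts back to milliseconds only for display. A minimal standalone sketch of those conversions follows; the helper names are illustrative and are not part of the ArGaze API.

US_PER_MS = 1e3   # microseconds per millisecond
US_PER_S = 1e6    # microseconds per second

def duration_threshold_us(duration_threshold_ms: float) -> float:
    # Mirrors args.duration_threshold*1e3 in the patch (ms -> µs).
    return duration_threshold_ms * US_PER_MS

def time_range_start_us(time_range_start_s: float) -> int:
    # Mirrors int(args.time_range[0] * 1e6) in the patch (s -> µs).
    return int(time_range_start_s * US_PER_S)

def segment_time_label(video_ts_us: int) -> str:
    # Mirrors f'Segment time: {int(video_ts/1e3)} ms' in the patch (µs -> ms for display).
    return f'Segment time: {int(video_ts_us / US_PER_MS)} ms'

if __name__ == '__main__':
    assert duration_threshold_us(100) == 100_000     # 100 ms threshold -> 100 000 µs
    assert time_range_start_us(2.5) == 2_500_000     # segment start at 2.5 s -> 2 500 000 µs
    print(segment_time_label(2_500_000))             # 'Segment time: 2500 ms'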