author | Théo de la Hogue | 2022-12-19 11:40:59 +0100
---|---|---
committer | Théo de la Hogue | 2022-12-19 11:40:59 +0100
commit | b2eaff581f544f0d93557767c2d4d5242b1f961b (patch) |
tree | 85e1e769228fe9e796150128cdc43378c02e9e17 |
parent | 130ed1bff4df87b1be2b5ff1f0333e3b4cb93383 (diff) |
download | argaze-b2eaff581f544f0d93557767c2d4d5242b1f961b.zip, argaze-b2eaff581f544f0d93557767c2d4d5242b1f961b.tar.gz, argaze-b2eaff581f544f0d93557767c2d4d5242b1f961b.tar.bz2, argaze-b2eaff581f544f0d93557767c2d4d5242b1f961b.tar.xz |
Selecting deviation threshold according to gaze precision mean. Saving gaze projection to avoid processing it each time.
-rw-r--r-- | src/argaze/utils/tobii_segment_gaze_movements_export.py | 269 |
1 file changed, 162 insertions(+), 107 deletions(-)
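The headline change: the dispersion threshold used for fixation identification now defaults to the mean of the projected gaze precisions instead of a fixed 50 px. As background for the diff below, here is a minimal sketch of the angular-precision-to-pixel conversion the script performs per gaze sample; `PRECISION_DEG` and `CAMERA_HFOV_DEG` are hypothetical stand-ins for `TobiiSpecifications.PRECISION` and `TobiiSpecifications.CAMERA_HFOV`:

```python
import numpy

# Hypothetical stand-ins for TobiiSpecifications.PRECISION and
# TobiiSpecifications.CAMERA_HFOV (both angles in degrees).
PRECISION_DEG = 1.42
CAMERA_HFOV_DEG = 82.

def precision_to_pixels(gaze_depth_mm: float, video_width_px: int) -> int:
    """Convert angular gaze precision into pixels at the gazed depth,
    mirroring the per-sample conversion in the diff below."""
    gaze_precision_mm = numpy.sin(numpy.deg2rad(PRECISION_DEG)) * gaze_depth_mm
    camera_hfov_mm = numpy.sin(numpy.deg2rad(CAMERA_HFOV_DEG)) * gaze_depth_mm
    # gaze_depth_mm cancels out of the ratio: the pixel precision depends
    # only on the two angles and the video width.
    return int(round(video_width_px * gaze_precision_mm / camera_hfov_mm))
```

Because the depth term cancels, the fallback threshold effectively scales with the Tobii angular specifications and the video width, before being rescaled into the AOI plane.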
diff --git a/src/argaze/utils/tobii_segment_gaze_movements_export.py b/src/argaze/utils/tobii_segment_gaze_movements_export.py
index 3cd6dff..934f340 100644
--- a/src/argaze/utils/tobii_segment_gaze_movements_export.py
+++ b/src/argaze/utils/tobii_segment_gaze_movements_export.py
@@ -24,7 +24,7 @@ def main():
     parser.add_argument('-s', '--segment_path', metavar='SEGMENT_PATH', type=str, default=None, help='path to a tobii segment folder', required=True)
     parser.add_argument('-a', '--aoi', metavar='AOI_NAME', type=str, default=None, help='aoi name where to project gaze', required=True)
     parser.add_argument('-t', '--time_range', metavar=('START_TIME', 'END_TIME'), nargs=2, type=float, default=(0., None), help='start and end time (in second)')
-    parser.add_argument('-dev', '--deviation_max_threshold', metavar='DISPERSION_THRESHOLD', type=int, default=50, help='maximal distance for fixation identification in pixel')
+    parser.add_argument('-dev', '--deviation_max_threshold', metavar='DISPERSION_THRESHOLD', type=int, default=None, help='maximal distance for fixation identification in pixel')
     parser.add_argument('-dmin', '--duration_min_threshold', metavar='DURATION_MIN_THRESHOLD', type=int, default=200, help='minimal duration for fixation identification in millisecond')
     parser.add_argument('-o', '--output', metavar='OUT', type=str, default=None, help='destination folder path (segment folder by default)')
     parser.add_argument('-w', '--window', metavar='DISPLAY', type=bool, default=True, help='enable window display', action=argparse.BooleanOptionalAction)
@@ -60,6 +60,8 @@ def main():
     aoi_filepath = f'{destination_path}/../aoi.json'
 
+    positions_json_filepath = f'{destination_path}/gaze_positions.json'
+
     fixations_json_filepath = f'{destination_path}/gaze_fixations.json'
     saccades_json_filepath = f'{destination_path}/gaze_saccades.json'
     movements_json_filepath = f'{destination_path}/gaze_movements.json'
@@ -88,157 +90,211 @@ def main():
     tobii_segment_video = tobii_segment.load_video()
     print(f'\nVideo properties:\n\tduration: {tobii_segment_video.duration/1e6} s\n\twidth: {tobii_segment_video.width} px\n\theight: {tobii_segment_video.height} px')
 
-    # Load a tobii segment data
-    tobii_segment_data = tobii_segment.load_data()
-
-    print(f'\nLoaded data count:')
-    for name in tobii_segment_data.keys():
-        print(f'\t{name}: {len(tobii_segment_data[name])} data')
+    # Check that gaze positions have already been exported to not process them again
+    if os.path.exists(positions_json_filepath):
 
-    # Access to timestamped gaze position data buffer
-    tobii_ts_gaze_positions = tobii_segment_data['GazePosition']
+        # Load gaze positions
+        ts_gaze_positions = GazeFeatures.TimeStampedGazePositions.from_json(positions_json_filepath)
 
-    # Access to timestamped gaze 3D positions data buffer
-    tobii_ts_gaze_positions_3d = tobii_segment_data['GazePosition3D']
+        print(f'\nLoaded gaze positions count:')
+        print(f'\tPositions: {len(ts_gaze_positions)}')
 
-    # Format tobii gaze position and precision in pixel and project it in aoi scene
-    ts_gaze_positions = GazeFeatures.TimeStampedGazePositions()
+        invalid_gaze_position_count = 0
+        inner_precisions_px = []
 
-    # Gaze projection metrics
-    ts_projection_metrics = DataStructures.TimeStampedBuffer()
-    invalid_gaze_position_count = 0
+        for ts, gaze_position in ts_gaze_positions.items():
 
-    # Starting with no AOI projection
-    ts_current_aoi = 0
-    current_aoi = AOIFeatures.AreaOfInterest()
+            if not gaze_position.valid:
 
-    # Initialise progress bar
-    MiscFeatures.printProgressBar(0, tobii_segment_video.duration, prefix = '\nGazePositions projection:', suffix = 'Complete', length = 100)
+                invalid_gaze_position_count += 1
 
-    for ts, tobii_gaze_position in tobii_ts_gaze_positions.items():
+            else:
 
-        # Update Progress Bar
-        progress = ts - int(args.time_range[0] * 1e6)
-        MiscFeatures.printProgressBar(progress, tobii_segment_video.duration, prefix = 'GazePositions projection:', suffix = 'Complete', length = 100)
+                inner_precisions_px.append(gaze_position.precision)
 
-        # Edit default aoi error
-        current_aoi_error = 'No available AOI projection'
+        print(f'\tInvalid positions: {invalid_gaze_position_count}/{len(ts_gaze_positions)} ({100*invalid_gaze_position_count/len(ts_gaze_positions):.2f} %)')
 
-        try:
+        inner_precision_px_mean = round(numpy.mean(inner_precisions_px))
+        print(f'\tMean of projected precisions: {inner_precision_px_mean} px')
 
-            # Get the last aoi projection until the current gaze position timestamp
-            ts_current_aois, current_aois = ts_aois_projections.pop_last_until(ts)
+    # Project gaze positions into the selected AOI
+    else:
 
-            assert(ts_current_aois <= ts)
+        # Load a tobii segment data
+        tobii_segment_data = tobii_segment.load_data()
+
+        print(f'\nLoaded data count:')
+        for name in tobii_segment_data.keys():
+            print(f'\t{name}: {len(tobii_segment_data[name])} data')
 
-            # Catch aoi error to not update current aoi
-            if 'error' in current_aois.keys():
+        # Access to timestamped gaze position data buffer
+        tobii_ts_gaze_positions = tobii_segment_data['GazePosition']
 
-                # Remove extra error info after ':'
-                current_aoi_error = current_aois.pop('error').split(':')[0]
+        # Access to timestamped gaze 3D positions data buffer
+        tobii_ts_gaze_positions_3d = tobii_segment_data['GazePosition3D']
 
-            # Or update current aoi
-            elif args.aoi in current_aois.keys():
+        # Format tobii gaze position and precision in pixel and project it in aoi scene
+        ts_gaze_positions = GazeFeatures.TimeStampedGazePositions()
 
-                ts_current_aoi = ts_current_aois
-                current_aoi = AOIFeatures.AreaOfInterest(current_aois.pop(args.aoi))
+        # Gaze projection metrics
+        ts_projection_metrics = DataStructures.TimeStampedBuffer()
+        invalid_gaze_position_count = 0
+        inner_precisions_px = []
 
-                current_aoi_error = ''
+        # Starting with no AOI projection
+        ts_current_aoi = 0
+        current_aoi = AOIFeatures.AreaOfInterest()
 
-        # No aoi projection at the beginning
-        except KeyError as e:
-            pass
+        # Initialise progress bar
+        MiscFeatures.printProgressBar(0, tobii_segment_video.duration, prefix = '\nGazePositions projection:', suffix = 'Complete', length = 100)
 
-        # Wait for available aoi
-        if current_aoi.empty:
+        for ts, tobii_gaze_position in tobii_ts_gaze_positions.items():
 
-            ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition(current_aoi_error)
-            invalid_gaze_position_count += 1
-            continue
-
-        # QUESTION: What todo if the current aoi is too old ?
-        # if the aoi didn't move it is not a problem...
-        # For the moment, we avoid 1s old aoi and we provide a metric to assess the problem
-        ts_difference = ts - ts_current_aoi
+            # Update Progress Bar
+            progress = ts - int(args.time_range[0] * 1e6)
+            MiscFeatures.printProgressBar(progress, tobii_segment_video.duration, prefix = 'GazePositions projection:', suffix = 'Complete', length = 100)
 
-        # If aoi is not updated after the
-        if ts_difference >= args.duration_min_threshold*1e3:
+            # Edit default aoi error
+            current_aoi_error = 'No available AOI projection'
 
-            current_aoi = AOIFeatures.AreaOfInterest()
-            ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition('AOI projection is too old (> 1s)')
-            invalid_gaze_position_count += 1
-            continue
+            try:
 
-        ts_projection_metrics[ts] = {'frame': ts_current_aois, 'age': ts_difference}
+                # Get the last aoi projection until the current gaze position timestamp
+                ts_current_aois, current_aois = ts_aois_projections.pop_last_until(ts)
 
-        # Test gaze position validity
-        if tobii_gaze_position.validity == 0:
+                assert(ts_current_aois <= ts)
 
-            gaze_position_px = (int(tobii_gaze_position.value[0] * tobii_segment_video.width), int(tobii_gaze_position.value[1] * tobii_segment_video.height))
-
-            # Get gaze position 3D at same gaze position timestamp
-            tobii_gaze_position_3d = tobii_ts_gaze_positions_3d.pop(ts)
+                # Catch aoi error to not update current aoi
+                if 'error' in current_aois.keys():
+
+                    # Remove extra error info after ':'
+                    current_aoi_error = current_aois.pop('error').split(':')[0]
+
+                # Or update current aoi
+                elif args.aoi in current_aois.keys():
+
+                    ts_current_aoi = ts_current_aois
+                    current_aoi = AOIFeatures.AreaOfInterest(current_aois.pop(args.aoi))
+
+                    current_aoi_error = ''
+
+            # No aoi projection at the beginning
+            except KeyError as e:
+                pass
+
+            # Wait for available aoi
+            if current_aoi.empty:
 
-            # Test gaze position 3d validity
-            if tobii_gaze_position_3d.validity == 0:
+                ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition(current_aoi_error)
+                invalid_gaze_position_count += 1
+                continue
 
-                gaze_precision_mm = numpy.sin(numpy.deg2rad(TobiiSpecifications.PRECISION)) * tobii_gaze_position_3d.value[2]
-                tobii_camera_hfov_mm = numpy.sin(numpy.deg2rad(TobiiSpecifications.CAMERA_HFOV)) * tobii_gaze_position_3d.value[2]
+            # QUESTION: What todo if the current aoi is too old ?
+            # if the aoi didn't move it is not a problem...
+            # For the moment, we avoid 1s old aoi and we provide a metric to assess the problem
+            ts_difference = ts - ts_current_aoi
+
+            # If aoi is not updated after the
+            if ts_difference >= args.duration_min_threshold*1e3:
+
+                current_aoi = AOIFeatures.AreaOfInterest()
+                ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition('AOI projection is too old (> 1s)')
+                invalid_gaze_position_count += 1
+                continue
+
+            ts_projection_metrics[ts] = {'frame': ts_current_aois, 'age': ts_difference}
+
+            # Test gaze position validity
+            if tobii_gaze_position.validity == 0:
+
+                gaze_position_px = (int(tobii_gaze_position.value[0] * tobii_segment_video.width), int(tobii_gaze_position.value[1] * tobii_segment_video.height))
 
-                gaze_precision_px = round(tobii_segment_video.width * float(gaze_precision_mm) / float(tobii_camera_hfov_mm))
+                # Get gaze position 3D at same gaze position timestamp
+                tobii_gaze_position_3d = tobii_ts_gaze_positions_3d.pop(ts)
 
-                # Edit gaze position
-                gaze_position = GazeFeatures.GazePosition(gaze_position_px, precision=gaze_precision_px)
+                # Test gaze position 3d validity
+                if tobii_gaze_position_3d.validity == 0:
+
+                    gaze_precision_mm = numpy.sin(numpy.deg2rad(TobiiSpecifications.PRECISION)) * tobii_gaze_position_3d.value[2]
+                    tobii_camera_hfov_mm = numpy.sin(numpy.deg2rad(TobiiSpecifications.CAMERA_HFOV)) * tobii_gaze_position_3d.value[2]
+
+                    gaze_precision_px = round(tobii_segment_video.width * float(gaze_precision_mm) / float(tobii_camera_hfov_mm))
+
+                    # Edit gaze position
+                    gaze_position = GazeFeatures.GazePosition(gaze_position_px)
 
-                # Project gaze position into selected aois
-                if current_aoi.contains_point(gaze_position.value):
+                    # Project gaze position into selected aois
+                    if current_aoi.contains_point(gaze_position.value):
 
-                    inner_x, inner_y = current_aoi.inner_axis(gaze_position.value)
-                    inner_precision_px = gaze_precision_px * tobii_segment_video.width * tobii_segment_video.height / current_aoi.area
+                        inner_x, inner_y = current_aoi.inner_axis(gaze_position.value)
+                        inner_precision_px = gaze_precision_px * tobii_segment_video.width * tobii_segment_video.height / current_aoi.area
 
-                    # Store inner gaze position for further movement processing
-                    # TEMP: 1920x1080 are Screen_Plan dimensions
-                    ts_gaze_positions[ts] = GazeFeatures.GazePosition((round(inner_x*1920), round((1.0 - inner_y)*1080)))#, precision=inner_precision_px)
+                        # Store inner precision for metrics
+                        inner_precisions_px.append(inner_precision_px)
+
+                        # Store inner gaze position for further movement processing
+                        # TEMP: 1920x1080 are Screen_Plan dimensions
+                        ts_gaze_positions[ts] = GazeFeatures.GazePosition((round(inner_x*1920), round((1.0 - inner_y)*1080)), precision=inner_precision_px)
+
+                    else:
+
+                        ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition(f'GazePosition not inside {args.aoi}')
+                        invalid_gaze_position_count += 1
 
                 else:
 
-                    ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition(f'GazePosition not inside {args.aoi}')
+                    ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition(f'Invalid Tobii GazePosition3D')
                     invalid_gaze_position_count += 1
 
             else:
 
-                ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition(f'Invalid Tobii GazePosition3D')
+                ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition(f'Invalid Tobii GazePosition')
                 invalid_gaze_position_count += 1
 
+        print(f'\nGazePositions projection metrics:')
+
+        print(f'\tInvalid positions: {invalid_gaze_position_count}/{len(tobii_ts_gaze_positions)} ({100*invalid_gaze_position_count/len(tobii_ts_gaze_positions):.2f} %)')
+
+        if len(ts_projection_metrics):
+
+            projection_metrics_dataframe = ts_projection_metrics.as_dataframe()
+            print(f'\tAOI age mean: {projection_metrics_dataframe.age.mean() * 1e-3:.3f} ms')
+            print(f'\tAOI age max: {projection_metrics_dataframe.age.max() * 1e-3:.3f} ms')
+
+            inner_precision_px_mean = round(numpy.mean(inner_precisions_px))
+            print(f'\tMean of projected precisions: {inner_precision_px_mean} px')
+
         else:
 
-            ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition(f'Invalid Tobii GazePosition')
-            invalid_gaze_position_count += 1
+            print(print(f'\t no AOI projected'))
 
-    print(f'\nGazePositions projection metrics:')
+        ts_gaze_positions.to_json(positions_json_filepath)
+        print(f'\nProjected gaze positions saved into {positions_json_filepath}')
+
+    print(f'\nGazeMovement identifier setup:')
+
+    if args.deviation_max_threshold == None:
+
+        selected_deviation_max_threshold = inner_precision_px_mean
+        print(f'\tDispersion threshold: {selected_deviation_max_threshold} px (equal to mean of projected precisions)')
 
-    if len(ts_projection_metrics):
-        projection_metrics_dataframe = ts_projection_metrics.as_dataframe()
-        print(f'\t AOI age mean (ms): {projection_metrics_dataframe.age.mean() * 1e-3}')
-        print(f'\t AOI age max (ms): {projection_metrics_dataframe.age.max() * 1e-3}')
     else:
-        print(print(f'\t no AOI projected'))
 
-    print(f'\t Invalid gaze positions: {invalid_gaze_position_count}')
+        selected_deviation_max_threshold = args.deviation_max_threshold
+        print(f'\tDispersion threshold: {selected_deviation_max_threshold} px')
 
-    print(f'\nGazeMovement identifier parameters:')
-    print(f'\tDispersion threshold = {args.deviation_max_threshold}')
-    print(f'\tDuration threshold = {args.duration_min_threshold}')
+    print(f'\tDuration threshold: {args.duration_min_threshold} ms')
+
+    movement_identifier = DispersionBasedGazeMovementIdentifier.GazeMovementIdentifier(selected_deviation_max_threshold, args.duration_min_threshold*1e3)
 
     # Start movement identification
-    movement_identifier = DispersionBasedGazeMovementIdentifier.GazeMovementIdentifier(args.deviation_max_threshold, args.duration_min_threshold*1e3)
     ts_fixations = GazeFeatures.TimeStampedGazeMovements()
     ts_saccades = GazeFeatures.TimeStampedGazeMovements()
     ts_movements = GazeFeatures.TimeStampedGazeMovements()
     ts_status = GazeFeatures.TimeStampedGazeStatus()
 
     # Initialise progress bar
-    MiscFeatures.printProgressBar(0, tobii_segment_video.duration, prefix = '\nGazeMovements identification:', suffix = 'Complete', length = 100)
+    #MiscFeatures.printProgressBar(0, tobii_segment_video.duration, prefix = '\nGazeMovements identification:', suffix = 'Complete', length = 100)
 
     for gaze_movement in movement_identifier(ts_gaze_positions):
@@ -274,7 +330,7 @@ def main():
 
         # Update Progress Bar
         progress = start_ts - int(args.time_range[0] * 1e6)
-        MiscFeatures.printProgressBar(progress, tobii_segment_video.duration, prefix = 'Gaze movements identification:', suffix = 'Complete', length = 100)
+        #MiscFeatures.printProgressBar(progress, tobii_segment_video.duration, prefix = 'Gaze movements identification:', suffix = 'Complete', length = 100)
 
     print(f'\nGazeMovements identification metrics:')
     print(f'\t{len(ts_fixations)} fixations found')
@@ -388,7 +444,7 @@ def main():
 
             # Draw current fixation
             cv.circle(visu_matrix, (int(current_fixation.centroid[0]), int(current_fixation.centroid[1])), int(current_fixation.deviation_max), (0, 255, 0), current_fixation_time_counter)
-            cv.circle(gaze_status_matrix, (int(current_fixation.centroid[0]), int(current_fixation.centroid[1])), int(current_fixation.deviation_max), (0, 255, 0))
+            cv.circle(gaze_status_matrix, (int(current_fixation.centroid[0]), int(current_fixation.centroid[1])), int(current_fixation.deviation_max), (0, 155, 0))
 
             if saccades_exist:
@@ -472,14 +528,14 @@ def main():
                 cv.putText(visu_matrix, f'{ts_start*1e-3:.3f} ms: {start_gaze_position.message}', (20, 1060 - (gaze_position_count)*50), cv.FONT_HERSHEY_SIMPLEX, 1, color_error, 1, cv.LINE_AA)
 
             # Draw start gaze
-            start_gaze_position.draw(visu_matrix)
-            start_gaze_position.draw(gaze_status_matrix)
+            start_gaze_position.draw(visu_matrix, draw_precision=False)
+            start_gaze_position.draw(gaze_status_matrix, draw_precision=False)
 
             if start_gaze_position.valid and next_gaze_position.valid:
 
                 # Draw movement from start to next
-                cv.line(visu_matrix, start_gaze_position, next_gaze_position, (0, 255, 255), 1)
-                cv.line(gaze_status_matrix, start_gaze_position, next_gaze_position, (0, 255, 255), 1)
+                cv.line(visu_matrix, start_gaze_position, next_gaze_position, (0, 55, 55), 1)
+                cv.line(gaze_status_matrix, start_gaze_position, next_gaze_position, (0, 55, 55), 1)
 
             gaze_position_count += 1
@@ -487,8 +543,7 @@ def main():
         # Write last start gaze position
         cv.putText(visu_matrix, str(start_gaze_position.value), start_gaze_position.value, cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
-        cv.putText(gaze_status_matrix, str(start_gaze_position.value), start_gaze_position.value, cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
-
+
         # Write last start gaze position timing
         cv.rectangle(visu_matrix, (0, 50), (550, 100), (31, 31, 31), -1)
         cv.putText(visu_matrix, f'Gaze time: {ts_start*1e-3:.3f} ms', (20, 85), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
@@ -503,12 +558,12 @@ def main():
 
         # Write movement identification parameters
        cv.rectangle(visu_matrix, (0, 150), (550, 310), (63, 63, 63), -1)
-        cv.putText(visu_matrix, f'Deviation max: {args.deviation_max_threshold} px', (20, 210), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
+        cv.putText(visu_matrix, f'Deviation max: {selected_deviation_max_threshold} px', (20, 210), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
         cv.putText(visu_matrix, f'Duration min: {args.duration_min_threshold} ms', (20, 270), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
 
        # Draw dispersion threshold circle
-        cv.circle(visu_matrix, (args.deviation_max_threshold + 400, 230), 2, (0, 255, 255), -1)
-        cv.circle(visu_matrix, (args.deviation_max_threshold + 400, 230), args.deviation_max_threshold, (255, 150, 150), 1)
+        cv.circle(visu_matrix, (selected_deviation_max_threshold + 400, 230), 2, (0, 255, 255), -1)
+        cv.circle(visu_matrix, (selected_deviation_max_threshold + 400, 230), selected_deviation_max_threshold, (255, 150, 150), 1)
 
     if args.window:
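The commit's second change caches the projected gaze positions as `gaze_positions.json` next to the other segment exports, so reruns skip the projection loop entirely. A minimal sketch of the pattern, assuming a `from argaze import GazeFeatures` import; `project` is a hypothetical callable standing in for the AOI projection loop, while the `from_json`/`to_json` calls are the ones visible in the diff:

```python
import os

from argaze import GazeFeatures  # assumed import path for the GazeFeatures module

def load_or_export_gaze_positions(positions_json_filepath, project):
    """Reuse a previous gaze projection export when it exists, otherwise
    run the (expensive) projection once and save it for the next run.
    `project` is a hypothetical callable that must return a
    GazeFeatures.TimeStampedGazePositions."""
    if os.path.exists(positions_json_filepath):

        # Load gaze positions exported by a previous run
        return GazeFeatures.TimeStampedGazePositions.from_json(positions_json_filepath)

    # First run: project gaze positions into the AOI, then save them
    ts_gaze_positions = project()
    ts_gaze_positions.to_json(positions_json_filepath)

    return ts_gaze_positions
```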
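Finally, the threshold selection itself: with `--deviation_max_threshold` now defaulting to `None`, the script falls back to the mean of the projected precisions. A sketch of that decision in isolation, using a hypothetical helper name:

```python
import numpy

def select_deviation_threshold(cli_threshold, inner_precisions_px):
    """Mirror of the selection logic in the diff: an explicit
    --deviation_max_threshold wins; otherwise use the rounded mean of the
    precisions of gaze positions projected into the AOI."""
    if cli_threshold is None:
        return round(numpy.mean(inner_precisions_px))
    return cli_threshold

# With no CLI value, a mean projected precision of 24 px becomes the
# dispersion threshold passed to the GazeMovementIdentifier.
assert select_deviation_threshold(None, [20, 24, 28]) == 24
assert select_deviation_threshold(50, [20, 24, 28]) == 50
```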