From 40787d693268f355670775dd31da4f0a2084fd64 Mon Sep 17 00:00:00 2001
From: Théo de la Hogue
Date: Tue, 6 Sep 2022 13:40:04 +0200
Subject: Evaluating gaze precision using gaze position 3D data.

---
 .../export_tobii_segment_aruco_visual_scan.py | 49 ++++++++++++++++------
 1 file changed, 36 insertions(+), 13 deletions(-)

diff --git a/src/argaze/utils/export_tobii_segment_aruco_visual_scan.py b/src/argaze/utils/export_tobii_segment_aruco_visual_scan.py
index 2d5d186..8da9f41 100644
--- a/src/argaze/utils/export_tobii_segment_aruco_visual_scan.py
+++ b/src/argaze/utils/export_tobii_segment_aruco_visual_scan.py
@@ -90,8 +90,7 @@ def main():
     tobii_ts_gaze_positions = tobii_segment_data['GazePosition']
 
     # Access to timestamped gaze 3D positions data buffer
-    #tobii_ts_gaze_3d_positions = tobii_segment_data.gidx_gp3
-    #print(f'{len(tobii_ts_gaze_3d_positions)} gaze 3D positions loaded')
+    tobii_ts_gaze_positions_3d = tobii_segment_data['GazePosition3D']
 
     # Prepare video exportation at the same format than segment video
     output_video = TobiiVideo.TobiiVideoOutput(vs_video_filepath, tobii_segment_video.get_stream())
@@ -181,11 +180,14 @@ def main():
     # Create timestamped buffer to store gaze positions in time
     ts_gaze_positions = GazeFeatures.TimeStampedGazePositions()
 
+    # Create timestamped buffer to store gaze precision in time
+    ts_gaze_precisions = GazeFeatures.TimeStampedGazePrecisions()
+
     # Video and data replay loop
     try:
 
         # Initialise progress bar
-        MiscFeatures.printProgressBar(0, tobii_segment_video.get_duration()/1000, prefix = 'Progress:', suffix = 'Complete', length = 100)
+        #MiscFeatures.printProgressBar(0, tobii_segment_video.get_duration()/1000, prefix = 'Progress:', suffix = 'Complete', length = 100)
 
         # Iterate on video frames
         for video_ts, video_frame in tobii_segment_video.frames():
@@ -197,22 +199,43 @@ def main():
 
             try:
 
+                # !!! the parameters below are specific to the TobiiGlassesPro2 !!!
+                camera_precision = 1.5
+                camera_opening = 82
+
                 # Get nearest gaze position before video timestamp and remove all gaze positions before
-                nearest_gaze_ts, nearest_gaze_position = tobii_ts_gaze_positions.pop_first_until(video_ts)
+                _, nearest_gaze_position = tobii_ts_gaze_positions.pop_first_until(video_ts)
+
+                # Get nearest gaze position 3D before video timestamp and remove all gaze positions before
+                _, nearest_gaze_position_3d = tobii_ts_gaze_positions_3d.pop_first_until(video_ts)
+
+                # Consider gaze position if gaze precision can be evaluated
+                if nearest_gaze_position_3d.value[2] > 0:
+
+                    gaze_position_pixel = (int(nearest_gaze_position.value[0] * video_frame.width), int(nearest_gaze_position.value[1] * video_frame.height))
+
+                    gaze_precision_mm = numpy.sin(numpy.deg2rad(camera_precision)) * nearest_gaze_position_3d.value[2]
+                    camera_opening_mm = numpy.sin(numpy.deg2rad(camera_opening)) * nearest_gaze_position_3d.value[2]
+                    gaze_precision_pixel = round(video_frame.width * float(gaze_precision_mm) / float(camera_opening_mm))
+
+                    # Draw gaze position and precision
+                    cv.circle(video_frame.matrix, gaze_position_pixel, 2, (0, 255, 255), -1)
+                    cv.circle(video_frame.matrix, gaze_position_pixel, gaze_precision_pixel, (0, 255, 255), 1)
+
+                    # Store gaze position and precision at this time in millisecond
+                    ts_gaze_positions[round(video_ts_ms)] = gaze_position_pixel
+                    ts_gaze_precisions[round(video_ts_ms)] = gaze_precision_pixel
 
-                # Draw gaze position
-                video_gaze_pixel = (int(nearest_gaze_position.value[0] * video_frame.width), int(nearest_gaze_position.value[1] * video_frame.height))
-                cv.circle(video_frame.matrix, video_gaze_pixel, 4, (0, 255, 255), -1)
+                else:
 
-                # Store gaze position at this time in millisecond
-                ts_gaze_positions[round(video_ts_ms)] = video_gaze_pixel
+                    raise ValueError('Unable to evaluate gaze precision')
 
             # Wait for gaze position
             except ValueError:
                 continue
 
             # Track markers with pose estimation and draw them
-            aruco_tracker.track(video_frame.matrix, check_rotation=False)
+            aruco_tracker.track(video_frame.matrix)
             aruco_tracker.draw(video_frame.matrix)
 
             # Draw focus area
@@ -265,7 +288,7 @@ def main():
             if aoi2D_visu_scene == None:
                 continue
 
-            look_at = aoi2D_video_scene['Visualisation_Plan'].look_at(video_gaze_pixel)
+            look_at = aoi2D_video_scene['Visualisation_Plan'].look_at(gaze_position_pixel)
 
             visu_gaze_pixel = aoi2D_visu_scene['Visualisation_Plan'].looked_pixel(look_at)
             cv.circle(aoi2D_visu_frame, visu_gaze_pixel, 4, (0, 0, 255), -1)
@@ -275,7 +298,7 @@ def main():
         for name, aoi_array in aoi2D_dict.items():
             aoi2D_merged_scene[name] = numpy.sum(aoi_array, axis=0) / len(aoi_array)
 
-        aoi2D_merged_scene.draw(video_frame.matrix, video_gaze_pixel, exclude=['Visualisation_Plan'])
+        aoi2D_merged_scene.draw(video_frame.matrix, gaze_position_pixel, exclude=['Visualisation_Plan'])
 
         # Store 2D merged scene at this time in millisecond
         ts_aois_scenes[round(video_ts_ms)] = aoi2D_merged_scene
@@ -298,7 +321,7 @@ def main():
 
             # Update Progress Bar
             progress = video_ts_ms - int(args.time_range[0] * 1000)
-            MiscFeatures.printProgressBar(progress, tobii_segment_video.get_duration()/1000, prefix = 'Progress:', suffix = 'Complete', length = 100)
+            #MiscFeatures.printProgressBar(progress, tobii_segment_video.get_duration()/1000, prefix = 'Progress:', suffix = 'Complete', length = 100)
 
     # Exit on 'ctrl+C' interruption
     except KeyboardInterrupt:
-- 
cgit v1.1
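
The precision circle introduced by this patch is derived by projecting two angles onto a plane at the gaze depth: the tracker's angular precision (camera_precision, in degrees) and the scene camera's horizontal opening (camera_opening, in degrees), both scaled by the Z component of GazePosition3D; their ratio then scales the frame width into a pixel radius. Below is a minimal standalone sketch of that computation, not part of the patch: the 1.5 degree / 82 degree values come from the patch itself, while the function name precision_radius_px, the 1920 px frame width, and the depth values are illustrative assumptions.

import numpy

# Tobii Glasses Pro 2 specific parameters, as hard-coded in the patch above
camera_precision = 1.5  # gaze angular precision (degrees)
camera_opening = 82     # scene camera horizontal opening (degrees)

def precision_radius_px(gaze_depth_mm, frame_width):
    """Convert the tracker's angular precision into a pixel radius,
    given the gaze depth (Z component of GazePosition3D) and the
    scene video frame width, following the patch's formula."""
    # Scale the sine of each angle by the gaze depth to get extents in mm
    gaze_precision_mm = numpy.sin(numpy.deg2rad(camera_precision)) * gaze_depth_mm
    camera_opening_mm = numpy.sin(numpy.deg2rad(camera_opening)) * gaze_depth_mm
    # Express the precision extent as a fraction of the camera opening, in pixels
    return round(frame_width * float(gaze_precision_mm) / float(camera_opening_mm))

# Illustrative values (not from the patch): a 1920 px wide scene video
# and two different gaze depths in millimetres.
print(precision_radius_px(650.0, 1920))   # -> 51
print(precision_radius_px(1300.0, 1920))  # -> 51

Note that the depth cancels in the ratio (sin(p) * z / (sin(o) * z) = sin(p) / sin(o)), so with this formula the drawn radius depends only on the frame width, which is why both calls above print 51. The depth test in the patch (nearest_gaze_position_3d.value[2] > 0) therefore gates whether precision can be evaluated at all rather than scaling the circle.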