From 82f70c15876c93caa1836910975bf28a770857d1 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Wed, 10 May 2023 15:12:30 +0200 Subject: Using new I-VT identification algorithm --- src/argaze/utils/demo_gaze_features_run.py | 48 ++++++++++++++++++++++-------- 1 file changed, 35 insertions(+), 13 deletions(-) (limited to 'src') diff --git a/src/argaze/utils/demo_gaze_features_run.py b/src/argaze/utils/demo_gaze_features_run.py index 4a6527c..3eaa79f 100644 --- a/src/argaze/utils/demo_gaze_features_run.py +++ b/src/argaze/utils/demo_gaze_features_run.py @@ -20,6 +20,7 @@ def main(): parser.add_argument('environment', metavar='ENVIRONMENT', type=str, help='ar environment filepath') parser.add_argument('-dev', '--deviation_max_threshold', metavar='DEVIATION_MAX_THRESHOLD', type=int, default=50, help='maximal distance for fixation identification in pixel') + parser.add_argument('-vel', '--velocity_max_threshold', metavar='VELOCITY_MAX_THRESHOLD', type=int, default=1, help='maximal velocity for fixation identification in pixel/millisecond') parser.add_argument('-dmin', '--duration_min_threshold', metavar='DURATION_MIN_THRESHOLD', type=int, default=200, help='minimal duration for fixation identification in millisecond') parser.add_argument('-s', '--window-size', metavar='WINDOW_SIZE', type=tuple, default=(1920, 1080), help='size of window in pixel') args = parser.parse_args() @@ -37,12 +38,18 @@ def main(): window_name = "AOI Scene" cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE) - # Init gaze movements processing + # Init gaze processing gaze_position = GazeFeatures.GazePosition() - gaze_movement_identifier = DispersionThresholdIdentification.GazeMovementIdentifier(args.deviation_max_threshold, args.duration_min_threshold) + gaze_movement_identifier = { + 'I-DT': DispersionThresholdIdentification.GazeMovementIdentifier(args.deviation_max_threshold, args.duration_min_threshold), + 'I-VT': 
VelocityThresholdIdentification.GazeMovementIdentifier(args.velocity_max_threshold, args.duration_min_threshold) + } + identification_mode = 'I-DT' + visual_scan_path = GazeFeatures.VisualScanPath() tpm = TransitionProbabilityMatrix.VisualScanPathAnalyzer() tpm_analysis = None + gaze_movement_lock = threading.Lock() # Init timestamp @@ -67,16 +74,16 @@ def main(): # Lock gaze movement exploitation gaze_movement_lock.acquire() - # Identify gaze movement - gaze_movement = gaze_movement_identifier.identify(data_ts, gaze_position) + # Identify gaze movement according to selected identification mode + gaze_movement = gaze_movement_identifier[identification_mode].identify(data_ts, gaze_position) - if isinstance(gaze_movement, DispersionThresholdIdentification.Fixation): + if GazeFeatures.is_fixation(gaze_movement): # Does the fixation match an AOI? look_at = 'Screen' for name, aoi in aoi_scene_projection.items(): - _, _, circle_ratio = aoi.circle_intersection(gaze_movement.centroid, args.deviation_max_threshold) + _, _, circle_ratio = aoi.circle_intersection(gaze_movement.focus, args.deviation_max_threshold) if circle_ratio > 0.25: @@ -101,7 +108,7 @@ def main(): print(f'Error on {e.aoi} step:', e) - if isinstance(gaze_movement, DispersionThresholdIdentification.Saccade): + elif GazeFeatures.is_saccade(gaze_movement): # Append saccade to visual scan path visual_scan_path.append_saccade(data_ts, gaze_movement) @@ -122,19 +129,22 @@ def main(): aoi_matrix = numpy.full((int(args.window_size[1]), int(args.window_size[0]), 3), 0, dtype=numpy.uint8) + # Write identification mode + cv2.putText(aoi_matrix, f'Gaze movement identification mode: {identification_mode} (Press \'m\' key to switch)', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA) + # Lock gaze movement identification gaze_movement_lock.acquire() # Check fixation identification - if gaze_movement_identifier.current_fixation != None: + if gaze_movement_identifier[identification_mode].current_fixation
!= None: - current_fixation = gaze_movement_identifier.current_fixation + current_fixation = gaze_movement_identifier[identification_mode].current_fixation # Draw looked AOI - aoi_scene_projection.draw_circlecast(aoi_matrix, current_fixation.centroid, current_fixation.deviation_max) + aoi_scene_projection.draw_circlecast(aoi_matrix, current_fixation.focus, current_fixation.deviation_max) # Draw current fixation - cv2.circle(aoi_matrix, (int(current_fixation.centroid[0]), int(current_fixation.centroid[1])), int(current_fixation.deviation_max), (0, 255, 0), len(current_fixation.positions)) + cv2.circle(aoi_matrix, (int(current_fixation.focus[0]), int(current_fixation.focus[1])), int(current_fixation.deviation_max), (0, 255, 0), len(current_fixation.positions)) # Draw current fixation gaze positions gaze_positions = current_fixation.positions.copy() @@ -158,9 +168,9 @@ def main(): aoi_scene_projection.draw(aoi_matrix, color=(0, 0, 255)) # Check saccade identification - if gaze_movement_identifier.current_saccade != None: + if gaze_movement_identifier[identification_mode].current_saccade != None: - current_saccade = gaze_movement_identifier.current_saccade + current_saccade = gaze_movement_identifier[identification_mode].current_saccade # Draw current saccade gaze positions gaze_positions = current_saccade.positions.copy() @@ -181,6 +191,18 @@ def main(): # Draw frame cv2.imshow(window_name, aoi_matrix) + key_pressed = cv2.waitKey(10) + + #if key_pressed != -1: + # print(key_pressed) + + # Switch identification mode with 'm' key + if key_pressed == 109: + + mode_list = list(gaze_movement_identifier.keys()) + current_index = mode_list.index(identification_mode) + 1 + identification_mode = mode_list[current_index % len(mode_list)] + # Stop calibration by pressing 'Esc' key if cv2.waitKey(10) == 27: break -- cgit v1.1