#!/usr/bin/env python
"""ArGaze demo: detect ArUco markers in camera images, project an AR
environment onto them, and fake gaze positions with the mouse pointer."""

__author__ = "Théo de la Hogue"
__credits__ = []
__copyright__ = "Copyright 2023, Ecole Nationale de l'Aviation Civile (ENAC)"
__license__ = "BSD"

import argparse
import contextlib
import os
import time

from argaze import ArFeatures, GazeFeatures

import cv2
import numpy


def main():
    """
    Load AR environment from .json file, detect ArUco markers into camera device images and project it.
    """

    # Manage arguments
    # NOTE: argparse description is the docstring text before the first '-'
    parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0])
    parser.add_argument('environment', metavar='ENVIRONMENT', type=str, help='ar environment filepath')
    parser.add_argument('-s', '--source', metavar='SOURCE', type=str, default='0', help='video capture source (a number to select camera device or a filepath to load a movie)')
    args = parser.parse_args()

    # Load AR environment
    ar_environment = ArFeatures.ArEnvironment.from_json(args.environment)

    # Create a window to display AR environment
    cv2.namedWindow(ar_environment.name, cv2.WINDOW_AUTOSIZE)

    # Init timestamp
    start_time = time.time()

    # Fake gaze position with mouse pointer
    def on_mouse_event(event, x, y, flags, param):
        """Project the current mouse position into the environment as a fake gaze position."""

        # Edit millisecond timestamp
        timestamp = int((time.time() - start_time) * 1e3)

        # Project gaze position into environment
        for frame, look_data in ar_environment.look(timestamp, GazeFeatures.GazePosition((x, y))):

            # Unpack look data
            fixation, scan_step_analysis, aoi_scan_step_analysis, times, exception = look_data

            # Do something with look data
            # ...

    # Attach mouse callback to window
    cv2.setMouseCallback(ar_environment.name, on_mouse_event)

    # Enable camera video capture into separate thread
    video_capture = cv2.VideoCapture(int(args.source) if args.source.isdecimal() else args.source)

    try:

        # Waiting for 'ctrl+C' interruption
        with contextlib.suppress(KeyboardInterrupt):

            # Capture images
            while video_capture.isOpened():

                # Read video image
                success, video_image = video_capture.read()

                if success:

                    # Detect and project environment
                    detection_time, exceptions = ar_environment.detect_and_project(video_image)

                    # Create environment image
                    environment_image = ar_environment.image

                    # Write detection fps (guard against a zero-millisecond detection time)
                    detection_fps = 1e3 / detection_time if detection_time > 0 else 0.
                    cv2.rectangle(environment_image, (0, 0), (420, 50), (63, 63, 63), -1)
                    cv2.putText(environment_image, f'Detection fps: {detection_fps:.1f}', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)

                    # Handle exceptions
                    for i, (scene_name, e) in enumerate(exceptions.items()):

                        # Write errors: each error gets a 50 px row; text sits 40 px
                        # into its own row so it stays inside its background rectangle
                        cv2.rectangle(environment_image, (0, (i+1)*50), (720, (i+2)*50), (127, 127, 127), -1)
                        cv2.putText(environment_image, f'{scene_name} error: {e}', (20, (i+1)*50 + 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)

                    # Draw environment info
                    ar_environment.draw(environment_image)

                    # Display environment
                    cv2.imshow(ar_environment.name, environment_image)

                    # Draw and display each aoi frames
                    for aoi_frame in ar_environment.aoi_frames:

                        # Create frame image
                        aoi_frame_image = aoi_frame.image

                        # Draw frame info
                        aoi_frame.draw(aoi_frame_image)

                        # Display frame
                        cv2.imshow(f'{aoi_frame.parent.name}:{aoi_frame.name}', aoi_frame_image)

                # Stop by pressing 'Esc' key
                if cv2.waitKey(10) == 27:

                    break

    finally:

        # Always release the capture device and close windows,
        # even when the loop is left through a 'ctrl+C' interruption
        video_capture.release()
        cv2.destroyAllWindows()


if __name__ == '__main__':

    main()