aboutsummaryrefslogtreecommitdiff
path: root/src/argaze/utils/demo_ar_features_run.py
blob: c7f7e38ee3b88331e37a9b3224c30e85b7aaeee0 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
#!/usr/bin/env python

""" """

__author__ = "Théo de la Hogue"
__credits__ = []
__copyright__ = "Copyright 2023, Ecole Nationale de l'Aviation Civile (ENAC)"
__license__ = "BSD"

import argparse
import contextlib
import os
import time

from argaze import ArFeatures, GazeFeatures

import cv2
import numpy

def main():
    """
    Load AR environment from .json file, detect ArUco markers into camera device images and project it.
    """

    # Manage arguments
    parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0])
    parser.add_argument('environment', metavar='ENVIRONMENT', type=str, help='ar environment filepath')
    parser.add_argument('-s', '--source', metavar='SOURCE', type=str, default='0', help='video capture source (a number to select camera device or a filepath to load a movie)')
    args = parser.parse_args()

    # Load AR environment
    ar_environment = ArFeatures.ArEnvironment.from_json(args.environment)

    # Create a window to display AR environment
    cv2.namedWindow(ar_environment.name, cv2.WINDOW_AUTOSIZE)

    # Init timestamp
    start_time = time.time()

    # Fake gaze position with mouse pointer
    def on_mouse_event(event, x, y, flags, param):
        """Project the mouse pointer position into the environment as a fake gaze position."""

        # Edit millisecond timestamp
        timestamp = int((time.time() - start_time) * 1e3)

        # Project gaze position into environment
        for frame, look_data in ar_environment.look(timestamp, GazeFeatures.GazePosition((x, y))):

            # Unpack look data
            fixation, scan_step_analysis, aoi_scan_step_analysis, times, exception = look_data

            # Do something with look data
            # ...

    # Attach mouse callback to window
    cv2.setMouseCallback(ar_environment.name, on_mouse_event)

    # Enable camera video capture into separate thread
    video_capture = cv2.VideoCapture(int(args.source) if args.source.isdecimal() else args.source)

    try:

        # Waiting for 'ctrl+C' interruption
        with contextlib.suppress(KeyboardInterrupt):

            # Capture images
            while video_capture.isOpened():

                # Read video image
                success, video_image = video_capture.read()

                if success:

                    # Detect and project environment
                    detection_time, exceptions = ar_environment.detect_and_project(video_image)

                    # Write detection fps (guard against a zero detection time)
                    cv2.rectangle(environment_image := ar_environment.image, (0, 0), (420, 50), (63, 63, 63), -1)
                    detection_fps = 1e3 / detection_time if detection_time > 0 else 0.
                    cv2.putText(environment_image, f'Detection fps: {detection_fps:.1f}', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)

                    # Handle exceptions
                    for i, (scene_name, e) in enumerate(exceptions.items()):

                        # Write errors inside their background rectangle
                        cv2.rectangle(environment_image, (0, (i+1)*50), (720, (i+2)*50), (127, 127, 127), -1)
                        cv2.putText(environment_image, f'{scene_name} error: {e}', (20, (i+1)*50 + 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)

                    # Draw environment info
                    ar_environment.draw(environment_image)

                    # Display environment
                    cv2.imshow(ar_environment.name, environment_image)

                    # Draw and display each aoi frames
                    for aoi_frame in ar_environment.aoi_frames:

                        # Create frame image
                        aoi_frame_image = aoi_frame.image

                        # Draw frame info
                        aoi_frame.draw(aoi_frame_image)

                        # Display frame
                        cv2.imshow(f'{aoi_frame.parent.name}:{aoi_frame.name}', aoi_frame_image)

                # Stop by pressing 'Esc' key
                if cv2.waitKey(10) == 27:

                    break

    finally:

        # Always release camera video capture and close windows,
        # even when the loop is left through a 'ctrl+C' interruption
        video_capture.release()

        # Stop image display
        cv2.destroyAllWindows()

# Script entry point
if __name__ == '__main__':

    main()