#!/usr/bin/env python

import argparse
import time
import threading

from argaze import ArFeatures, GazeFeatures
from argaze.GazeAnalysis import DispersionBasedGazeMovementIdentifier

import cv2
import numpy

def main():
    """
    Load an AR environment from a .json file, project its AOI scene on screen, and use the mouse pointer to simulate gaze positions.
    """

    # Manage arguments
    parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0])
    parser.add_argument('environment', metavar='ENVIRONMENT', type=str, help='ar environment filepath')

    parser.add_argument('-dev', '--deviation_max_threshold', metavar='DEVIATION_MAX_THRESHOLD', type=int, default=50, help='maximal dispersion distance for fixation identification, in pixels')
    parser.add_argument('-dmin', '--duration_min_threshold', metavar='DURATION_MIN_THRESHOLD', type=int, default=200, help='minimal duration for fixation identification, in milliseconds')
    parser.add_argument('-s', '--window-size', metavar=('WIDTH', 'HEIGHT'), nargs=2, type=int, default=(1920, 1080), help='size of the window, in pixels')
    args = parser.parse_args()
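
    # Example invocation (hypothetical file path):
    #   python demo_gaze_features_run.py ./environment.json -dev 75 -dmin 300 -s 1920 1080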

    # Load AR environment
    demo_environment = ArFeatures.ArEnvironment.from_json(args.environment)

    # Access to main AR scene
    demo_scene = demo_environment.scenes["AR Scene Demo"]

    # Project AOI scene onto the configured window size
    aoi_scene_projection = demo_scene.orthogonal_projection * tuple(args.window_size)

    # Create a window to display AR environment
    window_name = "AOI Scene"
    cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)

    # Init gaze movement identification
    gaze_position = GazeFeatures.GazePosition()
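    # The dispersion-based identifier groups consecutive gaze positions into a fixation
    # while they stay within deviation_max_threshold pixels, once the group lasts at
    # least duration_min_threshold milliseconds (I-DT style identification)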
    gaze_movement_identifier = DispersionBasedGazeMovementIdentifier.GazeMovementIdentifier(args.deviation_max_threshold, args.duration_min_threshold)
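    # Note: the mouse callback may run concurrently with the display loop below,
    # so this lock protects gaze movement identification against simultaneous access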
    gaze_movement_lock = threading.Lock()

    # Init timestamp
    start_ts = time.time()

    # Handle mouse events: use the pointer position as a simulated gaze position
    def on_mouse_event(event, x, y, flags, param):

        nonlocal gaze_position

        # Compute a millisecond timestamp relative to start time
        data_ts = int((time.time() - start_ts) * 1e3)

        # Update gaze position with mouse pointer position
        gaze_position = GazeFeatures.GazePosition((x, y))

        # Skip identification while the previous result is still being processed by the display loop
        if gaze_movement_lock.locked():
            return

        # Lock gaze movement exploitation
        gaze_movement_lock.acquire()

        # Identify gaze movement
        gaze_movement = gaze_movement_identifier.identify(data_ts, gaze_position)
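
        # identify() consumes the new (timestamp, position) sample and returns a gaze
        # movement once one is recognized; only completed fixations are matched
        # against the AOI scene here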

        if isinstance(gaze_movement, DispersionBasedGazeMovementIdentifier.Fixation):

            # Does the fixation match an AOI? An AOI counts as looked at when at least
            # a quarter of the fixation's deviation circle overlaps it
            for name, aoi in aoi_scene_projection.items():

                _, _, circle_ratio = aoi.circle_intersection(gaze_movement.centroid, args.deviation_max_threshold)

                if circle_ratio > 0.25:

                    if name != 'Screen':

                        print(f'{data_ts}: gaze step on {name} aoi')
                        #gaze_step = GazeStep(gaze_movement, name)

        # Unlock gaze movement exploitation
        gaze_movement_lock.release()

        return

    # Attach mouse callback to window
    cv2.setMouseCallback(window_name, on_mouse_event)

    # Wait for 'Ctrl+C' interruption
    try:

        # Display loop: draw gaze movement identification results
        while True:

            aoi_matrix = numpy.zeros((int(args.window_size[1]), int(args.window_size[0]), 3), dtype=numpy.uint8)

            # Lock gaze movement identification
            gaze_movement_lock.acquire()
         
            # Check fixation identification
            if gaze_movement_identifier.current_fixation is not None:

                current_fixation = gaze_movement_identifier.current_fixation

                # Draw looked AOI
                aoi_scene_projection.draw_circlecast(aoi_matrix, current_fixation.centroid, current_fixation.deviation_max)

                # Draw current fixation circle; line thickness grows with the number of aggregated positions
                cv2.circle(aoi_matrix, (int(current_fixation.centroid[0]), int(current_fixation.centroid[1])), int(current_fixation.deviation_max), (0, 255, 0), len(current_fixation.positions))
                
                # Draw current fixation gaze positions
                gaze_positions = current_fixation.positions.copy()
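                # Walk the copied position buffer pairwise: pop the oldest position
                # and peek at the following one to draw a segment between them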
                while len(gaze_positions) >= 2:

                    ts_start, start_gaze_position = gaze_positions.pop_first()
                    ts_next, next_gaze_position = gaze_positions.first

                    # Draw start gaze
                    start_gaze_position.draw(aoi_matrix, draw_precision=False)

                    # Draw movement from start to next
                    cv2.line(aoi_matrix, start_gaze_position, next_gaze_position, (0, 55, 55), 1)

            else:

                # Draw pointer as gaze position
                gaze_position.draw(aoi_matrix, draw_precision=False)

                # Draw AOI scene projection
                aoi_scene_projection.draw(aoi_matrix, color=(0, 0, 255))

            # Check saccade identification
            if gaze_movement_identifier.current_saccade is not None:

                current_saccade = gaze_movement_identifier.current_saccade

                # Draw current saccade gaze positions
                gaze_positions = current_saccade.positions.copy()
                while len(gaze_positions) >= 2:

                    ts_start, start_gaze_position = gaze_positions.pop_first()
                    ts_next, next_gaze_position = gaze_positions.first

                    # Draw start gaze
                    start_gaze_position.draw(aoi_matrix, draw_precision=False)

                    # Draw movement from start to next
                    cv2.line(aoi_matrix, start_gaze_position, next_gaze_position, (0, 0, 255), 1)

            # Unlock gaze movement identification
            gaze_movement_lock.release()

            # Draw frame
            cv2.imshow(window_name, aoi_matrix)

            # Stop the demo by pressing the 'Esc' key
            if cv2.waitKey(10) == 27:
                break

    # Stop the demo on 'Ctrl+C' interruption
    except KeyboardInterrupt:
        pass

    # Stop frame display
    cv2.destroyAllWindows()

if __name__ == '__main__':

    main()