#!/usr/bin/env python

import argparse
import time
import threading

from argaze import ArFeatures, GazeFeatures
from argaze.GazeAnalysis import *
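# Note: this wildcard import is what brings DispersionThresholdIdentification,
# VelocityThresholdIdentification and TransitionProbabilityMatrix into scope below.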

import cv2
import numpy

def main():
    """
    Load an AR environment from a .json file, project its AOI scene on screen and use the mouse pointer to simulate gaze positions.
    """

    # Manage arguments
    parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0])
    parser.add_argument('environment', metavar='ENVIRONMENT', type=str, help='ar environment filepath')

    parser.add_argument('-dev', '--deviation_max_threshold', metavar='DEVIATION_MAX_THRESHOLD', type=int, default=50, help='maximal distance for fixation identification in pixels')
    parser.add_argument('-vel', '--velocity_max_threshold', metavar='VELOCITY_MAX_THRESHOLD', type=int, default=1, help='maximal velocity for fixation identification in pixels/millisecond')
    parser.add_argument('-dmin', '--duration_min_threshold', metavar='DURATION_MIN_THRESHOLD', type=int, default=200, help='minimal duration for fixation identification in milliseconds')
    parser.add_argument('-s', '--window-size', metavar='WINDOW_SIZE', type=int, nargs=2, default=(1920, 1080), help='size of window in pixels (width height)')
    args = parser.parse_args()

    # Load AR environment
    demo_environment = ArFeatures.ArEnvironment.from_json(args.environment)

    # Access to main AR scene
    demo_scene = demo_environment.scenes["AR Scene Demo"]

    # Project AOI scene onto the window plane (Full HD by default)
    aoi_scene_projection = demo_scene.orthogonal_projection * args.window_size

    # Create a window to display AR environment
    window_name = "AOI Scene"
    cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)

    # Init gaze processing
    gaze_position = GazeFeatures.GazePosition()
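
    # Two gaze movement identifiers are set up: I-DT groups successive positions by
    # spatial dispersion, while I-VT thresholds point-to-point velocity.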
    gaze_movement_identifier = {
        'I-DT': DispersionThresholdIdentification.GazeMovementIdentifier(args.deviation_max_threshold, args.duration_min_threshold),
        'I-VT': VelocityThresholdIdentification.GazeMovementIdentifier(args.velocity_max_threshold, args.duration_min_threshold)
    }
    identification_mode = 'I-DT'

    visual_scan_path = GazeFeatures.VisualScanPath()
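    # The analyzer estimates the probability of transitions between each pair of AOI
    # along the visual scan path.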
    tpm = TransitionProbabilityMatrix.VisualScanPathAnalyzer()
    tpm_analysis = None

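    # The mouse callback may run concurrently with the display loop, so a lock keeps
    # gaze movement identification and its drawing from interleaving.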
    gaze_movement_lock = threading.Lock()

    # Init timestamp
    start_ts = time.time()

    # Mouse callback: update gaze position and identify gaze movement
    def on_mouse_event(event, x, y, flags, param):

        nonlocal gaze_position
        nonlocal tpm_analysis

        # Compute a millisecond timestamp relative to script start
        data_ts = int((time.time() - start_ts) * 1e3)

        # Update gaze position with mouse pointer position
        gaze_position = GazeFeatures.GazePosition((x, y))

        # Don't identify a new gaze movement while the previous one is being used by the display loop
        if gaze_movement_lock.locked():
            return

        # Lock gaze movement exploitation
        gaze_movement_lock.acquire()

        # Identify gaze movement according to the selected identification mode
        gaze_movement = gaze_movement_identifier[identification_mode].identify(data_ts, gaze_position)

        if GazeFeatures.is_fixation(gaze_movement):

            # Does the fixation match an AOI?
            look_at = 'Screen'
            for name, aoi in aoi_scene_projection.items():

                _, _, circle_ratio = aoi.circle_intersection(gaze_movement.focus, args.deviation_max_threshold)

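                # Consider the AOI looked at when at least a quarter of the deviation circle overlaps it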
                if circle_ratio > 0.25:

                    if name != 'Screen':

                        look_at = name
                        break

            try:

                # Append fixation to visual scan path
                new_step = visual_scan_path.append_fixation(data_ts, gaze_movement, look_at)

                # Analyse transition probabilities
                if new_step and len(visual_scan_path) > 1:

                    tpm_analysis = tpm.analyze(visual_scan_path)

                    print(tpm_analysis)

            except GazeFeatures.VisualScanStepError as e:

                print(f'Error on {e.aoi} step:', e)

        elif GazeFeatures.is_saccade(gaze_movement):

            # Append saccade to visual scan path
            visual_scan_path.append_saccade(data_ts, gaze_movement)

        # Unlock gaze movement exploitation
        gaze_movement_lock.release()

        return

    # Attach mouse callback to window
    cv2.setMouseCallback(window_name, on_mouse_event)

    # Wait for 'ctrl+C' interruption
    try:

        # Display loop: draw gaze movement identification results
        while True:

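            # Start each frame from a black image sized to the window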
            aoi_matrix = numpy.full((int(args.window_size[1]), int(args.window_size[0]), 3), 0, dtype=numpy.uint8)

            # Write identification mode
            cv2.putText(aoi_matrix, f'Gaze movement identification mode: {identification_mode} (Press \'m\' key to switch)', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
                
            # Lock gaze movement identification
            gaze_movement_lock.acquire()
         
            # Check fixation identification
            if gaze_movement_identifier[identification_mode].current_fixation is not None:

                current_fixation = gaze_movement_identifier[identification_mode].current_fixation

                # Draw looked AOI
                aoi_scene_projection.draw_circlecast(aoi_matrix, current_fixation.focus, current_fixation.deviation_max)

                # Draw current fixation (circle thickness grows with the number of positions)
                cv2.circle(aoi_matrix, (int(current_fixation.focus[0]), int(current_fixation.focus[1])), int(current_fixation.deviation_max), (0, 255, 0), len(current_fixation.positions))
                
                # Draw current fixation gaze positions
                gaze_positions = current_fixation.positions.copy()
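                # Walk the buffer pairwise: pop the first position and peek at the next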
                while len(gaze_positions) >= 2:

                    ts_start, start_gaze_position = gaze_positions.pop_first()
                    ts_next, next_gaze_position = gaze_positions.first

                    # Draw start gaze
                    start_gaze_position.draw(aoi_matrix, draw_precision=False)

                    # Draw movement from start to next
                    cv2.line(aoi_matrix, start_gaze_position, next_gaze_position, (0, 55, 55), 1)

            else:

                # Draw pointer as gaze position
                gaze_position.draw(aoi_matrix, draw_precision=False)

                # Draw AOI scene projection
                aoi_scene_projection.draw(aoi_matrix, color=(0, 0, 255))

            # Check saccade identification
            if gaze_movement_identifier[identification_mode].current_saccade is not None:

                current_saccade = gaze_movement_identifier[identification_mode].current_saccade

                # Draw current saccade gaze positions
                gaze_positions = current_saccade.positions.copy()
                while len(gaze_positions) >= 2:

                    ts_start, start_gaze_position = gaze_positions.pop_first()
                    ts_next, next_gaze_position = gaze_positions.first

                    # Draw start gaze
                    start_gaze_position.draw(aoi_matrix, draw_precision=False)

                    # Draw movement from start to next
                    cv2.line(aoi_matrix, start_gaze_position, next_gaze_position, (0, 0, 255), 1)

            # Unlock gaze movement identification
            gaze_movement_lock.release()

            # Draw frame
            cv2.imshow(window_name, aoi_matrix)

            key_pressed = cv2.waitKey(10)

            # Switch identification mode with 'm' key
            if key_pressed == ord('m'):

                mode_list = list(gaze_movement_identifier.keys())
                current_index = mode_list.index(identification_mode) + 1
                identification_mode = mode_list[current_index % len(mode_list)]

            # Stop demo by pressing 'Esc' key (waitKey returns -1 when no key is pressed)
            if key_pressed == 27:
                break

    # Stop demo on 'ctrl+C' interruption
    except KeyboardInterrupt:
        pass

    # Stop frame display
    cv2.destroyAllWindows()

if __name__ == '__main__':

    main()