path: root/src/argaze/utils/demo_gaze_features_run.py
blob: 67c7a527a87300728c93e1125da41905621c479f
#!/usr/bin/env python

""" """

__author__ = "Théo de la Hogue"
__credits__ = []
__copyright__ = "Copyright 2023, Ecole Nationale de l'Aviation Civile (ENAC)"
__license__ = "BSD"

import argparse
import os
import time
import threading

from argaze import ArFeatures, GazeFeatures
from argaze.AreaOfInterest import AOIFeatures
from argaze.GazeAnalysis import *

import cv2
import numpy
import pandas

def main():
    """
    Load an AR environment from a .json file to project an AOI scene on screen and use the mouse pointer to simulate gaze positions.
    """

    current_directory = os.path.dirname(os.path.abspath(__file__))

    # Manage arguments
    parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0])
    parser.add_argument('-dev', '--deviation_max_threshold', metavar='DEVIATION_MAX_THRESHOLD', type=int, default=50, help='maximal distance for fixation identification in pixel')
    parser.add_argument('-vel', '--velocity_max_threshold', metavar='VELOCITY_MAX_THRESHOLD', type=int, default=1, help='maximal velocity for fixation identification in pixel/millisecond')
    parser.add_argument('-dmin', '--duration_min_threshold', metavar='DURATION_MIN_THRESHOLD', type=int, default=200, help='minimal duration for fixation identification in millisecond')
    args = parser.parse_args()
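    # Example invocation (illustrative values; thresholds are in pixels,
    # pixels/millisecond and milliseconds respectively):
    #   python demo_gaze_features_run.py -dev 75 -vel 2 -dmin 100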

    # Load AR environment
    demo_environment_filepath = os.path.join(current_directory, 'demo_environment/setup.json')
    demo_environment = ArFeatures.ArEnvironment.from_json(demo_environment_filepath)

    # Access the main AR scene
    demo_scene = demo_environment.scenes["AR Scene Demo"]

    # Load aoi scene image
    aoi_scene_filepath = os.path.join(current_directory, 'demo_environment/aoi_scene.jpg')
    aoi_scene_image = cv2.imread(aoi_scene_filepath)

    window_size = [aoi_scene_image.shape[1], aoi_scene_image.shape[0]]

    # Project AOI scene at the window size
    aoi_scene_projection = demo_scene.orthogonal_projection * window_size
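    # (orthogonal_projection is assumed to expose AOIs in normalized
    #  coordinates, hence the scaling by the window size)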

    # Create a window to display AR environment
    window_name = "AOI Scene"
    cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)

    # Init gaze processing
    gaze_position = GazeFeatures.GazePosition()

    screen_frame = AOIFeatures.AOIFrame(aoi_scene_projection['Screen'], window_size)
    gaze_spread_sum = numpy.zeros((aoi_scene_image.shape[0], aoi_scene_image.shape[1]))
    gaze_spread_buffer = []
    gaze_spread_buffer_size = 10
    heatmap_matrix = numpy.zeros(aoi_scene_image.shape, dtype=numpy.uint8)

    enable_heatmap = False
    clear_sum_and_buffer = False
    enable_heatmap_buffer = False

    gaze_movement_identifier = {
        'I-DT': DispersionThresholdIdentification.GazeMovementIdentifier(args.deviation_max_threshold, args.duration_min_threshold),
        'I-VT': VelocityThresholdIdentification.GazeMovementIdentifier(args.velocity_max_threshold, args.duration_min_threshold)
    }
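    # Two standard identification algorithms (Salvucci & Goldberg taxonomy):
    # I-DT groups gaze positions whose dispersion stays under a maximal
    # deviation, I-VT separates fixations from saccades on a velocity
    # threshold; both also require a minimal fixation duration.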
    identification_mode = 'I-DT'

    raw_scan_path = GazeFeatures.ScanPath()
    aoi_scan_path = GazeFeatures.AOIScanPath()
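    # The raw scan path records fixations and saccades as such, while the AOI
    # scan path records the sequence of AOIs looked at.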

    tpm = TransitionProbabilityMatrix.AOIScanPathAnalyzer()
    tpm_analysis = pandas.DataFrame()
    enable_tpm_analysis = False

    raw_cK_analyzer = CoefficientK.ScanPathAnalyzer()
    raw_cK_analysis = 0

    aoi_cK_analyzer = CoefficientK.AOIScanPathAnalyzer()
    aoi_cK_analysis = 0

    ck_mode = 'raw'
    enable_ck_analysis = False

    gaze_movement_lock = threading.Lock()
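    # The mouse callback runs on the OpenCV GUI thread while drawing happens
    # in the loop below; this lock keeps the two from using the identification
    # results at the same time.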

    # Init timestamp
    start_ts = time.time()

    # Update pointer position
    def on_mouse_event(event, x, y, flags, param):

        nonlocal gaze_position
        nonlocal gaze_spread_sum
        nonlocal gaze_spread_buffer
        nonlocal heatmap_matrix
        nonlocal clear_sum_and_buffer
        nonlocal tpm_analysis
        nonlocal raw_cK_analysis
        nonlocal aoi_cK_analysis

        # Compute a millisecond timestamp relative to startup
        data_ts = int((time.time() - start_ts) * 1e3)

        # Update gaze position with mouse pointer position
        gaze_position = GazeFeatures.GazePosition((x, y))

        # Skip gaze movement identification while the previous identification is being used by the display loop
        if gaze_movement_lock.locked():
            return

        # Lock gaze movement exploitation
        gaze_movement_lock.acquire()

        # Update heatmap
        if enable_heatmap:
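            # point_spread presumably returns a 2D weight map (e.g. a Gaussian
            # bump) centered on the gaze position; accumulating these maps
            # over time builds the heatmap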

            gaze_spread = screen_frame.point_spread(gaze_position.value, sigma=0.05)

            # Clear sum and buffer once
            if clear_sum_and_buffer:
                gaze_spread_sum = numpy.zeros((aoi_scene_image.shape[0], aoi_scene_image.shape[1]))
                gaze_spread_buffer = []
                clear_sum_and_buffer = False

            # Add to sum and fill buffer
            gaze_spread_sum += gaze_spread
            gaze_spread_buffer.append(gaze_spread)

            # In buffered mode, keep a sliding window: subtract the oldest
            # spread so the heatmap only reflects recent gaze positions
            if enable_heatmap_buffer and len(gaze_spread_buffer) > gaze_spread_buffer_size:

                gaze_spread_sum -= gaze_spread_buffer.pop(0)

            heatmap_gray = (255 * gaze_spread_sum / numpy.max(gaze_spread_sum)).astype(numpy.uint8)
            heatmap_matrix = cv2.applyColorMap(heatmap_gray, cv2.COLORMAP_JET)
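            # (the normalization above maps the running sum to 0-255 grayscale
            #  before the JET colormap is applied)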

        else:

            # Identify gaze movement according to the selected identification mode
            gaze_movement = gaze_movement_identifier[identification_mode].identify(data_ts, gaze_position)

            if GazeFeatures.is_fixation(gaze_movement):

                # Does the fixation match an AOI?
                look_at = 'Screen'
                for name, aoi in aoi_scene_projection.items():

                    _, _, circle_ratio = aoi.circle_intersection(gaze_movement.focus, args.deviation_max_threshold)
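                    # circle_ratio presumably measures how much of the
                    # fixation's deviation circle falls inside this AOI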

                    if circle_ratio > 0.25:

                        if name != 'Screen':

                            look_at = name
                            break

                # Append fixation to raw scan path
                raw_scan_path.append_fixation(data_ts, gaze_movement)

                try:

                    # Append fixation to aoi scan path
                    new_step = aoi_scan_path.append_fixation(data_ts, gaze_movement, look_at)

                    # Analyse aoi scan path
                    if new_step and len(aoi_scan_path) > 1:

                        if enable_tpm_analysis:

                            tpm_analysis = tpm.analyze(aoi_scan_path)

                        if enable_ck_analysis:

                            aoi_cK_analysis = aoi_cK_analyzer.analyze(aoi_scan_path)

                except GazeFeatures.AOIScanStepError as e:

                    print(f'Error on {e.aoi} step:', e)

            elif GazeFeatures.is_saccade(gaze_movement):

                # Append saccade to raw scan path
                new_step = raw_scan_path.append_saccade(data_ts, gaze_movement)

                # Analyse scan path
                if new_step and len(raw_scan_path) > 1:

                    if enable_ck_analysis:

                        raw_cK_analysis = raw_cK_analyzer.analyze(raw_scan_path)

                # Append saccade to aoi scan path
                aoi_scan_path.append_saccade(data_ts, gaze_movement)

        # Unlock gaze movement exploitation
        gaze_movement_lock.release()

        return

    # Attach mouse callback to window
    cv2.setMouseCallback(window_name, on_mouse_event)

    # Wait for 'ctrl+C' interruption
    try:

        # Display loop: render the AOI scene and analysis overlays
        while True:

            aoi_matrix = aoi_scene_image.copy()

            # Lock gaze movement identification
            gaze_movement_lock.acquire()

            # Write heatmap help
            on_off = 'on' if enable_heatmap else 'off'
            enable_disable = 'disable' if enable_heatmap else 'enable'
            buffer_on_off = 'on' if enable_heatmap_buffer else 'off'
            buffer_enable_disable = 'disable' if enable_heatmap_buffer else 'enable'
            cv2.putText(aoi_matrix, f'Heatmap: {on_off} (Press \'h\' key to {enable_disable}), Buffer: {buffer_on_off} (Press \'b\' key to {buffer_enable_disable})', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)

            # Draw gaze spread heatmap
            if enable_heatmap:

                aoi_matrix = cv2.addWeighted(heatmap_matrix, 0.5, aoi_matrix, 1., 0)

            else:

                # Write identification mode
                cv2.putText(aoi_matrix, f'Gaze movement identification mode: {identification_mode} (Press \'m\' key to switch)', (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
                    
                # Write TPM help
                on_off = 'on' if enable_tpm_analysis else 'off'
                display_hide = 'hide' if enable_tpm_analysis else 'display'
                cv2.putText(aoi_matrix, f'Transition probability matrix: {on_off} (Press \'t\' key to {display_hide})', (20, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)

                # Write cK help
                on_off = 'on' if enable_ck_analysis else 'off'
                display_hide = 'hide' if enable_ck_analysis else 'display'
                cv2.putText(aoi_matrix, f'coefficient K: {on_off} (Press \'k\' key to {display_hide})', (20, 160), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)

                # Check fixation identification
                if gaze_movement_identifier[identification_mode].current_fixation is not None:

                    current_fixation = gaze_movement_identifier[identification_mode].current_fixation

                    # Draw looked AOI
                    aoi_scene_projection.draw_circlecast(aoi_matrix, current_fixation.focus, current_fixation.deviation_max, base_color=(0, 0, 0), matching_color=(255, 255, 255))

                    # Draw current fixation
                    current_fixation.draw(aoi_matrix, color=(255, 255, 0))

                    # Draw current fixation gaze positions
                    current_fixation.draw_positions(aoi_matrix)

                else:

                    # Draw pointer as gaze position
                    gaze_position.draw(aoi_matrix, draw_precision=False)

                    # Draw AOI scene projection
                    aoi_scene_projection.draw(aoi_matrix, color=(0, 0, 0))

                # Check saccade identification
                if gaze_movement_identifier[identification_mode].current_saccade is not None:

                    current_saccade = gaze_movement_identifier[identification_mode].current_saccade

                    # Draw current saccade gaze positions
                    current_saccade.draw_positions(aoi_matrix)

                # Draw last 10 steps of raw scan path
                raw_scan_path.draw(aoi_matrix, fixation_color=(255, 0, 255), deepness=10)

                # Write last 5 steps of aoi scan path
                path = ''
                for step in aoi_scan_path[-5:]:

                    path += f'> {step.aoi} '
                
                path += f'> {aoi_scan_path.current_aoi}'

                cv2.putText(aoi_matrix, path, (20, window_size[1]-40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
            
                # Draw transition probability matrix
                if enable_tpm_analysis:

                    for from_aoi, column in tpm_analysis.items():

                        for to_aoi, probability in column.items():

                            if from_aoi != to_aoi and probability > 0.0:
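                                # Brightness and thickness encode the
                                # transition probability; the bright segment
                                # starts at the midpoint so the direction of
                                # the transition stays readable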

                                from_center = aoi_scene_projection[from_aoi].center.astype(int)
                                to_center = aoi_scene_projection[to_aoi].center.astype(int)
                                start_line = (0.5 * from_center + 0.5 * to_center).astype(int)

                                color = [int(probability*200) + 55, int(probability*200) + 55, int(probability*200) + 55]

                                cv2.line(aoi_matrix, start_line, to_center, color, int(probability*10) + 2)
                                cv2.line(aoi_matrix, from_center, to_center, [55, 55, 55], 2)

                if enable_ck_analysis:
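                    # Coefficient K (Krejtz et al.): negative values indicate
                    # ambient (scanning) attention, positive values focal
                    # attention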

                    # Write raw cK analysis
                    if raw_cK_analysis < 0.:

                        cv2.putText(aoi_matrix, f'Raw: Ambient attention', (20, window_size[1]-120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
                    
                    elif raw_cK_analysis > 0.:

                        cv2.putText(aoi_matrix, f'Raw: Focal attention', (20, window_size[1]-120), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 1, cv2.LINE_AA)
                    
                    # Write aoi cK analysis
                    if aoi_cK_analysis < 0.:

                        cv2.putText(aoi_matrix, f'AOI: Ambient attention', (20, window_size[1]-80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
                    
                    elif aoi_cK_analysis > 0.:

                        cv2.putText(aoi_matrix, f'AOI: Focal attention', (20, window_size[1]-80), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 1, cv2.LINE_AA)

            # Unlock gaze movement identification
            gaze_movement_lock.release()

            # Draw frame
            cv2.imshow(window_name, aoi_matrix)

            key_pressed = cv2.waitKey(10)

            #if key_pressed != -1:
            #    print(key_pressed)

            # Switch identification mode with 'm' key 
            if key_pressed == ord('m'):

                mode_list = list(gaze_movement_identifier.keys())
                current_index = mode_list.index(identification_mode) + 1
                identification_mode = mode_list[current_index % len(mode_list)]

            # Enable heatmap with 'h' key
            if key_pressed == ord('h'):

                enable_heatmap = not enable_heatmap

            # Enable heatmap buffer with 'b' key
            if key_pressed == ord('b'):

                enable_heatmap_buffer = not enable_heatmap_buffer

                if enable_heatmap_buffer:
                    clear_sum_and_buffer = True

            # Enable cK analysis with 'k' key 
            if key_pressed == ord('k'):

                enable_ck_analysis = not enable_ck_analysis

            # Enable TPM analysis with 't' key 
            if key_pressed == ord('t'):

                enable_tpm_analysis = not enable_tpm_analysis

            # Stop the demo by pressing 'Esc' key
            # (reuse key_pressed rather than calling cv2.waitKey again, which
            #  would consume a second key event and could miss the key)
            if key_pressed == 27:
                break

    # Stop the demo on 'ctrl+C' interruption
    except KeyboardInterrupt:
        pass

    # Stop frame display
    cv2.destroyAllWindows()

if __name__ == '__main__':

    main()