#!/usr/bin/env python

""" """

__author__ = "Théo de la Hogue"
__credits__ = []
__copyright__ = "Copyright 2023, Ecole Nationale de l'Aviation Civile (ENAC)"
__license__ = "BSD"

import argparse
import os
import time

from argaze import ArFeatures, GazeFeatures
from argaze.GazeAnalysis import *

import cv2
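
# Example invocation (the JSON path below is illustrative; point it at an
# ArGaze environment description of your own):
#
#   python demo_gaze_features_run.py demo_environment/setup.json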

def main():
    """
    Load an AR environment from a .json file, project its AOI scene on screen and use the mouse pointer to simulate gaze positions.
    """

    current_directory = os.path.dirname(os.path.abspath(__file__))

    # Manage arguments
    parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0])
    parser.add_argument('environment', metavar='ENVIRONMENT', type=str, help='ar environment filepath')
    args = parser.parse_args()

    # Load AR environment
    ar_environment = ArFeatures.ArEnvironment.from_json(args.environment)

    # Select AR screen
    ar_screen = ar_environment.scenes["AR Scene Demo"].screens["GrayRectangle"]

    # Create a window to display AR environment
    cv2.namedWindow(ar_screen.name, cv2.WINDOW_AUTOSIZE)

    # Heatmap buffer display option
    enable_heatmap_buffer = False

    # Init timestamp
    start_time = time.time()

    # Simulate gaze position from mouse pointer position
    def on_mouse_event(event, x, y, flags, param):

        try:

            # Compute timestamp in milliseconds
            timestamp = int((time.time() - start_time) * 1e3)

            # Project gaze position onto the screen
            ar_screen.look(timestamp, GazeFeatures.GazePosition((x, y)))

        except GazeFeatures.AOIScanStepError as e:

            print(f'Error on {e.aoi} step:', e)

    # Attach mouse callback to window
    cv2.setMouseCallback(ar_screen.name, on_mouse_event)
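    # Note: HighGUI dispatches mouse events while cv2.waitKey runs in the loop below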

    # Run until 'Esc' key or 'Ctrl+C' interruption
    try:

        # Analyse mouse positions
        while True:

            # Draw screen
            image = ar_screen.background.copy()

            # Draw heatmap
            if ar_screen.heatmap:
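                # Blend the heatmap (weight 0.5) over the screen background (weight 1.0)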

                image = cv2.addWeighted(ar_screen.heatmap.image, 0.5, image, 1., 0)

                # Write heatmap buffer status and key hint
                buffer_on_off = 'on' if enable_heatmap_buffer else 'off'
                buffer_display_disable = 'disable' if enable_heatmap_buffer else 'enable'
                cv2.putText(image, f'Heatmap buffer: {buffer_on_off} (Press \'b\' key to {buffer_display_disable})', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255) if enable_heatmap_buffer else (255, 255, 255), 1, cv2.LINE_AA)

            # Draw AOI
            ar_screen.aoi_2d_scene.draw(image, color=(0, 0, 0))

            # Draw gaze position
            ar_screen.current_gaze_position.draw(image, color=(255, 255, 255))

            # Draw gaze movements
            current_gaze_movement = ar_screen.current_gaze_movement

            current_gaze_movement.draw(image, color=(0, 255, 255))
            current_gaze_movement.draw_positions(image)

            # Check screen fixation
            if GazeFeatures.is_fixation(current_gaze_movement):

                # Draw looked AOI
                ar_screen.aoi_2d_scene.draw_circlecast(image, current_gaze_movement.focus, current_gaze_movement.deviation_max, base_color=(0, 0, 0), matching_color=(255, 255, 255))

            # Write the last 5 steps of the AOI scan path
            path = ''
            for step in ar_screen.aoi_scan_path[-5:]:

                path += f'> {step.aoi} '
            
            path += f'> {ar_screen.aoi_scan_path.current_aoi}'

            cv2.putText(image, path, (20, ar_screen.size[1]-40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)

            # Display Transition matrix analysis if loaded
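            # Each analyzer below is looked up by name; a KeyError means it was
            # not declared in the environment JSON, so its display is skipped.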
            try:

                transition_matrix_analyzer = ar_screen.aoi_scan_path_analyzers["TransitionMatrix"]

                cv2.putText(image, f'Transition matrix density: {transition_matrix_analyzer.transition_matrix_density:.2f}', (20, ar_screen.size[1]-160), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
                
                # Iterate over indexes (departures)
                for from_aoi, row in transition_matrix_analyzer.transition_matrix_probabilities.iterrows():

                    # Iterate over columns (destinations)
                    for to_aoi, probability in row.items():

                        if from_aoi != to_aoi and probability > 0.0:

                            from_center = ar_screen.aoi_2d_scene[from_aoi].center.astype(int)
                            to_center = ar_screen.aoi_2d_scene[to_aoi].center.astype(int)
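                            # The colored line starts at the segment midpoint, presumably so
                            # that A->B and B->A transitions over the same AOI pair remain
                            # distinguishable.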
                            start_line = (0.5 * from_center + 0.5 * to_center).astype(int)

                            color = [int(probability*200) + 55, int(probability*200) + 55, int(probability*200) + 55]

                            cv2.line(image, start_line, to_center, color, int(probability*10) + 2)
                            cv2.line(image, from_center, to_center, [55, 55, 55], 2)
                
            except KeyError:
                pass

            # Display scan path K Coefficient analysis if loaded
            try:

                kc_analyzer = ar_screen.scan_path_analyzers["KCoefficient"]
                
                # Write raw Kc analysis
                if kc_analyzer.K < 0.:

                    cv2.putText(image, f'K coefficient: Ambient attention', (20, ar_screen.size[1]-120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
                
                elif kc_analyzer.K > 0.:

                    cv2.putText(image, f'K coefficient: Focal attention', (20, ar_screen.size[1]-120), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 1, cv2.LINE_AA)
                
            except KeyError:
                pass

            # Display aoi scan path K-modified coefficient analysis if loaded
            try:

                aoi_kc_analyzer = ar_screen.aoi_scan_path_analyzers["KCoefficient"]

                # Write aoi Kc analysis
                if aoi_kc_analyzer.K < 0.:

                    cv2.putText(image, f'K-modified coefficient: Ambient attention', (20, ar_screen.size[1]-80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
                
                elif aoi_kc_analyzer.K > 0.:

                    cv2.putText(image, f'K-modified coefficient: Focal attention', (20, ar_screen.size[1]-80), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 1, cv2.LINE_AA)
                
            except KeyError:
                pass

            # Display Lempel-Ziv complexity analysis if loaded
            try:

                lzc_analyzer = ar_screen.aoi_scan_path_analyzers["LempelZivComplexity"]

                cv2.putText(image, f'Lempel-Ziv complexity: {lzc_analyzer.lempel_ziv_complexity}', (20, ar_screen.size[1]-200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)

            except KeyError:
                pass

            # Display N-Gram analysis if loaded
            try:

                ngram_analyzer = ar_screen.aoi_scan_path_analyzers["NGram"]

                # Display the 3-gram analysis only, stacked upward from the bottom edge (40 px per row)
                start = ar_screen.size[1] - ((len(ngram_analyzer.ngrams_count[3]) + 1) * 40)
                cv2.putText(image, '3-Gram:', (ar_screen.size[0]-700, start-40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)

                for i, (ngram, count) in enumerate(ngram_analyzer.ngrams_count[3].items()):

                    ngram_string = f'{ngram[0]}'
                    for g in range(1, 3):
                        ngram_string += f'>{ngram[g]}'

                    cv2.putText(image, f'{ngram_string}: {count}', (ar_screen.size[0]-700, start+(i*40)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)

            except KeyError:
                pass

            # Display Entropy analysis if loaded
            try:

                entropy_analyzer = ar_screen.aoi_scan_path_analyzers["Entropy"]

                cv2.putText(image, f'Stationary entropy: {entropy_analyzer.stationary_entropy:.3f},', (20, ar_screen.size[1]-280), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
                cv2.putText(image, f'Transition entropy: {entropy_analyzer.transition_entropy:.3f},', (20, ar_screen.size[1]-240), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
                
            except KeyError:
                pass

            # Display Nearest Neighbor index analysis if loaded
            try:

                nni_analyzer = ar_screen.scan_path_analyzers["NearestNeighborIndex"]

                cv2.putText(image, f'Nearest neighbor index: {nni_analyzer.nearest_neighbor_index:.3f}', (20, ar_screen.size[1]-320), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
                
            except KeyError:
                pass

            # Display Exploit/Explore ratio analysis if loaded
            try:

                xxr_analyser = ar_screen.scan_path_analyzers["ExploitExploreRatio"]

                cv2.putText(image, f'Exploit explore ratio: {xxr_analyser.exploit_explore_ratio:.3f}', (20, ar_screen.size[1]-360), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)

            except KeyError:

                pass

            # Draw image
            cv2.imshow(ar_screen.name, image)

            key_pressed = cv2.waitKey(10)

            # Uncomment to print key codes while debugging
            #if key_pressed != -1:
            #    print(key_pressed)

            # Reload environment with 'r' key
            if key_pressed == ord('r'):

                ar_environment = ArFeatures.ArEnvironment.from_json(args.environment)
                ar_screen = ar_environment.scenes["AR Scene Demo"].screens["GrayRectangle"]

            # Toggle heatmap buffer with 'b' key
            if key_pressed == ord('b') and ar_screen.heatmap:

                enable_heatmap_buffer = not enable_heatmap_buffer

                # Use a 10-frame buffer when enabled, no buffer otherwise
                ar_screen.heatmap.init(10 if enable_heatmap_buffer else 0)

            # Stop demo by pressing 'Esc' key
            if key_pressed == 27:
                break

    # Stop demo on 'Ctrl+C' interruption
    except KeyboardInterrupt:
        pass

    # Stop image display
    cv2.destroyAllWindows()

if __name__ == '__main__':

    main()