#!/usr/bin/env python

""" """

__author__ = "Théo de la Hogue"
__credits__ = []
__copyright__ = "Copyright 2023, Ecole Nationale de l'Aviation Civile (ENAC)"
__license__ = "BSD"

import argparse
import os
import time
import threading

from argaze import ArFeatures, GazeFeatures
from argaze.AreaOfInterest import AOIFeatures
from argaze.GazeAnalysis import *

import cv2
import numpy
import pandas
def main():
    """
    Load AR environment from .json file to project AOI scene on screen and use mouse pointer to simulate gaze positions.
    """

    current_directory = os.path.dirname(os.path.abspath(__file__))

    # Manage arguments
    parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0])
    parser.add_argument('-dev', '--deviation_max_threshold', metavar='DEVIATION_MAX_THRESHOLD', type=int, default=50, help='maximal distance for fixation identification in pixels')
    parser.add_argument('-vel', '--velocity_max_threshold', metavar='VELOCITY_MAX_THRESHOLD', type=int, default=1, help='maximal velocity for fixation identification in pixels/millisecond')
    parser.add_argument('-dmin', '--duration_min_threshold', metavar='DURATION_MIN_THRESHOLD', type=int, default=200, help='minimal duration for fixation identification in milliseconds')
    args = parser.parse_args()
    # Load AR environment
    demo_environment_filepath = os.path.join(current_directory, 'demo_environment/setup.json')
    demo_environment = ArFeatures.ArEnvironment.from_json(demo_environment_filepath)

    # Access to main AR scene
    demo_scene = demo_environment.scenes["AR Scene Demo"]

    # Load AOI scene image
    aoi_scene_filepath = os.path.join(current_directory, 'demo_environment/aoi_scene.jpg')
    aoi_scene_image = cv2.imread(aoi_scene_filepath)
    window_size = [aoi_scene_image.shape[1], aoi_scene_image.shape[0]]

    # Project AOI scene onto the window plane
    aoi_scene_projection = demo_scene.orthogonal_projection * window_size
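    # Note: orthogonal_projection is assumed to return AOI vertices normalized to
    # [0, 1]; multiplying by window_size scales them to pixel coordinates.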
    # Create a window to display AR environment
    window_name = "AOI Scene"
    cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)

    # Init gaze processing
    gaze_position = GazeFeatures.GazePosition()
    screen_frame = AOIFeatures.AOIFrame(aoi_scene_projection['Screen'], window_size)
    gaze_spread_sum = numpy.zeros((aoi_scene_image.shape[0], aoi_scene_image.shape[1]))
    heatmap_matrix = numpy.zeros(aoi_scene_image.shape, dtype=numpy.uint8)

    gaze_movement_identifier = {
        'I-DT': DispersionThresholdIdentification.GazeMovementIdentifier(args.deviation_max_threshold, args.duration_min_threshold),
        'I-VT': VelocityThresholdIdentification.GazeMovementIdentifier(args.velocity_max_threshold, args.duration_min_threshold)
    }
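    # I-DT groups consecutive positions whose dispersion stays below the distance
    # threshold, while I-VT cuts the stream wherever point-to-point velocity exceeds
    # the velocity threshold; both use the same minimal fixation duration here.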
    identification_mode = 'I-DT'

    raw_scan_path = GazeFeatures.ScanPath()
    aoi_scan_path = GazeFeatures.AOIScanPath()

    tpm = TransitionProbabilityMatrix.AOIScanPathAnalyzer()
    tpm_analysis = pandas.DataFrame()

    raw_cK_analyzer = CoefficientK.ScanPathAnalyzer()
    raw_cK_analysis = 0

    aoi_cK_analyzer = CoefficientK.AOIScanPathAnalyzer()
    aoi_cK_analysis = 0
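    # Coefficient K contrasts fixation durations with the amplitudes of following
    # saccades: negative values indicate ambient viewing (short fixations, long
    # saccades), positive values focal viewing (long fixations, short saccades).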
    ck_mode = 'raw'

    gaze_movement_lock = threading.Lock()

    # Init timestamp
    start_ts = time.time()
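    # Note: depending on the OpenCV GUI backend, the mouse callback may run
    # concurrently with the display loop below, hence the lock around shared
    # analysis state.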
    # Update pointer position
    def on_mouse_event(event, x, y, flags, param):

        nonlocal gaze_position
        nonlocal gaze_spread_sum
        nonlocal heatmap_matrix
        nonlocal tpm_analysis
        nonlocal raw_cK_analysis
        nonlocal aoi_cK_analysis

        # Edit millisecond timestamp
        data_ts = int((time.time() - start_ts) * 1e3)

        # Update gaze position with mouse pointer position
        gaze_position = GazeFeatures.GazePosition((x, y))

        # Don't identify gaze movement while the former identification is being exploited by the video loop
        if gaze_movement_lock.locked():
            return

        # Lock gaze movement exploitation
        gaze_movement_lock.acquire()

        # Edit heatmap
        gaze_spread_sum += screen_frame.point_spread(gaze_position.value, sigma=0.05)
        heatmap_gray = (255 * gaze_spread_sum / numpy.max(gaze_spread_sum)).astype(numpy.uint8)
        heatmap_matrix = cv2.applyColorMap(heatmap_gray, cv2.COLORMAP_JET)
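        # Each sample adds a Gaussian point spread around the pointer; the running
        # sum is normalized to 8 bits and color-mapped (JET) to build the heatmap.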
        # Identify gaze movement according to the selected identification mode
        gaze_movement = gaze_movement_identifier[identification_mode].identify(data_ts, gaze_position)

        if GazeFeatures.is_fixation(gaze_movement):

            # Does the fixation match an AOI?
            look_at = 'Screen'
            for name, aoi in aoi_scene_projection.items():

                _, _, circle_ratio = aoi.circle_intersection(gaze_movement.focus, args.deviation_max_threshold)

                if circle_ratio > 0.25:

                    if name != 'Screen':

                        look_at = name
                        break
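            # Note: circle_ratio is assumed to be the fraction of a circle centered on
            # the fixation focus (radius: deviation_max_threshold) that overlaps the
            # AOI; an overlap above 25% counts as looking at that AOI.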
            # Append fixation to raw scan path
            raw_scan_path.append_fixation(data_ts, gaze_movement)

            try:

                # Append fixation to AOI scan path
                new_step = aoi_scan_path.append_fixation(data_ts, gaze_movement, look_at)

                # Analyse AOI scan path
                if new_step and len(aoi_scan_path) > 1:

                    tpm_analysis = tpm.analyze(aoi_scan_path)
                    aoi_cK_analysis = aoi_cK_analyzer.analyze(aoi_scan_path)

            except GazeFeatures.AOIScanStepError as e:

                print(f'Error on {e.aoi} step:', e)
        elif GazeFeatures.is_saccade(gaze_movement):

            # Append saccade to raw scan path
            new_step = raw_scan_path.append_saccade(data_ts, gaze_movement)

            # Analyse raw scan path
            if new_step and len(raw_scan_path) > 1:

                raw_cK_analysis = raw_cK_analyzer.analyze(raw_scan_path)

            # Append saccade to AOI scan path
            aoi_scan_path.append_saccade(data_ts, gaze_movement)

        # Unlock gaze movement exploitation
        gaze_movement_lock.release()

        return
    # Attach mouse callback to window
    cv2.setMouseCallback(window_name, on_mouse_event)

    # Waiting for 'ctrl+C' interruption
    try:

        # Analyse mouse positions
        while True:

            aoi_matrix = aoi_scene_image.copy()

            # Lock gaze movement identification
            gaze_movement_lock.acquire()

            # Draw gaze spread heatmap
            aoi_matrix = cv2.addWeighted(heatmap_matrix, 0.5, aoi_matrix, 1., 0)
            #aoi_matrix = numpy.maximum(aoi_matrix, heatmap_matrix)

            # Write identification mode
            cv2.putText(aoi_matrix, f'Gaze movement identification mode: {identification_mode} (Press \'m\' key to switch)', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)

            # Check fixation identification
            if gaze_movement_identifier[identification_mode].current_fixation is not None:

                current_fixation = gaze_movement_identifier[identification_mode].current_fixation

                # Draw looked AOI
                aoi_scene_projection.draw_circlecast(aoi_matrix, current_fixation.focus, current_fixation.deviation_max, base_color=(0, 0, 0), matching_color=(255, 255, 255))

                # Draw current fixation
                cv2.circle(aoi_matrix, (int(current_fixation.focus[0]), int(current_fixation.focus[1])), int(current_fixation.deviation_max), (255, 255, 255), len(current_fixation.positions))

                # Draw current fixation gaze positions
                gaze_positions = current_fixation.positions.copy()
                while len(gaze_positions) >= 2:

                    ts_start, start_gaze_position = gaze_positions.pop_first()
                    ts_next, next_gaze_position = gaze_positions.first

                    # Draw start gaze
                    start_gaze_position.draw(aoi_matrix, draw_precision=False)

                    # Draw movement from start to next
                    cv2.line(aoi_matrix, start_gaze_position, next_gaze_position, (0, 55, 55), 1)
            else:

                # Draw pointer as gaze position
                gaze_position.draw(aoi_matrix, draw_precision=False)

            # Draw AOI scene projection
            aoi_scene_projection.draw(aoi_matrix, color=(0, 0, 0))

            # Check saccade identification
            if gaze_movement_identifier[identification_mode].current_saccade is not None:

                current_saccade = gaze_movement_identifier[identification_mode].current_saccade

                # Draw current saccade gaze positions
                gaze_positions = current_saccade.positions.copy()
                while len(gaze_positions) >= 2:

                    ts_start, start_gaze_position = gaze_positions.pop_first()
                    ts_next, next_gaze_position = gaze_positions.first

                    # Draw start gaze
                    start_gaze_position.draw(aoi_matrix, draw_precision=False)

                    # Draw movement from start to next
                    cv2.line(aoi_matrix, start_gaze_position, next_gaze_position, (0, 0, 255), 1)
            # Write last 5 steps of AOI scan path
            path = ''
            for step in aoi_scan_path[-5:]:
                path += f'> {step.aoi} '
            path += f'> {aoi_scan_path.current_aoi}'

            cv2.putText(aoi_matrix, path, (20, window_size[1] - 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)

            # Draw transition probability matrix
            for from_aoi, column in tpm_analysis.items():

                for to_aoi, probability in column.items():

                    if from_aoi != to_aoi and probability > 0.0:

                        from_center = aoi_scene_projection[from_aoi].center.astype(int)
                        to_center = aoi_scene_projection[to_aoi].center.astype(int)
                        start_line = (0.5 * from_center + 0.5 * to_center).astype(int)

                        color = [int(probability * 200) + 55, int(probability * 200) + 55, int(probability * 200) + 55]

                        cv2.line(aoi_matrix, start_line, to_center, color, int(probability * 10) + 2)
                        cv2.line(aoi_matrix, from_center, to_center, [55, 55, 55], 2)
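            # The grey base line links both AOI centers; the brighter, thicker
            # half-line from the midpoint toward the destination encodes the
            # transition probability and its direction.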
            # Write raw cK analysis
            if raw_cK_analysis < 0.:

                cv2.putText(aoi_matrix, 'Raw: Ambient attention', (20, window_size[1] - 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)

            elif raw_cK_analysis > 0.:

                cv2.putText(aoi_matrix, 'Raw: Focal attention', (20, window_size[1] - 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 1, cv2.LINE_AA)

            # Write AOI cK analysis
            if aoi_cK_analysis < 0.:

                cv2.putText(aoi_matrix, 'AOI: Ambient attention', (20, window_size[1] - 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)

            elif aoi_cK_analysis > 0.:

                cv2.putText(aoi_matrix, 'AOI: Focal attention', (20, window_size[1] - 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 1, cv2.LINE_AA)
            # Unlock gaze movement identification
            gaze_movement_lock.release()

            # Draw frame
            cv2.imshow(window_name, aoi_matrix)

            key_pressed = cv2.waitKey(10)

            #if key_pressed != -1:
            #    print(key_pressed)
            # Switch identification mode with 'm' key
            if key_pressed == 109:

                mode_list = list(gaze_movement_identifier.keys())
                current_index = mode_list.index(identification_mode) + 1
                identification_mode = mode_list[current_index % len(mode_list)]

            # Stop by pressing 'Esc' key
            if key_pressed == 27:
                break
    # Stop on 'ctrl+C' interruption
    except KeyboardInterrupt:
        pass

    # Stop frame display
    cv2.destroyAllWindows()

if __name__ == '__main__':
    main()