path: root/src/argaze/utils/export_tobii_segment_aruco_visual_scan.py
#!/usr/bin/env python

import argparse
import os
import json

from argaze import DataStructures
from argaze import GazeFeatures
from argaze.TobiiGlassesPro2 import TobiiEntities, TobiiVideo
from argaze.ArUcoMarkers import *
from argaze.AreaOfInterest import *
from argaze.utils import MiscFeatures

import numpy
import cv2 as cv

def main():
    """
    Track ArUco markers in a Tobii Glasses Pro 2 segment video file.
    For each loaded AOI scene .obj file, position the scene virtually relative to each detected ArUco marker and project the scene into the camera frame.
    Then, detect whether the Tobii gaze point is inside any AOI.
    Export AOI video and data.
    """

    # Manage arguments
    parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0])
    parser.add_argument('-s', '--segment_path', metavar='SEGMENT_PATH', type=str, default=None, help='segment path')
    parser.add_argument('-r', '--time_range', metavar=('START_TIME', 'END_TIME'), nargs=2, type=float, default=(0., None), help='start and end time (in second)')
    parser.add_argument('-c', '--camera_calibration', metavar='CAM_CALIB', type=str, default=None, help='json camera calibration filepath')
    parser.add_argument('-p', '--aruco_tracker_configuration', metavar='TRACK_CONFIG', type=str, default=None, help='json aruco tracker configuration filepath')
    parser.add_argument('-md', '--marker_dictionary', metavar='MARKER_DICT', type=str, default='DICT_ARUCO_ORIGINAL', help='aruco marker dictionary (DICT_4X4_50, DICT_4X4_100, DICT_4X4_250, DICT_4X4_1000, DICT_5X5_50, DICT_5X5_100, DICT_5X5_250, DICT_5X5_1000, DICT_6X6_50, DICT_6X6_100, DICT_6X6_250, DICT_6X6_1000, DICT_7X7_50, DICT_7X7_100, DICT_7X7_250, DICT_7X7_1000, DICT_ARUCO_ORIGINAL, DICT_APRILTAG_16h5, DICT_APRILTAG_25h9, DICT_APRILTAG_36h10, DICT_APRILTAG_36h11)')
    parser.add_argument('-ms', '--marker_size', metavar='MARKER_SIZE', type=float, default=6, help='aruco marker size (cm)')
    parser.add_argument('-mi', '--marker_id_scene', metavar='MARKER_ID_SCENE', type=json.loads, help='{"marker": "aoi scene filepath"} dictionary')
    parser.add_argument('-o', '--output', metavar='OUT', type=str, default=None, help='destination folder path (segment folder by default)')
    parser.add_argument('-w', '--window', metavar='DISPLAY', type=bool, default=True, help='enable window display', action=argparse.BooleanOptionalAction)
    args = parser.parse_args()

    if args.segment_path is not None:

        # Manage marker ids to track
        if args.marker_id_scene is None:
            print(f'Track any ArUco markers from the {args.marker_dictionary} dictionary')
        else:
            print(f'Track ArUco markers {list(args.marker_id_scene.keys())} from the {args.marker_dictionary} dictionary')

        # Manage destination path
        destination_path = '.'
        if args.output is not None:

            if not os.path.exists(os.path.dirname(args.output)):

                os.makedirs(os.path.dirname(args.output))
                print(f'{os.path.dirname(args.output)} folder created')

            destination_path = args.output

        else:

            destination_path = args.segment_path

            # Export into a dedicated time range folder
            end_label = f'{int(args.time_range[1])}s' if args.time_range[1] is not None else 'end'
            timerange_path = f'[{int(args.time_range[0])}s - {end_label}]'

            destination_path = f'{destination_path}/{timerange_path}'

            if not os.path.exists(destination_path):

                os.makedirs(destination_path)
                print(f'{destination_path} folder created')

        vs_data_filepath = f'{destination_path}/visual_scan.csv'
        vs_visu_filepath = f'{destination_path}/visual_scan_marker_%d.jpg'
        vs_video_filepath = f'{destination_path}/visual_scan.mp4'

        # Load a tobii segment
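        # Note: Tobii segment timestamps are expressed in microseconds, hence the 1e6 factors below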
        tobii_segment = TobiiEntities.TobiiSegment(args.segment_path, int(args.time_range[0] * 1e6), int(args.time_range[1] * 1e6) if args.time_range[1] is not None else None)

        # Load a tobii segment video
        tobii_segment_video = tobii_segment.load_video()
        print(f'Video properties:\n\tduration: {tobii_segment_video.get_duration()/1e6} s\n\twidth: {tobii_segment_video.get_width()} px\n\theight: {tobii_segment_video.get_height()} px')

        # Load a tobii segment data
        tobii_segment_data = tobii_segment.load_data()

        print('Loaded data count:')
        for name in tobii_segment_data.keys():
            print(f'\t{name}: {len(tobii_segment_data[name])} data')

        # Access to timestamped gaze positions data buffer
        tobii_ts_gaze_positions = tobii_segment_data['GazePosition']

        # Access to timestamped gaze 3D positions data buffer
        #tobii_ts_gaze_3d_positions = tobii_segment_data.gidx_gp3
        #print(f'{len(tobii_ts_gaze_3d_positions)} gaze 3D positions loaded')

        # Prepare video export in the same format as the segment video
        output_video = TobiiVideo.TobiiVideoOutput(vs_video_filepath, tobii_segment_video.get_stream())

        # Create aruco camera
        aruco_camera = ArUcoCamera.ArUcoCamera()

        # Load calibration file
        if args.camera_calibration is not None:

            aruco_camera.load_calibration_file(args.camera_calibration)

        else:

            raise ValueError('.json camera calibration filepath required. Use -c option.')

        # Create aruco tracker
        aruco_tracker = ArUcoTracker.ArUcoTracker(args.marker_dictionary, args.marker_size, aruco_camera)

        # Load specific configuration file
        if args.aruco_tracker_configuration is not None:

            aruco_tracker.load_configuration_file(args.aruco_tracker_configuration)

            print(f'ArUcoTracker configuration for {aruco_tracker.get_markers_dictionay().get_markers_format()} markers detection:')
            aruco_tracker.print_configuration()

        # Load an AOI 3D scene for each marker, and create an AOI 2D scene and frame when a 'Visualisation_Plan' AOI exists
        aoi3D_scenes = {}
        aoi2D_visu_scenes = {}
        aoi2D_visu_frames = {}

        for marker_id, aoi_scene_filepath in (args.marker_id_scene or {}).items():

            marker_id = int(marker_id)
            
            aoi3D_scenes[marker_id] = AOI3DScene.AOI3DScene()
            aoi3D_scenes[marker_id].load(aoi_scene_filepath)

            print(f'AOI in {os.path.basename(aoi_scene_filepath)} scene related to marker #{marker_id}:')
            for aoi in aoi3D_scenes[marker_id].keys():

                # If a 'Visualisation_Plan' AOI exists
                # TODO: document this deep feature !!!
                if aoi == 'Visualisation_Plan':

                    print(f'\tVisualisation_Plan detected: a visual scan picture will be output for this marker.')

                    # Create a visual scan visualisation frame
                    visu_width, visu_height = 1920, 1080
                    scene_width, scene_height, __ = aoi3D_scenes[marker_id].size()

                    aoi2D_visu_frames[marker_id] = numpy.full((visu_height, visu_width, 3), 255, dtype=numpy.uint8)

                    if args.time_range != (0., None):
                        cv.putText(aoi2D_visu_frames[marker_id], f'Segment time range: {int(args.time_range[0] * 1000)} - {int(args.time_range[1] * 1000)} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv.LINE_AA)

                    # Project 3D scene onto the visualisation plan
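                    # The scene is flipped (rotated by pi around the X axis) and placed at a depth of scene_height in front of a virtual camera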
                    aoi3D_scenes[marker_id].rotation = numpy.asarray([[-numpy.pi, 0.0, 0.0]])
                    aoi3D_scenes[marker_id].translation = aoi3D_scenes[marker_id].center()*[-1, 1, 0] + [0, 0, scene_height]

                    # Edit a projection matrix for the reference frame
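                    # K0 is a simple pinhole intrinsics matrix: focal length of visu_height pixels, principal point at the frame center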
                    K0 = numpy.asarray([[visu_height, 0.0, visu_width/2], [0.0, visu_height, visu_height/2], [0.0, 0.0, 1.0]])

                    aoi2D_visu_scenes[marker_id] = aoi3D_scenes[marker_id].project(K0)

                    for name, aoi in aoi2D_visu_scenes[marker_id].items():
                        if name != 'Visualisation_Plan':
                            aoi.draw(aoi2D_visu_frames[marker_id], (0, 0, 0))

                else:

                    print(f'\t{aoi}')

        def aoi3D_scene_selector(marker_id):
            return aoi3D_scenes.get(marker_id, None)

        def aoi2D_visu_scene_selector(marker_id):
            return aoi2D_visu_scenes.get(marker_id, None)

        def aoi2D_visu_frame_selector(marker_id):
            return aoi2D_visu_frames.get(marker_id, None)

        # Create timestamped buffer to store AOIs scene in time
        ts_aois_scenes = AOIFeatures.TimeStampedAOIScenes()

        # Create timestamped buffer to store gaze positions in time
        ts_gaze_positions = GazeFeatures.TimeStampedGazePositions()

        # Video and data replay loop
        try:

            # Initialise progress bar
            #MiscFeatures.printProgressBar(0, tobii_segment_video.get_duration()/1000, prefix = 'Progress:', suffix = 'Complete', length = 100)

            # Iterate on video frames
            for video_ts, video_frame in tobii_segment_video.frames():

                video_ts_ms = video_ts / 1000

                # write segment timing
                cv.putText(video_frame.matrix, f'Segment time: {int(video_ts_ms)} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)

                try:

                    # Get nearest gaze position before video timestamp and remove all gaze positions before
                    nearest_gaze_ts, nearest_gaze_position = tobii_ts_gaze_positions.pop_first_until(video_ts)

                    # Draw gaze position
                    video_gaze_pixel = (int(nearest_gaze_position.value[0] * video_frame.width), int(nearest_gaze_position.value[1] * video_frame.height))
                    cv.circle(video_frame.matrix, video_gaze_pixel, 4, (0, 255, 255), -1)

                    # Store gaze position at this time in millisecond
                    ts_gaze_positions[round(video_ts_ms)] = video_gaze_pixel

                # Wait for gaze position
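                # pop_first_until is expected to raise ValueError when no gaze position precedes this video timestamp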
                except ValueError:
                    continue

                # Track markers with pose estimation and draw them
                aruco_tracker.track(video_frame.matrix)
                aruco_tracker.draw(video_frame.matrix)

                # Draw focus area
                cv.circle(video_frame.matrix, (int(video_frame.width/2), int(video_frame.height/2)), int(video_frame.width/3), (255, 150, 150), 1)
                        
                # Project 3D scene on each video frame and the visualisation frame
                if aruco_tracker.get_markers_number():

                    # Store aoi 2D video for further scene merging
                    aoi2D_dict = {}

                    for (i, marker_id) in enumerate(aruco_tracker.get_markers_ids()):

                        # Select 3D scene related to detected marker
                        aoi3D_scene = aoi3D_scene_selector(marker_id)
                        
                        if aoi3D_scene is None:
                            continue
                        
                        # Ignore marker out of focus area
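                        # The focus area matches the width/3-radius circle drawn on the frame above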
                        marker_x, marker_y = aruco_tracker.get_marker_center(i)
                        distance_to_center = ( (video_frame.width/2 - marker_x)**2 + (video_frame.height/2 - marker_y)**2 )**0.5

                        if distance_to_center > int(video_frame.width/3):
                            continue
                        
                        aoi3D_scene.rotation = aruco_tracker.get_marker_rotation(i)
                        aoi3D_scene.translation = aruco_tracker.get_marker_translation(i)

                        # Remove AOI outside the vision field
                        # The vision cone tip is positioned behind the head
                        aoi3D_scene = aoi3D_scene.clip(300, 150, cone_tip=[0., 0., -20.])

                        # DON'T APPLY CAMERA DISTORTION: it would project points which are far from the frame into it
                        # This hack isn't realistic, but as the gaze will mainly focus on centered AOI, where distortion is low, it is acceptable.
                        aoi2D_video_scene = aoi3D_scene.project(aruco_camera.get_K())

                        # Store each 2D aoi for further scene merging
                        for name, aoi in aoi2D_video_scene.items():

                            if name not in aoi2D_dict.keys():
                                aoi2D_dict[name] = []

                            aoi2D_dict[name].append(aoi.clockwise())

                        # Select 2D visu scene if there is one for the detected marker
                        aoi2D_visu_scene = aoi2D_visu_scene_selector(marker_id)
                        aoi2D_visu_frame = aoi2D_visu_frame_selector(marker_id)
                        
                        if aoi2D_visu_scene is None:
                            continue
                        
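                        # Map the gaze pixel from the video frame into the visualisation frame through the shared 'Visualisation_Plan' AOI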
                        look_at = aoi2D_video_scene['Visualisation_Plan'].look_at(video_gaze_pixel)

                        visu_gaze_pixel = aoi2D_visu_scene['Visualisation_Plan'].looked_pixel(look_at)
                        cv.circle(aoi2D_visu_frame, visu_gaze_pixel, 4, (0, 0, 255), -1)

                    # Merge all 2D aoi into a single 2D scene
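                    # The same AOI may be projected from several markers: average its vertices across markers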
                    aoi2D_merged_scene = AOI2DScene.AOI2DScene()
                    for name, aoi_array in aoi2D_dict.items():
                        aoi2D_merged_scene[name] = numpy.sum(aoi_array, axis=0) / len(aoi_array)

                    aoi2D_merged_scene.draw(video_frame.matrix, video_gaze_pixel, exclude=['Visualisation_Plan'])
                        
                    # Store 2D merged scene at this time in millisecond
                    ts_aois_scenes[round(video_ts_ms)] = aoi2D_merged_scene

                if args.window:

                    # Close window using 'Esc' key
                    if cv.waitKey(1) == 27:
                        break

                    # Display video
                    cv.imshow(f'Segment {tobii_segment.get_id()} ArUco AOI', video_frame.matrix)

                    # Display each visual scan frame
                    for marker_id, visu_frame in aoi2D_visu_frames.items():
                        cv.imshow(f'Segment {tobii_segment.get_id()} visual scan for marker {marker_id}', visu_frame)

                # Write video
                output_video.write(video_frame.matrix)

                # Update Progress Bar
                progress = video_ts_ms - int(args.time_range[0] * 1000)
                #MiscFeatures.printProgressBar(progress, tobii_segment_video.get_duration()/1000, prefix = 'Progress:', suffix = 'Complete', length = 100)

        # Exit on 'ctrl+C' interruption
        except KeyboardInterrupt:
            pass

        # Stop frame display
        cv.destroyAllWindows()

        # End output video file
        output_video.close()

        # Print ArUco tracking metrics
        print('\nArUco marker tracking metrics')
        try_count, tracked_counts = aruco_tracker.get_track_metrics()

        for marker_id, tracked_count in tracked_counts.items():
            print(f'Marker {marker_id} has been detected in {tracked_count} / {try_count} frames ({round(100 * tracked_count / try_count, 2)} %)')

        # Build visual scan based on a pointer position
        visual_scan = GazeFeatures.PointerBasedVisualScan(ts_aois_scenes, ts_gaze_positions)
        print(f'{len(visual_scan.steps())} visual scan steps found')

        # Export visual scan data
        visual_scan.export_as_csv(vs_data_filepath)
        print(f'Visual scan data saved into {vs_data_filepath}')

        # Export each visual scan picture
        for marker_id, visu_frame in aoi2D_visu_frames.items():
            cv.imwrite(vs_visu_filepath % marker_id, visu_frame)
            print(f'Visual scan picture for marker {marker_id} saved into {vs_visu_filepath % marker_id}')

        # Notify when the visual scan video has been exported
        print(f'Visual scan video saved into {vs_video_filepath}')


if __name__ == '__main__':

    main()