path: root/src/argaze/utils/tobii_segment_gaze_movements_export.py
#!/usr/bin/env python

import argparse
import os
import math

from argaze import DataStructures, GazeFeatures
from argaze.AreaOfInterest import AOIFeatures
from argaze.GazeAnalysis import DispersionBasedGazeMovementIdentifier
from argaze.TobiiGlassesPro2 import TobiiEntities, TobiiVideo, TobiiSpecifications
from argaze.utils import MiscFeatures

import cv2 as cv
import numpy
import pandas

def main():
    """
    Identify fixations and saccades from Tobii segment gaze data projected onto a given AOI, then export movements, gaze status, metrics and a visualisation video
    """

    # Manage arguments
    parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0])
    parser.add_argument('-s', '--segment_path', metavar='SEGMENT_PATH', type=str, default=None, help='path to a tobii segment folder', required=True)
    parser.add_argument('-a', '--aoi', metavar='AOI_NAME', type=str, default=None, help='aoi name where to project gaze', required=True)
    parser.add_argument('-t', '--time_range', metavar=('START_TIME', 'END_TIME'), nargs=2, type=float, default=(0., None), help='start and end time (in second)')
    parser.add_argument('-di', '--dispersion_threshold', metavar='DISPERSION_THRESHOLD', type=int, default=50, help='dispersion threshold in pixel')
    parser.add_argument('-du', '--duration_threshold', metavar='DURATION_THRESHOLD', type=int, default=200, help='duration threshold in millisecond')
    parser.add_argument('-o', '--output', metavar='OUT', type=str, default=None, help='destination folder path (segment folder by default)')
    parser.add_argument('-w', '--window', metavar='DISPLAY', default=True, help='enable window display', action=argparse.BooleanOptionalAction)
    args = parser.parse_args()

    # Manage destination path
    destination_path = '.'
    if args.output is not None:

        if not os.path.exists(os.path.dirname(args.output)):

            os.makedirs(os.path.dirname(args.output))
            print(f'{os.path.dirname(args.output)} folder created')

        destination_path = args.output

    else:

        destination_path = args.segment_path

        # Export into a dedicated time range folder
        if args.time_range[1] is not None:
            timerange_path = f'[{int(args.time_range[0])}s - {int(args.time_range[1])}s]'
        else:
            timerange_path = '[all]'

        destination_path = f'{destination_path}/{timerange_path}/{args.aoi}'

        if not os.path.exists(destination_path):

            os.makedirs(destination_path)
            print(f'{destination_path} folder created')

    aoi_filepath = f'{destination_path}/../aoi.json'

    fixations_filepath = f'{destination_path}/gaze_fixations.csv'
    saccades_filepath = f'{destination_path}/gaze_saccades.csv'

    gaze_status_filepath = f'{destination_path}/gaze_status.csv'
    gaze_status_video_filepath = f'{destination_path}/gaze_status.mp4'
    gaze_status_image_filepath = f'{destination_path}/gaze_status.png'

    gaze_metrics_filepath = f'{destination_path}/gaze_metrics.csv'

    # Load aoi scene projection
    ts_aois_projections = DataStructures.TimeStampedBuffer.from_json(aoi_filepath)

    print(f'\nAOI frames: {len(ts_aois_projections)}')
    aoi_names = ts_aois_projections.as_dataframe().drop(['offset','comment'], axis=1).columns
    for aoi_name in aoi_names:
        print(f'\t{aoi_name}')

    # Load tobii segment, converting the time range from seconds to the microsecond timebase used by timestamps
    tobii_segment = TobiiEntities.TobiiSegment(args.segment_path, int(args.time_range[0] * 1e6), int(args.time_range[1] * 1e6) if args.time_range[1] is not None else None)

    # Get participant name
    participant_name = TobiiEntities.TobiiParticipant(f'{args.segment_path}/../../').name

    print(f'\nParticipant: {participant_name}')

    # Load a tobii segment video
    tobii_segment_video = tobii_segment.load_video()
    print(f'\nVideo properties:\n\tduration: {tobii_segment_video.duration/1e6} s\n\twidth: {tobii_segment_video.width} px\n\theight: {tobii_segment_video.height} px')

    # Load a tobii segment data
    tobii_segment_data = tobii_segment.load_data()
    
    print(f'\nLoaded data count:')
    for name in tobii_segment_data.keys():
        print(f'\t{name}: {len(tobii_segment_data[name])} data')

    # Access to timestamped gaze position data buffer
    tobii_ts_gaze_positions = tobii_segment_data['GazePosition']

    # Access to timestamped gaze 3D positions data buffer
    tobii_ts_gaze_positions_3d = tobii_segment_data['GazePosition3D']

    # Format Tobii gaze positions and precision in pixels and project them into the AOI scene
    ts_gaze_positions = GazeFeatures.TimeStampedGazePositions()

    # Gaze projection metrics
    ts_projection_metrics = DataStructures.TimeStampedBuffer()

    # Initialise progress bar
    MiscFeatures.printProgressBar(0, tobii_segment_video.duration, prefix = '\nGazePositions projection:', suffix = 'Complete', length = 100)

    for ts, tobii_gaze_position in tobii_ts_gaze_positions.items():

        # Update Progress Bar
        progress = ts - int(args.time_range[0] * 1e6)
        MiscFeatures.printProgressBar(progress, tobii_segment_video.duration, prefix = 'GazePositions projection:', suffix = 'Complete', length = 100)

        try:

            # Get the last aoi projection until the current gaze position timestamp
            ts_current_aois, current_aois = ts_aois_projections.pop_last_until(ts)

            assert ts_current_aois <= ts

            # QUESTION: What to do if the current AOI projection is too old?
            # If the AOI didn't move, it is not a problem...
            # For the moment, we just provide an age metric to assess that it is not too old
            ts_projection_metrics[ts] = {'frame': ts_current_aois, 'age': ts - ts_current_aois}

            # Discard offset and comment entries to keep only AOI projections
            current_aoi_offset = current_aois.pop('offset')
            current_aoi_comment = current_aois.pop('comment')

            selected_aoi = AOIFeatures.AreaOfInterest(current_aois.pop(args.aoi))

        # Wait for AOI projection
        except KeyError:

            continue

        # Test gaze position validity
        if tobii_gaze_position.validity == 0:

            gaze_position_px = (int(tobii_gaze_position.value[0] * tobii_segment_video.width), int(tobii_gaze_position.value[1] * tobii_segment_video.height))
            
            # Get gaze position 3D at same gaze position timestamp
            tobii_gaze_position_3d = tobii_ts_gaze_positions_3d.pop(ts)

            # Test gaze position 3d validity
            if tobii_gaze_position_3d.validity == 0:
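                # Convert the angular gaze accuracy into pixels: both the accuracy angle and the
                # camera horizontal field of view are projected onto the gaze depth plane (in mm),
                # then their ratio is scaled by the video width. Note that the gaze depth cancels
                # out in the ratio, so the result only depends on the two angles and the video width.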
                
                gaze_precision_mm = numpy.sin(numpy.deg2rad(TobiiSpecifications.ACCURACY)) * tobii_gaze_position_3d.value[2]
                tobii_camera_hfov_mm = numpy.sin(numpy.deg2rad(TobiiSpecifications.CAMERA_HFOV)) * tobii_gaze_position_3d.value[2]
                
                gaze_precision_px = round(tobii_segment_video.width * float(gaze_precision_mm) / float(tobii_camera_hfov_mm))

                # Edit gaze position
                gaze_position = GazeFeatures.GazePosition(gaze_position_px, precision=gaze_precision_px)

                # Project gaze position into selected aois
                if selected_aoi.contains_point(gaze_position.value):

                    inner_x, inner_y = selected_aoi.inner_axis(gaze_position.value)

                    # Store inner gaze position for further movement processing
                    ts_gaze_positions[ts] = GazeFeatures.GazePosition((round(inner_x*1920), round(inner_y*1080))) # TEMP: hardcoded Screen_Plan dimension (1920x1080)

                    continue

        # Store invalid gaze position for further movement processing
        ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition()

    print(f'\nGazePositions projection metrics:')
    projection_metrics_dataframe = ts_projection_metrics.as_dataframe()
    print(f'\t AOI age mean (ms) = {projection_metrics_dataframe.age.mean() * 1e-3}')
    print(f'\t AOI age max (ms) = {projection_metrics_dataframe.age.max() * 1e-3}')

    print(f'\nGazeMovement identifier parameters:')
    print(f'\tDispersion threshold = {args.dispersion_threshold}')
    print(f'\tDuration threshold = {args.duration_threshold}')

    # Start movement identification
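    # The dispersion threshold applies to pixels of the projected 1920x1080 plane while the
    # duration threshold is converted from milliseconds to the microsecond timebase of timestamps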
    movement_identifier = DispersionBasedGazeMovementIdentifier.GazeMovementIdentifier(args.dispersion_threshold, args.duration_threshold*1e3)
    ts_fixations = GazeFeatures.TimeStampedGazeMovements()
    ts_saccades = GazeFeatures.TimeStampedGazeMovements()
    ts_status = GazeFeatures.TimeStampedGazeStatus()

    # Initialise progress bar
    MiscFeatures.printProgressBar(0, tobii_segment_video.duration, prefix = '\nGazeMovements identification:', suffix = 'Complete', length = 100)

    for gaze_movement in movement_identifier(ts_gaze_positions):

        if isinstance(gaze_movement, DispersionBasedGazeMovementIdentifier.Fixation):

            start_ts, start_position = gaze_movement.positions.first

            ts_fixations[start_ts] = gaze_movement

            for ts, position in gaze_movement.positions.items():

                ts_status[ts] = GazeFeatures.GazeStatus.from_position(position, 'Fixation', len(ts_fixations))

        elif isinstance(gaze_movement, DispersionBasedGazeMovementIdentifier.Saccade):

            start_ts, start_position = gaze_movement.positions.first
            end_ts, end_position = gaze_movement.positions.last
            
            ts_saccades[start_ts] = gaze_movement

            ts_status[start_ts] = GazeFeatures.GazeStatus.from_position(start_position, 'Saccade', len(ts_saccades))
            ts_status[end_ts] = GazeFeatures.GazeStatus.from_position(end_position, 'Saccade', len(ts_saccades))

        else:
            continue

        # Update Progress Bar
        progress = start_ts - int(args.time_range[0] * 1e6)
        MiscFeatures.printProgressBar(progress, tobii_segment_video.duration, prefix = 'GazeMovements identification:', suffix = 'Complete', length = 100)

    print(f'\nGazeMovements identification metrics:')
    print(f'\t{len(ts_fixations)} fixations found')
    print(f'\t{len(ts_saccades)} saccades found')

    # Export fixations analysis
    fixations_dataframe = ts_fixations.as_dataframe()
    fixations_dataframe.to_csv(fixations_filepath, index=True)
    print(f'\nFixations saved into {fixations_filepath}')

    # Export saccades analysis
    saccades_dataframe = ts_saccades.as_dataframe()
    saccades_dataframe.to_csv(saccades_filepath, index=True)
    print(f'Saccades saved into {saccades_filepath}')

    # Export gaze status analysis
    ts_status.as_dataframe().to_csv(gaze_status_filepath, index=True)
    print(f'Gaze status saved into {gaze_status_filepath}')

    # Export gaze metrics

    # Consider only fixations longer than the duration threshold and saccades shorter than it
    # This mostly filters out the first and last fixation/saccade, as the time range can start anywhere
    # (durations are stored in microseconds, hence the *1e3 applied to the millisecond threshold)
    filtered_fixations = fixations_dataframe[fixations_dataframe.duration > args.duration_threshold*1e3]
    filtered_saccades = saccades_dataframe[saccades_dataframe.duration < args.duration_threshold*1e3]

    segment_duration = tobii_segment_video.duration * 1e-3
    exploitation_time = filtered_fixations.duration.sum() * 1e-3
    exploration_time = filtered_saccades.duration.sum() * 1e-3
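
    # Exploitation time is the time spent in fixations (extracting information) and exploration
    # time is the time spent in saccades (searching for it); their ratio gives a rough
    # exploitation/exploration balance for the segment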

    metrics = {
        'segment_duration (ms)': segment_duration,
        'fixations_number': filtered_fixations.shape[0],
        'fixations_duration_mean (ms)': filtered_fixations.duration.mean() * 1e-3, 
        'saccades_number': filtered_saccades.shape[0],
        'saccades_duration_mean (ms)': filtered_saccades.duration.mean() * 1e-3,
        'exploitation_ratio (%)': exploitation_time / segment_duration * 100,
        'exploration_ratio (%)': exploration_time / segment_duration * 100,
        'exploit_explore_ratio': exploitation_time / exploration_time
        }

    metrics_dataframe = pandas.DataFrame(metrics, index=[participant_name])

    metrics_dataframe.to_csv(gaze_metrics_filepath, index=True)
    print(f'Gaze metrics saved into {gaze_metrics_filepath}')

    # Prepare video export in the same format as the segment video
    output_video = TobiiVideo.TobiiVideoOutput(gaze_status_video_filepath, tobii_segment_video.stream)

    # Reload aoi scene projection
    ts_aois_projections = DataStructures.TimeStampedBuffer.from_json(aoi_filepath)

    # Prepare a white canvas to accumulate fixations and saccades over the whole segment
    heatmap_matrix = numpy.full((1080, 1920, 3), 255, numpy.uint8)

    # Video and data loop
    try:

        # Initialise progress bar
        MiscFeatures.printProgressBar(0, tobii_segment_video.duration, prefix = '\nGazeMovements visualisation:', suffix = 'Complete', length = 100)

        current_fixation_ts, current_fixation = ts_fixations.pop_first()
        current_fixation_time_counter = 0

        current_saccade_ts, current_saccade = ts_saccades.pop_first()

        # Iterate on video frames
        for video_ts, video_frame in tobii_segment_video.frames():

            visu_matrix = numpy.zeros((1080, 1920, 3), numpy.uint8)

            try:

                # Get next aoi projection at video frame time
                ts_current_aois, current_aois = ts_aois_projections.pop_first()

                assert ts_current_aois == video_ts

                selected_aoi = AOIFeatures.AreaOfInterest(current_aois.pop(args.aoi))

            # Wait for AOI projection
            except KeyError:

                continue

            # Apply Perspective Transform Algorithm
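            # The destination corners below assume the AOI corners are ordered clockwise from
            # top-left, so the warped AOI exactly fills the 1920x1080 visualisation frame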
            destination = numpy.float32([[0, 0],[1920, 0],[1920, 1080],[0, 1080]])
            aoi_matrix = cv.getPerspectiveTransform(selected_aoi.astype(numpy.float32), destination)
            visu_matrix = cv.warpPerspective(video_frame.matrix, aoi_matrix, (1920, 1080))

            # While current time belongs to the current fixation
            if current_fixation_ts <= video_ts < current_fixation_ts + current_fixation.duration:

                current_fixation_time_counter += 1

                # Draw current fixation
                cv.circle(visu_matrix, (int(current_fixation.centroid[0]), int(current_fixation.centroid[1])), int(current_fixation.dispersion), (0, 255, 0), current_fixation_time_counter)

                # Accumulate the fixation circle on the heatmap as well
                cv.circle(heatmap_matrix, (int(current_fixation.centroid[0]), int(current_fixation.centroid[1])), int(current_fixation.dispersion), (0, 255, 0))

            # Check next fixation
            elif video_ts >= current_fixation_ts + current_fixation.duration and len(ts_fixations) > 0:

                current_fixation_ts, current_fixation = ts_fixations.pop_first()
                current_fixation_time_counter = 0

            # While current time belongs to the current saccade (i.e. not inside a fixation)
            if video_ts >= current_saccade_ts and current_fixation_time_counter == 0:

                start_ts, start_position = current_saccade.positions.first
                end_ts, end_position = current_saccade.positions.last

                # Draw saccade
                int_start_position = (int(start_position[0]), int(start_position[1]))
                int_end_position = (int(end_position[0]), int(end_position[1]))

                cv.line(visu_matrix, int_start_position, int_end_position, (0, 0, 255), 2)
                cv.line(heatmap_matrix, int_start_position, int_end_position, (0, 0, 255), 2)

            # Check next saccade
            elif video_ts >= current_saccade_ts + current_saccade.duration and len(ts_saccades) > 0:

                current_saccade_ts, current_saccade = ts_saccades.pop_first()

            # Check next gaze
            try:

                # Get closest gaze position before video timestamp and remove all gaze positions before
                _, nearest_gaze_position = ts_gaze_positions.pop_last_before(video_ts)

                # Draw gaze
                nearest_gaze_position.draw(visu_matrix)

            # Wait for gaze position
            except KeyError:
                pass

            # Write segment timing
            cv.rectangle(visu_matrix, (0, 0), (550, 50), (63, 63, 63), -1)
            cv.putText(visu_matrix, f'Segment time: {int(video_ts/1e3)} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
            
            # Write movement identification parameters
            cv.rectangle(visu_matrix, (0, 90), (550, 150), (63, 63, 63), -1)
            cv.putText(visu_matrix, f'Dispersion threshold: {args.dispersion_threshold} px', (20, 100), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
            cv.putText(visu_matrix, f'Duration threshold: {args.duration_threshold} ms', (20, 140), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)

            if args.window:

                # Display video
                cv.imshow(f'Segment {tobii_segment.id} movements', visu_matrix)

                # Close window using 'Esc' key
                if cv.waitKey(1) == 27:
                    break

            # Write video
            output_video.write(visu_matrix)

            # Update Progress Bar
            progress = video_ts - int(args.time_range[0] * 1e6)
            MiscFeatures.printProgressBar(progress, tobii_segment_video.duration, prefix = 'GazeMovements visualisation:', suffix = 'Complete', length = 100)

    # Exit on 'ctrl+C' interruption
    except KeyboardInterrupt:
        pass

    # Save the image accumulating all fixations and saccades
    cv.imwrite(gaze_status_image_filepath, heatmap_matrix)

    # End output video file
    output_video.close()
    print(f'\nVideo with movements saved into {gaze_status_video_filepath}\n')

if __name__ == '__main__':

    main()