#!/usr/bin/env python

import argparse
import os
import math

from argaze import DataStructures, GazeFeatures
from argaze.AreaOfInterest import AOIFeatures
from argaze.GazeAnalysis import DispersionBasedGazeMovementIdentifier
from argaze.TobiiGlassesPro2 import TobiiEntities, TobiiVideo, TobiiSpecifications
from argaze.utils import MiscFeatures

import cv2 as cv
import numpy
import pandas

def main():
    """
    Project gaze positions into an AOI and identify particular gaze movements like fixations and saccades
    """

    # Manage arguments
    parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0])
    parser.add_argument('-s', '--segment_path', metavar='SEGMENT_PATH', type=str, default=None, help='path to a tobii segment folder', required=True)
    parser.add_argument('-a', '--aoi', metavar='AOI_NAME', type=str, default=None, help='aoi name where to project gaze', required=True)
    parser.add_argument('-t', '--time_range', metavar=('START_TIME', 'END_TIME'), nargs=2, type=float, default=(0., None), help='start and end time (in seconds)')
    parser.add_argument('-dev', '--deviation_max_threshold', metavar='DISPERSION_THRESHOLD', type=int, default=None, help='maximal distance for fixation identification in pixels')
    parser.add_argument('-dmin', '--duration_min_threshold', metavar='DURATION_MIN_THRESHOLD', type=int, default=200, help='minimal duration for fixation identification in milliseconds')
    parser.add_argument('-v', '--visu', default=False, help='enable data visualisation', action=argparse.BooleanOptionalAction)
    parser.add_argument('-o', '--output', metavar='OUT', type=str, default=None, help='destination folder path (segment folder by default)')
    parser.add_argument('-w', '--window', default=True, help='enable window display', action=argparse.BooleanOptionalAction)
    args = parser.parse_args()
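
    # NOTE (illustration, hypothetical paths): a typical invocation could look like
    #   python tobii_segment_gaze_movements_export.py -s ./segment_folder -a Screen_Plan -t 0 60 -dev 50 --visu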

    # Manage destination path
    destination_path = '.'
    if args.output is not None:

        if not os.path.exists(os.path.dirname(args.output)):

            os.makedirs(os.path.dirname(args.output))
            print(f'{os.path.dirname(args.output)} folder created')

        destination_path = args.output

    else:

        destination_path = args.segment_path

        # Export into a dedicated time range folder
        if args.time_range[1] is not None:
            timerange_path = f'[{int(args.time_range[0])}s - {int(args.time_range[1])}s]'
        else:
            timerange_path = '[all]'

        destination_path = f'{destination_path}/{timerange_path}/{args.aoi}'

        if not os.path.exists(destination_path):

            os.makedirs(destination_path)
            print(f'{destination_path} folder created')

    aoi_filepath = f'{destination_path}/../aoi.json'

    positions_json_filepath = f'{destination_path}/gaze_positions.json'

    fixations_json_filepath = f'{destination_path}/gaze_fixations.json'
    saccades_json_filepath = f'{destination_path}/gaze_saccades.json'
    movements_json_filepath = f'{destination_path}/gaze_movements.json'
    gaze_status_json_filepath = f'{destination_path}/gaze_status.json'

    gaze_status_video_filepath = f'{destination_path}/gaze_status.mp4'
    gaze_status_image_filepath = f'{destination_path}/gaze_status.png'

    # Load aoi scene projection
    ts_aois_projections = DataStructures.TimeStampedBuffer.from_json(aoi_filepath)

    print(f'\nAOI frames: {len(ts_aois_projections)}')
    aoi_names = ts_aois_projections.as_dataframe().drop(['offset','error'], axis=1).columns
    for aoi_name in aoi_names:
        print(f'\t{aoi_name}')

    # Load tobii segment
    tobii_segment = TobiiEntities.TobiiSegment(args.segment_path, int(args.time_range[0] * 1e6), int(args.time_range[1] * 1e6) if args.time_range[1] is not None else None)

    # Get participant name
    participant_name = TobiiEntities.TobiiParticipant(f'{args.segment_path}/../../').name

    print(f'\nParticipant: {participant_name}')

    # Load a tobii segment video
    tobii_segment_video = tobii_segment.load_video()
    print(f'\nVideo properties:\n\tduration: {tobii_segment_video.duration/1e6} s\n\twidth: {tobii_segment_video.width} px\n\theight: {tobii_segment_video.height} px')

    # Check whether gaze positions have already been exported, to avoid processing them again
    if os.path.exists(positions_json_filepath):

        # Load gaze positions
        ts_gaze_positions = GazeFeatures.TimeStampedGazePositions.from_json(positions_json_filepath)

        print(f'\nLoaded gaze positions count:')
        print(f'\tPositions: {len(ts_gaze_positions)}')

        invalid_gaze_position_count = 0
        inner_precisions_px = []

        for ts, gaze_position in ts_gaze_positions.items():

            if not gaze_position.valid: 

                invalid_gaze_position_count += 1

            else:

                inner_precisions_px.append(gaze_position.precision)

        print(f'\tInvalid positions: {invalid_gaze_position_count}/{len(ts_gaze_positions)} ({100*invalid_gaze_position_count/len(ts_gaze_positions):.2f} %)')

        inner_precision_px_mean = round(numpy.mean(inner_precisions_px))
        print(f'\tMean of projected precisions: {inner_precision_px_mean} px')

    # Project gaze positions into the selected AOI
    else:

        # Load a tobii segment data
        tobii_segment_data = tobii_segment.load_data()
        
        print(f'\nLoaded data count:')
        for name, data in tobii_segment_data.items():
            print(f'\t{name}: {len(data)} data')

        # Access to timestamped gaze position data buffer
        tobii_ts_gaze_positions = tobii_segment_data['GazePosition']

        # Access to timestamped gaze 3D positions data buffer
        tobii_ts_gaze_positions_3d = tobii_segment_data['GazePosition3D']

        # Format tobii gaze position and precision in pixel and project it in aoi scene
        ts_gaze_positions = GazeFeatures.TimeStampedGazePositions()

        # Gaze projection metrics
        ts_projection_metrics = DataStructures.TimeStampedBuffer()
        invalid_gaze_position_count = 0
        inner_precisions_px = []

        # Starting with no AOI projection
        ts_current_aoi = 0
        current_aoi = AOIFeatures.AreaOfInterest()

        # Initialise progress bar
        MiscFeatures.printProgressBar(0, tobii_segment_video.duration, prefix = '\nGazePositions projection:', suffix = 'Complete', length = 100)

        for ts, tobii_gaze_position in tobii_ts_gaze_positions.items():

            # Update Progress Bar
            progress = ts - int(args.time_range[0] * 1e6)
            MiscFeatures.printProgressBar(progress, tobii_segment_video.duration, prefix = 'GazePositions projection:', suffix = 'Complete', length = 100)

            # Set default AOI error message
            current_aoi_error = 'No available AOI projection'

            try:

                # Get the last aoi projection until the current gaze position timestamp
                ts_current_aois, current_aois = ts_aois_projections.pop_last_until(ts)

                assert(ts_current_aois <= ts)
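
                # NOTE: gaze data and AOI projections come from different streams sampled
                # at different rates, so pop_last_until() aligns each gaze timestamp with
                # the most recent AOI projection at or before it (hence the assert above).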

                # Catch aoi error to not update current aoi
                if 'error' in current_aois.keys():

                    # Remove extra error info after ':'
                    current_aoi_error = current_aois.pop('error').split(':')[0]

                # Or update current aoi
                elif args.aoi in current_aois.keys():

                    ts_current_aoi = ts_current_aois
                    current_aoi = AOIFeatures.AreaOfInterest(current_aois.pop(args.aoi))

                    current_aoi_error = ''

            # No AOI projection available at the beginning of the segment
            except KeyError:
                pass

            # Wait for available aoi
            if current_aoi.empty:

                ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition(current_aoi_error)
                invalid_gaze_position_count += 1
                continue
                
            # QUESTION: What to do if the current AOI projection is too old?
            # If the AOI didn't move, it is not a problem...
            # For the moment, we discard AOI projections older than the minimal fixation
            # duration and provide a metric to assess the problem.
            ts_difference = ts - ts_current_aoi

            # If the AOI projection is older than the minimal fixation duration, discard it
            if ts_difference >= args.duration_min_threshold*1e3:

                current_aoi = AOIFeatures.AreaOfInterest()
                ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition(f'AOI projection is too old (> {args.duration_min_threshold} ms)')
                invalid_gaze_position_count += 1
                continue
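
            # NOTE (illustration): with the default --duration_min_threshold of 200 ms,
            # a gaze position timestamped more than 200 000 µs after the last AOI
            # projection is marked unvalid rather than projected through a stale AOI.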

            ts_projection_metrics[ts] = {'frame': ts_current_aois, 'age': ts_difference}

            # Test gaze position validity
            if tobii_gaze_position.validity == 0:

                gaze_position_px = (int(tobii_gaze_position.value[0] * tobii_segment_video.width), int(tobii_gaze_position.value[1] * tobii_segment_video.height))
                
                # Get gaze position 3D at same gaze position timestamp
                tobii_gaze_position_3d = tobii_ts_gaze_positions_3d.pop(ts)

                # Test gaze position 3d validity
                if tobii_gaze_position_3d.validity == 0:
                    
                    gaze_precision_mm = numpy.sin(numpy.deg2rad(TobiiSpecifications.PRECISION)) * tobii_gaze_position_3d.value[2]
                    tobii_camera_hfov_mm = numpy.sin(numpy.deg2rad(TobiiSpecifications.CAMERA_HFOV)) * tobii_gaze_position_3d.value[2]
                    
                    gaze_precision_px = round(tobii_segment_video.width * float(gaze_precision_mm) / float(tobii_camera_hfov_mm))
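
                    # NOTE (illustration): both chord lengths above are computed at the same
                    # gaze depth (tobii_gaze_position_3d.value[2]), so depth cancels in the
                    # ratio: gaze_precision_px = width * sin(PRECISION°) / sin(CAMERA_HFOV°).
                    # E.g. assuming hypothetical values PRECISION = 1.42° and CAMERA_HFOV = 82°
                    # on a 1920 px wide video: 1920 * sin(1.42°) / sin(82°) ≈ 48 px.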

                    # Edit gaze position
                    gaze_position = GazeFeatures.GazePosition(gaze_position_px)

                    # Project gaze position into the selected AOI
                    if current_aoi.contains_point(gaze_position.value):

                        inner_x, inner_y = current_aoi.inner_axis(gaze_position.value)
                        inner_precision_px = gaze_precision_px * tobii_segment_video.width * tobii_segment_video.height / current_aoi.area

                        # Store inner precision for metrics
                        inner_precisions_px.append(inner_precision_px)

                        # Store inner gaze position for further movement processing
                        # TEMP: 1920x1080 are Screen_Plan dimensions
                        ts_gaze_positions[ts] = GazeFeatures.GazePosition((round(inner_x*1920), round((1.0 - inner_y)*1080)), precision=inner_precision_px)
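
                        # NOTE (illustration): assuming inner_axis() returns coordinates
                        # normalised to [0, 1] within the AOI, an inner point (0.5, 0.5)
                        # maps to pixel (960, 540) on the 1920x1080 Screen_Plan; the
                        # (1.0 - inner_y) flip converts a bottom-up AOI axis into the
                        # top-down image axis.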

                    else:

                        ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition(f'GazePosition not inside {args.aoi}')
                        invalid_gaze_position_count += 1

                else:

                    ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition(f'Invalid Tobii GazePosition3D')
                    invalid_gaze_position_count += 1

            else:

                ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition(f'Invalid Tobii GazePosition')
                invalid_gaze_position_count += 1

        print(f'\nGazePositions projection metrics:')

        print(f'\tInvalid positions: {invalid_gaze_position_count}/{len(tobii_ts_gaze_positions)} ({100*invalid_gaze_position_count/len(tobii_ts_gaze_positions):.2f} %)')

        if len(ts_projection_metrics):

            projection_metrics_dataframe = ts_projection_metrics.as_dataframe()
            print(f'\tAOI age mean: {projection_metrics_dataframe.age.mean() * 1e-3:.3f} ms')
            print(f'\tAOI age max: {projection_metrics_dataframe.age.max() * 1e-3:.3f} ms')

            inner_precision_px_mean = round(numpy.mean(inner_precisions_px))
            print(f'\tMean of projected precisions: {inner_precision_px_mean} px')

        else:

            print(f'\tno AOI projected')

        ts_gaze_positions.to_json(positions_json_filepath)
        print(f'\nProjected gaze positions saved into {positions_json_filepath}')

    print(f'\nGazeMovement identifier setup:')

    if args.deviation_max_threshold is None:

        selected_deviation_max_threshold = inner_precision_px_mean
        print(f'\tDispersion threshold: {selected_deviation_max_threshold} px (equal to mean of projected precisions)')

    else:

        selected_deviation_max_threshold = args.deviation_max_threshold
        print(f'\tDispersion threshold: {selected_deviation_max_threshold} px')

    print(f'\tDuration threshold: {args.duration_min_threshold} ms')

    movement_identifier = DispersionBasedGazeMovementIdentifier.GazeMovementIdentifier(selected_deviation_max_threshold, args.duration_min_threshold*1e3)
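
    # NOTE (illustration): the identifier is dispersion-based, akin to the I-DT algorithm
    # (Salvucci & Goldberg, 2000): consecutive positions are grouped into a fixation while
    # their spread stays under the deviation threshold (px) for at least the duration
    # threshold, which is passed in µs, hence the *1e3 applied to the ms argument above.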

    # Start movement identification
    ts_fixations = GazeFeatures.TimeStampedGazeMovements()
    ts_saccades = GazeFeatures.TimeStampedGazeMovements()
    ts_movements = GazeFeatures.TimeStampedGazeMovements()
    ts_status = GazeFeatures.TimeStampedGazeStatus()

    # Initialise progress bar
    MiscFeatures.printProgressBar(0, tobii_segment_video.duration, prefix = '\nGazeMovements identification:', suffix = 'Complete', length = 100)

    for gaze_movement in movement_identifier(ts_gaze_positions):

        if isinstance(gaze_movement, DispersionBasedGazeMovementIdentifier.Fixation):

            start_ts, start_position = gaze_movement.positions.first

            ts_fixations[start_ts] = gaze_movement

            for ts, position in gaze_movement.positions.items():

                ts_status[ts] = GazeFeatures.GazeStatus.from_position(position, 'Fixation', len(ts_fixations))

        elif isinstance(gaze_movement, DispersionBasedGazeMovementIdentifier.Saccade):

            start_ts, start_position = gaze_movement.positions.first

            ts_saccades[start_ts] = gaze_movement

            for ts, position in gaze_movement.positions.items():

                ts_status[ts] = GazeFeatures.GazeStatus.from_position(position, 'Saccade', len(ts_saccades))

        else:

            start_ts, start_position = gaze_movement.positions.first

            ts_movements[start_ts] = gaze_movement

            for ts, position in gaze_movement.positions.items():

                ts_status[ts] = GazeFeatures.GazeStatus.from_position(position, 'GazeMovement', len(ts_movements))

        # Update Progress Bar
        progress = start_ts - int(args.time_range[0] * 1e6)
        MiscFeatures.printProgressBar(progress, tobii_segment_video.duration, prefix = 'Gaze movements identification:', suffix = 'Complete', length = 100)

    print(f'\nGazeMovements identification metrics:')
    print(f'\t{len(ts_fixations)} fixations found')
    print(f'\t{len(ts_saccades)} saccades found')
    print(f'\t{len(ts_movements)} movements found')

    ts_fixations.to_json(fixations_json_filepath)
    print(f'\nGaze fixations saved into {fixations_json_filepath}')

    ts_saccades.to_json(saccades_json_filepath)
    print(f'Gaze saccades saved into {saccades_json_filepath}')

    ts_movements.to_json(movements_json_filepath)
    print(f'Gaze movements saved into {movements_json_filepath}')

    ts_status.to_json(gaze_status_json_filepath)
    print(f'Gaze status saved into {gaze_status_json_filepath}')

    # DEBUG
    ts_status.as_dataframe().to_csv(f'{destination_path}/gaze_status.csv')

    # Edit data visualisation
    if args.visu:

        # Prepare video export in the same format as the segment video
        output_video = TobiiVideo.TobiiVideoOutput(gaze_status_video_filepath, tobii_segment_video.stream)

        # Reload aoi scene projection
        ts_aois_projections = DataStructures.TimeStampedBuffer.from_json(aoi_filepath)

        # Prepare gaze status image
        gaze_status_matrix = numpy.zeros((1080, 1920, 3), numpy.uint8)
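
        # NOTE: unlike visu_matrix, which is rebuilt for every video frame,
        # gaze_status_matrix is created once and accumulates all drawings to
        # produce the summary image saved at the end of the loop.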

        # Video loop
        try:

            # Initialise progress bar
            MiscFeatures.printProgressBar(0, tobii_segment_video.duration, prefix = '\nGaze status video processing:', suffix = 'Complete', length = 100)

            fixations_exist = len(ts_fixations) > 0
            saccades_exist = len(ts_saccades) > 0
            movements_exist = len(ts_movements) > 0
            status_exist = len(ts_status) > 0

            if fixations_exist:
                current_fixation_ts, current_fixation = ts_fixations.pop_first()
                current_fixation_time_counter = 0

            if saccades_exist:
                current_saccade_ts, current_saccade = ts_saccades.pop_first()

            if movements_exist:
                current_movements_ts, current_movements = ts_movements.pop_first()

            # Iterate on video frames
            for video_ts, video_frame in tobii_segment_video.frames():

                # This video frame is the reference until the next frame
                # Here next frame is at +40 ms (25 fps)
                # TODO: Get video fps to adapt
                next_video_ts = video_ts + 40000
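                # NOTE (sketch): a frame-rate aware version could compute the step from the
                # stream, assuming a hypothetical fps attribute on the video object:
                # next_video_ts = video_ts + int(1e6 / tobii_segment_video.fps)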

                visu_matrix = numpy.zeros((1080, 1920, 3), numpy.uint8)

                try:

                    # Get current aoi projection at video frame time
                    ts_current_aois, current_aois = ts_aois_projections.pop_first()

                    assert(ts_current_aois == video_ts)

                    # Catch aoi error to not update current aoi
                    if 'error' in current_aois.keys():

                        # Display error (remove extra info after ':')
                        current_aoi_error = current_aois.pop('error').split(':')[0]

                        # Select color error
                        if current_aoi_error == 'VideoTimeStamp missing':
                            color_error = (0, 0, 255)
                        else:
                            color_error = (0, 255, 255)

                        cv.rectangle(visu_matrix, (0, 100), (550, 150), (127, 127, 127), -1)
                        cv.putText(visu_matrix, current_aoi_error, (20, 130), cv.FONT_HERSHEY_SIMPLEX, 1, color_error, 1, cv.LINE_AA)

                    # Or update current aoi
                    elif args.aoi in current_aois.keys():

                        ts_current_aoi = ts_current_aois
                        current_aoi = AOIFeatures.AreaOfInterest(current_aois.pop(args.aoi))

                        # Apply perspective transform algorithm
                        destination = numpy.float32([[0, 1080],[1920, 1080],[1920, 0],[0, 0]])
                        aoi_matrix = cv.getPerspectiveTransform(current_aoi.astype(numpy.float32), destination)
                        visu_matrix = cv.warpPerspective(video_frame.matrix, aoi_matrix, (1920, 1080))
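
                        # NOTE (illustration): assuming the AOI corners are ordered
                        # bottom-left, bottom-right, top-right, top-left to match the
                        # destination quad above, the warp rectifies the projected AOI
                        # into a full 1920x1080 top-down view of the Screen_Plan.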

                # Wait for AOI projection
                except KeyError:
                    pass

                if fixations_exist:

                    # Check next fixation
                    if video_ts >= current_fixation_ts + current_fixation.duration and len(ts_fixations) > 0:

                        current_fixation_ts, current_fixation = ts_fixations.pop_first()
                        current_fixation_time_counter = 0

                    # While current time belongs to the current fixation
                    if video_ts >= current_fixation_ts and video_ts < current_fixation_ts + current_fixation.duration:

                        current_fixation_time_counter += 1

                        # Draw current fixation
                        cv.circle(visu_matrix, (int(current_fixation.centroid[0]), int(current_fixation.centroid[1])), int(current_fixation.deviation_max), (0, 255, 0), current_fixation_time_counter)
                        cv.circle(gaze_status_matrix, (int(current_fixation.centroid[0]), int(current_fixation.centroid[1])), int(current_fixation.deviation_max), (0, 155, 0))

                if saccades_exist:
                  
                    # Check next saccade
                    if video_ts >= current_saccade_ts + current_saccade.duration and len(ts_saccades) > 0:

                        current_saccade_ts, current_saccade = ts_saccades.pop_first()

                    # While current time belongs to the current saccade
                    if video_ts >= current_saccade_ts and video_ts < current_saccade_ts + current_saccade.duration:
                        pass

                if movements_exist:

                    # Check next generic gaze movement
                    if video_ts >= current_movements_ts + current_movements.duration and len(ts_movements) > 0:

                        current_movements_ts, current_movements = ts_movements.pop_first()

                    # While current time belongs to the current generic gaze movement
                    if video_ts >= current_movements_ts and video_ts < current_movements_ts + current_movements.duration:
                        pass

                # Draw gaze status until next frame
                try:

                    # Get next gaze status
                    ts_start, start_gaze_status = ts_status.first
                    ts_next, next_gaze_status = ts_status.first
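
                    # NOTE: both reads peek the same head item on purpose: the loop below
                    # pops the head into start and re-peeks the new head into next, draining
                    # every status whose timestamp falls before the next frame time.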

                    # Check next gaze status is not after next frame time
                    while ts_next < next_video_ts:

                        ts_start, start_gaze_status = ts_status.pop_first()
                        ts_next, next_gaze_status = ts_status.first

                        # Draw movement type
                        if start_gaze_status.valid and next_gaze_status.valid \
                        and start_gaze_status.movement_index == next_gaze_status.movement_index \
                        and start_gaze_status.movement_type == next_gaze_status.movement_type:

                            if next_gaze_status.movement_type == 'Fixation': 
                                movement_color = (0, 255, 0)
                            elif next_gaze_status.movement_type == 'Saccade': 
                                movement_color = (0, 0, 255)
                            else: 
                                movement_color = (255, 0, 0)

                            cv.line(visu_matrix, start_gaze_status, next_gaze_status, movement_color, 3)
                            cv.line(gaze_status_matrix, start_gaze_status, next_gaze_status, movement_color, 3)
                        
                # Empty gaze status buffer
                except IndexError:
                    pass

                # Draw gaze positions until next frame
                try:

                    # Get next gaze position
                    ts_start, start_gaze_position = ts_gaze_positions.first
                    ts_next, next_gaze_position = ts_gaze_positions.first

                    # Gaze position count
                    gaze_position_count = 0
                    
                    # Check next gaze position is not after next frame time
                    while ts_next < next_video_ts:

                        ts_start, start_gaze_position = ts_gaze_positions.pop_first()
                        ts_next, next_gaze_position = ts_gaze_positions.first

                        if not start_gaze_position.valid:

                            # Select color error
                            if start_gaze_position.message == 'VideoTimeStamp missing':
                                color_error = (0, 0, 255)
                            else:
                                color_error = (0, 255, 255)

                            # Write invalid gaze position message
                            cv.putText(visu_matrix, f'{ts_start*1e-3:.3f} ms: {start_gaze_position.message}', (20, 1060 - (gaze_position_count)*50), cv.FONT_HERSHEY_SIMPLEX, 1, color_error, 1, cv.LINE_AA)
                        
                        # Draw start gaze
                        start_gaze_position.draw(visu_matrix, draw_precision=False)
                        start_gaze_position.draw(gaze_status_matrix, draw_precision=False)

                        if start_gaze_position.valid and next_gaze_position.valid:

                            # Draw movement from start to next
                            cv.line(visu_matrix, start_gaze_position, next_gaze_position, (0, 55, 55), 1)
                            cv.line(gaze_status_matrix, start_gaze_position, next_gaze_position, (0, 55, 55), 1)

                        gaze_position_count += 1

                    if start_gaze_position.valid:

                        # Write last start gaze position
                        cv.putText(visu_matrix, str(start_gaze_position.value), start_gaze_position.value, cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)

                    # Write last start gaze position timing
                    cv.rectangle(visu_matrix, (0, 50), (550, 100), (31, 31, 31), -1)
                    cv.putText(visu_matrix, f'Gaze time: {ts_start*1e-3:.3f} ms', (20, 85), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
        
                # Empty gaze position
                except IndexError:
                    pass
                
                # Write segment timing
                cv.rectangle(visu_matrix, (0, 0), (550, 50), (63, 63, 63), -1)
                cv.putText(visu_matrix, f'Video time: {video_ts*1e-3:.3f} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
                
                # Write movement identification parameters
                cv.rectangle(visu_matrix, (0, 150), (550, 310), (63, 63, 63), -1)
                cv.putText(visu_matrix, f'Deviation max: {selected_deviation_max_threshold} px', (20, 210), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
                cv.putText(visu_matrix, f'Duration min: {args.duration_min_threshold} ms', (20, 270), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)

                # Draw dispersion threshold circle
                cv.circle(visu_matrix, (selected_deviation_max_threshold + 400, 230), 2, (0, 255, 255), -1)
                cv.circle(visu_matrix, (selected_deviation_max_threshold + 400, 230), selected_deviation_max_threshold, (255, 150, 150), 1)

                if args.window:

                    # Close window using 'Esc' key
                    if cv.waitKey(1) == 27:
                        break

                    # Display video
                    cv.imshow(f'Segment {tobii_segment.id} movements', visu_matrix)

                # Write video
                output_video.write(visu_matrix)

                # Update Progress Bar
                progress = video_ts - int(args.time_range[0] * 1e6)
                MiscFeatures.printProgressBar(progress, tobii_segment_video.duration, prefix = 'Gaze status video processing:', suffix = 'Complete', length = 100)

        # Exit on 'ctrl+C' interruption
        except KeyboardInterrupt:
            pass

        # Saving gaze status image
        cv.imwrite(gaze_status_image_filepath, gaze_status_matrix)

        # End output video file
        output_video.close()
        print(f'\nGaze status video saved into {gaze_status_video_filepath}\n')

if __name__ == '__main__':

    main()