aboutsummaryrefslogtreecommitdiff
path: root/src/argaze/utils/tobii_segment_display.py
blob: a106e7b8b5f30e4ecae70ce8a14b9cf659bf2cfa (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
#!/usr/bin/env python

import argparse

from argaze import GazeFeatures
from argaze.TobiiGlassesPro2 import TobiiEntities, TobiiVideo, TobiiData
from argaze.utils import MiscFeatures

import numpy

import cv2 as cv

def main():
    """
    Display Tobii segment video and data
    """

    # Manage arguments
    parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0])
    parser.add_argument('-s', '--segment_path', metavar='SEGMENT_PATH', type=str, default=None, help='segment path')
    parser.add_argument('-r', '--time_range', metavar=('START_TIME', 'END_TIME'), nargs=2, type=float, default=(0., None), help='start and end time (in second)')
    # BUGFIX: '--window' argument was missing although args.window is read below,
    # which raised AttributeError during the replay loop.
    parser.add_argument('-w', '--window', metavar='DISPLAY', type=bool, default=True, help='enable window display')
    args = parser.parse_args()

    if args.segment_path is not None:

        # Load a tobii segment (time range is given in µs)
        tobii_segment = TobiiEntities.TobiiSegment(args.segment_path, int(args.time_range[0] * 1e6), int(args.time_range[1] * 1e6) if args.time_range[1] is not None else None)

        # Load a tobii segment video
        tobii_segment_video = tobii_segment.load_video()
        print(f'Video properties:\n\tduration: {tobii_segment_video.get_duration() / 1e6} s\n\twidth: {tobii_segment_video.get_width()} px\n\theight: {tobii_segment_video.get_height()} px')

        # Load a tobii segment data
        tobii_segment_data = tobii_segment.load_data()

        print(f'Loaded data count:')
        for name in tobii_segment_data.keys():
            print(f'\t{name}: {len(tobii_segment_data[name])} data')

        # Access to timestamped gaze position data buffer
        tobii_ts_gaze_positions = tobii_segment_data['GazePosition']

        # Access to timestamped gaze position 3d data buffer
        tobii_ts_gaze_positions_3d = tobii_segment_data['GazePosition3D']

        # Access to timestamped head rotations data buffer
        tobii_ts_head_rotations = tobii_segment_data['Gyroscope']

        # Access to timestamped events data buffer
        tobii_ts_events = tobii_segment_data['Event']

        # !!! the parameters below are specific to the TobiiGlassesPro2 !!!
        # Reference : https://www.biorxiv.org/content/10.1101/299925v1
        tobii_accuracy = 1.42 # degree
        tobii_precision = 0.34 # degree
        tobii_camera_hfov = 82 # degree
        tobii_visual_hfov = 160 # degree

        # Video and data replay loop
        try:

            # Initialise progress bar (durations handled in ms from here on)
            MiscFeatures.printProgressBar(0, tobii_segment_video.get_duration() / 1e3, prefix = 'Video progression:', suffix = 'Complete', length = 100)

            # Iterate on video frames
            for video_ts, video_frame in tobii_segment_video.frames():

                video_ts_ms = video_ts / 1e3

                try:

                    # Get nearest head rotation before video timestamp and remove all head rotations before
                    _, nearest_head_rotation = tobii_ts_head_rotations.pop_first_until(video_ts)

                    # Calculate head movement considering only head yaw and pitch
                    head_movement = numpy.array(nearest_head_rotation.value)
                    head_movement_px = head_movement.astype(int)
                    head_movement_norm = numpy.linalg.norm(head_movement[0:2])

                    # Draw movement vector from the frame center
                    cv.line(video_frame.matrix, (int(video_frame.width/2), int(video_frame.height/2)), (int(video_frame.width/2) + head_movement_px[1], int(video_frame.height/2) - head_movement_px[0]), (150, 150, 150), 3)

                # Wait for head rotation
                except ValueError:
                    pass

                try:

                    # Get nearest gaze position before video timestamp and remove all gaze positions before
                    _, nearest_gaze_position = tobii_ts_gaze_positions.pop_first_until(video_ts)

                    # Ignore frame when gaze position is not valid (validity 0 means valid)
                    if nearest_gaze_position.validity == 0:

                        # Convert normalized gaze position to pixel coordinates
                        gaze_position_pixel = GazeFeatures.GazePosition( (int(nearest_gaze_position.value[0] * video_frame.width), int(nearest_gaze_position.value[1] * video_frame.height)) )

                        # Draw gaze position
                        cv.circle(video_frame.matrix, gaze_position_pixel, 2, (0, 255, 255), -1)

                        # Get nearest gaze position 3D before video timestamp and remove all gaze positions before
                        _, nearest_gaze_position_3d = tobii_ts_gaze_positions_3d.pop_first_until(video_ts)

                        # Ignore frame when gaze position 3D is not valid
                        if nearest_gaze_position_3d.validity == 0:

                            # Project angular accuracy onto the scene plane at gaze depth (value[2], mm),
                            # then scale into pixels through the camera horizontal field of view
                            gaze_accuracy_mm = numpy.tan(numpy.deg2rad(tobii_accuracy)) * nearest_gaze_position_3d.value[2]
                            tobii_camera_hfov_mm = numpy.tan(numpy.deg2rad(tobii_camera_hfov / 2)) * nearest_gaze_position_3d.value[2]

                            gaze_position_pixel.accuracy = round(video_frame.width * float(gaze_accuracy_mm) / float(tobii_camera_hfov_mm))

                            # Draw gaze accuracy as a circle around the gaze position
                            cv.circle(video_frame.matrix, gaze_position_pixel, gaze_position_pixel.accuracy, (0, 255, 255), 1)

                # Wait for gaze position
                except ValueError:
                    pass

                try:

                    # Get nearest event before video timestamp and remove all events before
                    nearest_event_ts, nearest_event = tobii_ts_events.pop_first_until(video_ts)

                    #print(nearest_event_ts / 1e3, nearest_event)

                    # Write events
                    # NOTE(review): text is drawn at y=140, below the 50 px high background
                    # rectangle — looks unintended but kept as-is; confirm desired layout.
                    cv.rectangle(video_frame.matrix, (0, 0), (550, 50), (63, 63, 63), -1)
                    cv.putText(video_frame.matrix, str(nearest_event), (20, 140), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)

                # Wait for events
                except ValueError:
                    pass

                # Draw center cross
                cv.line(video_frame.matrix, (int(video_frame.width/2) - 50, int(video_frame.height/2)), (int(video_frame.width/2) + 50, int(video_frame.height/2)), (255, 150, 150), 1)
                cv.line(video_frame.matrix, (int(video_frame.width/2), int(video_frame.height/2) - 50), (int(video_frame.width/2), int(video_frame.height/2) + 50), (255, 150, 150), 1)

                # Write segment timing
                cv.rectangle(video_frame.matrix, (0, 0), (550, 50), (63, 63, 63), -1)
                cv.putText(video_frame.matrix, f'Segment time: {int(video_ts_ms)} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)

                if args.window:

                    # Close window using 'Esc' key
                    if cv.waitKey(1) == 27:
                        break

                    cv.imshow(f'Segment {tobii_segment.get_id()} video', video_frame.matrix)

                # Update Progress Bar
                progress = video_ts_ms - int(args.time_range[0] * 1e3)
                MiscFeatures.printProgressBar(progress, tobii_segment_video.get_duration() / 1e3, prefix = 'Video progression:', suffix = 'Complete', length = 100)

        # Exit on 'ctrl+C' interruption
        except KeyboardInterrupt:
            pass

        # Stop frame display
        cv.destroyAllWindows()

# Script entry point: run main() only when executed directly, not on import
if __name__ == '__main__':

    main()