1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
|
#!/usr/bin/env python
import argparse
import os
from argaze import DataStructures
from argaze.TobiiGlassesPro2 import *
from argaze.ArUcoMarkers import ArUcoTracker, ArUcoCamera
from argaze.AreaOfInterest import *
from argaze.TobiiGlassesPro2 import *
import cv2 as cv
import numpy
def main():
    """
    Track any ArUco marker into Tobii Glasses Pro 2 camera video stream.
    From a loaded AOI scene .obj file, position the scene virtually relatively to any detected ArUco markers and project the scene into camera frame.
    Then, detect if Tobii gaze point is inside any AOI.
    """

    # Manage arguments
    parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0])
    parser.add_argument('-t', '--tobii_ip', metavar='TOBII_IP', type=str, default='192.168.1.10', help='tobii glasses ip')
    parser.add_argument('-c', '--camera_calibration', metavar='CAM_CALIB', type=str, default='tobii_camera.json', help='json camera calibration filepath')
    parser.add_argument('-a', '--aoi_scene', metavar='AOI_SCENE', type=str, default='aoi3D_scene.obj', help='obj aoi scene filepath')
    parser.add_argument('-d', '--dictionary', metavar='DICT', type=str, default='DICT_ARUCO_ORIGINAL', help='aruco marker dictionnary (DICT_4X4_50, DICT_4X4_100, DICT_4X4_250, DICT_4X4_1000, DICT_5X5_50, DICT_5X5_100, DICT_5X5_250, DICT_5X5_1000, DICT_6X6_50, DICT_6X6_100, DICT_6X6_250, DICT_6X6_1000, DICT_7X7_50, DICT_7X7_100, DICT_7X7_250, DICT_7X7_1000, DICT_ARUCO_ORIGINAL,DICT_APRILTAG_16h5, DICT_APRILTAG_25h9, DICT_APRILTAG_36h10, DICT_APRILTAG_36h11)')
    parser.add_argument('-m', '--marker_size', metavar='MKR', type=float, default=6, help='aruco marker size (cm)')
    parser.add_argument('-i', '--markers_id', metavar='MARKERS_ID', nargs='*', type=int, default=[], help='markers id to track')
    args = parser.parse_args()

    # Precompute the tracked id set once instead of rebuilding a list per
    # detected marker on every frame.
    tracked_markers_id = set(args.markers_id)
    empty_marker_set = not tracked_markers_id

    if empty_marker_set:
        print(f'Track any Aruco markers from the {args.dictionary} dictionary')
    else:
        print(f'Track Aruco markers {args.markers_id} from the {args.dictionary} dictionary')

    # Create tobii controller
    tobii_controller = TobiiController.TobiiController(args.tobii_ip, 'myProject', 'mySelf')

    # Calibrate tobii glasses
    tobii_controller.calibrate()

    # Enable tobii data stream
    tobii_data_stream = tobii_controller.enable_data_stream()

    # Enable tobii video stream
    tobii_video_stream = tobii_controller.enable_video_stream()

    # Create aruco camera
    aruco_camera = ArUcoCamera.ArUcoCamera()
    aruco_camera.load_calibration_file(args.camera_calibration)

    # Create aruco tracker
    aruco_tracker = ArUcoTracker.ArUcoTracker(args.dictionary, args.marker_size, aruco_camera)

    # Create AOIs 3D scene
    aoi3D_scene = AOI3DScene.AOI3DScene()
    aoi3D_scene.load(args.aoi_scene)
    print(f'AOIs names: {aoi3D_scene.areas()}')

    # Edit zero distorsion matrix once: it is loop-invariant.
    # DON'T APPLY CAMERA DISTORSION : it projects points which are far from the frame into it
    # This hack isn't realistic but as the gaze will mainly focus on centered AOI, where the distorsion is low, it is acceptable.
    D0 = numpy.asarray([0.0, 0.0, 0.0, 0.0, 0.0])

    # Start streaming
    tobii_controller.start_streaming()

    # Live video stream capture loop
    try:

        past_gaze_positions = DataStructures.TimeStampedBuffer()

        # Initialise before the loop: if the very first data stream read fails,
        # the AOI look-at test below would otherwise raise NameError.
        gaze_position = None

        while tobii_video_stream.is_alive():

            video_ts, video_frame = tobii_video_stream.read()

            try:

                # Read data stream
                data_stream = tobii_data_stream.read()

                # Store received gaze positions
                past_gaze_positions.append(data_stream.gidx_l_gp)

                # Get last gaze position before video timestamp and remove all former gaze positions
                earliest_ts, earliest_gaze_position = past_gaze_positions.pop_first_until(video_ts)

                # Draw video synchronized gaze position (gp is normalized; scale to pixels)
                gaze_position = GazeFeatures.GazePosition((int(earliest_gaze_position.gp[0] * video_frame.width), int(earliest_gaze_position.gp[1] * video_frame.height)))
                cv.circle(video_frame.matrix, gaze_position, 4, (0, 255, 255), -1)

            # When expected values aren't in data stream
            except (KeyError, AttributeError, ValueError):

                pass # keep last gaze position

            # Track markers with pose estimation and draw them
            aruco_tracker.track(video_frame.matrix)
            aruco_tracker.draw(video_frame.matrix)

            # Project 3D scenes related to each aruco markers
            if aruco_tracker.get_markers_number():

                for (i, marker_id) in enumerate(aruco_tracker.get_markers_ids()):

                    # TODO : Select different 3D scenes depending on aruco id

                    # Ignore markers outside the requested set (when one was given)
                    if not empty_marker_set and marker_id not in tracked_markers_id:
                        continue

                    aoi3D_scene.rotation = aruco_tracker.get_marker_rotation(i)
                    aoi3D_scene.translation = aruco_tracker.get_marker_translation(i)

                    aoi2D_scene = aoi3D_scene.project(aruco_camera.get_K(), D0)

                    # Check which 2D aois is looked
                    if gaze_position is not None:
                        aoi2D_scene.look_at(gaze_position)

                    # Draw 2D aois
                    aoi2D_scene.draw(video_frame.matrix)

            # Close window using 'Esc' key
            if cv.waitKey(1) == 27:
                break

            cv.imshow('Live Scene', video_frame.matrix)

    # Exit on 'ctrl+C' interruption
    except KeyboardInterrupt:
        pass

    # Stop frame display
    cv.destroyAllWindows()

    # Stop streaming
    tobii_controller.stop_streaming()
# Run the live tracking demo only when executed as a script.
if __name__ == '__main__':
    main()
|