 src/argaze/utils/tobii_stream_aruco_aoi_display.py | 199 ++++++++++++++++---
 src/argaze/utils/tobii_stream_display.py           |  22 ++-
 2 files changed, 186 insertions(+), 35 deletions(-)
diff --git a/src/argaze/utils/tobii_stream_aruco_aoi_display.py b/src/argaze/utils/tobii_stream_aruco_aoi_display.py
index 1b7ab2d..c57bba3 100644
--- a/src/argaze/utils/tobii_stream_aruco_aoi_display.py
+++ b/src/argaze/utils/tobii_stream_aruco_aoi_display.py
@@ -1,18 +1,18 @@
- #!/usr/bin/env python
+#!/usr/bin/env python
import argparse
-import os
+import os, time
+import json
-from argaze import DataStructures, GazeFeatures
+from argaze import DataStructures
+from argaze import GazeFeatures
from argaze.TobiiGlassesPro2 import *
-from argaze.ArUcoMarkers import ArUcoTracker, ArUcoCamera
+from argaze.ArUcoMarkers import *
from argaze.AreaOfInterest import *
-from argaze.TobiiGlassesPro2 import *
+from argaze.utils import MiscFeatures
-import cv2 as cv
import numpy
-
-from ivy.std_api import *
+import cv2 as cv
def main():
"""
@@ -31,7 +31,11 @@ def main():
parser.add_argument('-w', '--window', metavar='DISPLAY', type=bool, default=True, help='enable window display', action=argparse.BooleanOptionalAction)
args = parser.parse_args()
- print(f'Track any Aruco markers from the {args.marker_dictionary} dictionary')
+ # Manage markers id to track
+ if args.marker_id_scene is None:
+ print(f'Track any Aruco markers from the {args.marker_dictionary} dictionary')
+ else:
+ print(f'Track Aruco markers {list(args.marker_id_scene.keys())} from the {args.marker_dictionary} dictionary')
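The json import added above suggests the marker-to-scene option is parsed as a JSON object mapping marker ids to AOI scene filepaths. A minimal sketch of that assumption (the '-ms' option name and file paths are illustrative, not taken from the patch):

    import argparse
    import json

    parser = argparse.ArgumentParser()
    # Hypothetical option: a JSON object mapping marker ids to AOI scene filepaths
    parser.add_argument('-ms', '--marker_id_scene', type=json.loads, default=None)

    args = parser.parse_args(['-ms', '{"4": "aoi_a.obj", "5": "aoi_b.obj"}'])
    print(list(args.marker_id_scene.keys()))  # ['4', '5']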
# Create tobii controller
tobii_controller = TobiiController.TobiiController(args.tobii_ip, 'myProject', 'mySelf')
@@ -68,7 +72,6 @@ def main():
# Load AOI 3D scene for each marker and create an AOI 2D scene and frame when a 'Visualisation_Plan' AOI exists
aoi3D_scenes = {}
aoi2D_visu_scenes = {}
- aoi2D_visu_frames = {}
for marker_id, aoi_scene_filepath in args.marker_id_scene.items():
@@ -88,46 +91,180 @@ def main():
# Create timestamped buffer to store AOIs scene in time
ts_aois_scenes = AOIFeatures.TimeStampedAOIScenes()
+ # Prepare to buffer timestamped head rotation data stream
+ tobii_ts_head_rotations = DataStructures.TimeStampedBuffer()
+
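DataStructures.TimeStampedBuffer is used below as an ordered timestamp-to-value store whose pop_first_until method drops entries up to a given timestamp and returns the nearest one before it. A hypothetical stand-in, assuming those semantics (the real class may differ):

    from collections import OrderedDict

    class TimeStampedBuffer(OrderedDict):
        """Hypothetical stand-in: timestamps map to values in arrival order."""

        def pop_first_until(self, ts):
            # Pop entries from the front while their timestamp is <= ts,
            # returning the last (timestamp, value) pair popped
            popped = None
            while len(self) and next(iter(self)) <= ts:
                popped = self.popitem(last=False)
            if popped is None:
                raise KeyError(ts)  # empty buffer or nothing before ts
            return popped

    buffer = TimeStampedBuffer()
    buffer[10] = 'a'; buffer[20] = 'b'; buffer[30] = 'c'
    print(buffer.pop_first_until(25))  # (20, 'b'); entry 30 stays buffered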
# Start streaming
tobii_controller.start_streaming()
# Live video stream capture loop
try:
- past_gaze_positions = DataStructures.TimeStampedBuffer()
-
+ # Assess temporal performance
+ fps = 0
+ current_time = time.time()
+ frame_counter = 0
+
+ # Detect head movement
+ head_moving = False
+ head_movement_last = 0.
+
while tobii_video_stream.is_alive():
+ # Read video stream
video_ts, video_frame = tobii_video_stream.read()
+ video_ts_ms = video_ts / 1e3
+
+ # Copy video frame to edit visualisation on it without disrupting aruco tracking
+ visu_frame = video_frame.copy()
+
+ # Read data stream
+ data_ts, data_stream = tobii_data_stream.read()
+ data_ts_ms = data_ts / 1e3
try:
- # Read data stream
- data_stream = tobii_data_stream.read()
+ # Buffer last received head rotation
+ tobii_ts_head_rotations.append(data_stream['Gyroscope'])
+
+ # Ignore missing Gyroscope data
+ except KeyError as e:
+ pass
+
+ # Process video and data frame
+ try:
+
+ # Get nearest head rotation before video timestamp and remove all earlier ones
+ _, nearest_head_rotation = tobii_ts_head_rotations.pop_first_until(video_ts)
+
+ # Calculate head movement considering only head yaw and pitch
+ head_movement = numpy.array(nearest_head_rotation.value)
+ head_movement_px = head_movement.astype(int)
+ head_movement_norm = numpy.linalg.norm(head_movement[0:2])
+
+ # Draw movement vector
+ cv.line(visu_frame.matrix, (int(visu_frame.width/2), int(visu_frame.height/2)), (int(visu_frame.width/2) + head_movement_px[1], int(visu_frame.height/2) - head_movement_px[0]), (150, 150, 150), 3)
+
+ # Head movement detection hysteresis
+ # TODO : pass the threshold value as argument
+ if not head_moving and head_movement_norm > 50:
+ head_moving = True
+
+ if head_moving and head_movement_norm < 10:
+ head_moving = False
+
+ # When the head is moving, ArUco tracking could return a bad pose estimation and so a bad AOI scene projection
+ if head_moving:
+
+ ts_aois_scenes[round(video_ts_ms)] = AOIFeatures.EmptyAOIScene()
+
+ raise UserWarning('Head is moving')
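The head movement detection above is a two-threshold hysteresis: tracking is declared unreliable once the gyroscope norm exceeds 50 and reliable again only once it falls back below 10. The same logic, isolated (thresholds taken from the patch; units are whatever the gyroscope stream delivers):

    def update_head_moving(moving, movement_norm, enter=50., leave=10.):
        # Enter the 'moving' state above the high threshold...
        if not moving and movement_norm > enter:
            return True
        # ...and leave it only below the low threshold
        if moving and movement_norm < leave:
            return False
        return moving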
+
+ # Hide the frame's left and right borders before tracking to ignore markers outside the focus area
+ cv.rectangle(video_frame.matrix, (0, 0), (int(video_frame.width/6), int(video_frame.height)), (0, 0, 0), -1)
+ cv.rectangle(video_frame.matrix, (int(video_frame.width*(1 - 1/6)), 0), (int(video_frame.width), int(video_frame.height)), (0, 0, 0), -1)
+
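Filled cv.rectangle calls (thickness -1) blank out the outer sixths of the frame so the tracker only sees the central focus area. A standalone sketch with an illustrative 1920x1080 frame:

    import numpy
    import cv2 as cv

    width, height = 1920, 1080  # illustrative frame size
    frame = numpy.full((height, width, 3), 255, dtype=numpy.uint8)

    # Thickness -1 fills the rectangle: blank out the outer sixths
    cv.rectangle(frame, (0, 0), (int(width/6), height), (0, 0, 0), -1)
    cv.rectangle(frame, (int(width*(1 - 1/6)), 0), (width, height), (0, 0, 0), -1)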
+ # Track markers with pose estimation and draw them
+ aruco_tracker.track(video_frame.matrix)
+ aruco_tracker.draw(visu_frame.matrix)
+
+ # When no marker is detected, no AOI scene projection can be done
+ if aruco_tracker.get_markers_number() == 0:
+
+ ts_aois_scenes[round(video_ts_ms)] = AOIFeatures.EmptyAOIScene()
+
+ raise UserWarning('No marker detected')
+
+ # Store 2D AOIs per name for further scene merging
+ aoi2D_dict = {}
+
+ # Project the 3D scene related to each detected marker
+ for (i, marker_id) in enumerate(aruco_tracker.get_markers_ids()):
+
+ # Copy 3D scene related to detected marker
+ aoi3D_scene = aoi3D_scene_selector(marker_id)
+
+ if aoi3D_scene is None:
+ continue
+
+ # Transform scene into camera referential
+ aoi3D_camera = aoi3D_scene.transform(aruco_tracker.get_marker_translation(i), aruco_tracker.get_marker_rotation(i))
+
+ # Get AOIs inside the vision cone
+ cone_vision_height_cm = 200 # cm
+ cone_vision_radius_cm = numpy.tan(numpy.deg2rad(TobiiSpecifications.VISUAL_HFOV / 2)) * cone_vision_height_cm
+
+ aoi3D_inside, aoi3D_outside = aoi3D_camera.vision_cone(cone_vision_radius_cm, cone_vision_height_cm)
+
+ # Keep only AOIs inside the vision cone
+ aoi3D_scene = aoi3D_scene.copy(exclude=aoi3D_outside.keys())
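The cone radius comes from basic trigonometry: radius = tan(HFOV / 2) x height. Assuming, purely for illustration, a VISUAL_HFOV of 82 degrees and the 200 cm height above:

    import numpy
    radius = numpy.tan(numpy.deg2rad(82 / 2)) * 200  # ~173.9 cm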
+
+ # DON'T APPLY CAMERA DISTORTION: it would project points which are far from the frame into it.
+ # This hack isn't realistic, but as the gaze mainly focuses on centered AOIs, where distortion is low, it is acceptable.
+ aoi2D_video_scene = aoi3D_scene.project(aruco_tracker.get_marker_translation(i), aruco_tracker.get_marker_rotation(i), aruco_camera.get_K())
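Projecting with only the camera matrix K amounts to a pinhole projection with zero distortion coefficients, which OpenCV expresses as projectPoints with distCoeffs=None. A self-contained sketch with made-up intrinsics and a single 3D point:

    import numpy
    import cv2 as cv

    K = numpy.array([[600., 0., 960.],
                     [0., 600., 540.],
                     [0., 0., 1.]])  # made-up intrinsics

    points_3d = numpy.array([[10., 5., 100.]])  # camera referential
    rvec = numpy.zeros(3)
    tvec = numpy.zeros(3)

    # Passing None as distortion coefficients skips the distortion model
    points_2d, _ = cv.projectPoints(points_3d, rvec, tvec, K, None)
    print(points_2d)  # [[[1020. 570.]]]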
+
+ # Store each 2D aoi for further scene merging
+ for name, aoi in aoi2D_video_scene.items():
+
+ if name not in aoi2D_dict.keys():
+ aoi2D_dict[name] = []
+
+ aoi2D_dict[name].append(aoi.clockwise())
+
+ # Merge all 2D AOIs into a single 2D scene by vertex-wise averaging
+ aoi2D_merged_scene = AOI2DScene.AOI2DScene()
+ for name, aoi_array in aoi2D_dict.items():
+ aoi2D_merged_scene[name] = numpy.sum(aoi_array, axis=0) / len(aoi_array)
+
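When the same AOI is projected from several markers, the merge simply averages the polygons vertex-wise, which is why each one is first put into clockwise order. A toy example with two quads:

    import numpy

    quad_a = numpy.array([[0., 0.], [10., 0.], [10., 10.], [0., 10.]])
    quad_b = numpy.array([[2., 2.], [12., 2.], [12., 12.], [2., 12.]])

    # Vertex-wise average, as in numpy.sum(aoi_array, axis=0) / len(aoi_array)
    merged = numpy.sum([quad_a, quad_b], axis=0) / 2
    print(merged)  # [[1. 1.], [11. 1.], [11. 11.], [1. 11.]]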
+ aoi2D_merged_scene.draw(visu_frame.matrix, (0, 0))
+
+ # Store 2D merged scene at this time in millisecond
+ ts_aois_scenes[round(video_ts_ms)] = aoi2D_merged_scene
+
+ # Warn user when the merged scene is empty
+ if len(aoi2D_merged_scene.keys()) == 0:
+
+ raise UserWarning('Scene is empty')
+
+ # Write warning
+ except UserWarning as w:
- # Store received gaze positions
- past_gaze_positions.append(data_stream['GazePosition'])
+ cv.rectangle(visu_frame.matrix, (0, 50), (550, 100), (127, 127, 127), -1)
+ cv.putText(visu_frame.matrix, str(w), (20, 80), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
- # Get last gaze position before video timestamp and remove all former gaze positions
- earliest_ts, earliest_gaze_position = past_gaze_positions.pop_first_until(video_ts)
+ # Raised when timestamped buffer is empty
+ except KeyError:
+ pass
- # Draw gaze position
- video_gaze_pixel = (int(earliest_gaze_position.value[0] * video_frame.width), int(earliest_gaze_position.value[1] * video_frame.height))
- cv.circle(video_frame.matrix, video_gaze_pixel, 4, (0, 255, 255), -1)
+ # Assess temporal performance
+ frame_counter += 1
- # Wait for gaze position
- except (AttributeError, ValueError):
- continue
+ if frame_counter == 25:
+ fps = 25 / (time.time() - current_time)
+ current_time = time.time()
+ frame_counter = 0
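The FPS estimate refreshes every 25 frames: count frames, then divide by the elapsed wall-clock time. The same logic, isolated (25 is the patch's refresh interval; the sleep stands in for per-frame work):

    import time

    fps = 0
    current_time = time.time()
    frame_counter = 0

    for _ in range(100):  # stand-in for the video loop
        time.sleep(0.01)  # stand-in for per-frame work
        frame_counter += 1
        if frame_counter == 25:
            fps = 25 / (time.time() - current_time)
            current_time = time.time()
            frame_counter = 0

    print(f'~{fps:.0f} fps')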
- # Track markers with pose estimation and draw them
- aruco_tracker.track(video_frame.matrix)
- aruco_tracker.draw(video_frame.matrix)
+ # Draw focus area
+ cv.rectangle(visu_frame.matrix, (int(video_frame.width/6), 0), (int(visu_frame.width*(1-1/6)), int(visu_frame.height)), (255, 150, 150), 1)
+
+ # Draw center
+ cv.line(visu_frame.matrix, (int(visu_frame.width/2) - 50, int(visu_frame.height/2)), (int(visu_frame.width/2) + 50, int(visu_frame.height/2)), (255, 150, 150), 1)
+ cv.line(visu_frame.matrix, (int(visu_frame.width/2), int(visu_frame.height/2) - 50), (int(visu_frame.width/2), int(visu_frame.height/2) + 50), (255, 150, 150), 1)
+
+ # Write stream timing
+ cv.rectangle(visu_frame.matrix, (0, 0), (1100, 50), (63, 63, 63), -1)
+ cv.putText(visu_frame.matrix, f'Data stream time: {int(data_ts_ms)} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
+ cv.putText(visu_frame.matrix, f'Video delay: {int(data_ts_ms - video_ts_ms)} ms', (550, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
+ cv.putText(visu_frame.matrix, f'Fps: {int(fps)}', (950, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
+
+ if args.window:
- # Close window using 'Esc' key
- if cv.waitKey(1) == 27:
- break
+ # Close window using 'Esc' key
+ if cv.waitKey(1) == 27:
+ break
- cv.imshow('Live Scene', video_frame.matrix)
+ # Display visualisation
+ cv.imshow('Stream ArUco AOI', visu_frame.matrix)
# Exit on 'ctrl+C' interruption
except KeyboardInterrupt:
diff --git a/src/argaze/utils/tobii_stream_display.py b/src/argaze/utils/tobii_stream_display.py
index 76da3d6..006cf81 100644
--- a/src/argaze/utils/tobii_stream_display.py
+++ b/src/argaze/utils/tobii_stream_display.py
@@ -29,9 +29,6 @@ def main():
# Enable tobii video stream
tobii_video_stream = tobii_controller.enable_video_stream()
- # Start streaming
- tobii_controller.start_streaming()
-
# Prepare to buffer timestamped gaze position data stream
tobii_ts_gaze_positions = DataStructures.TimeStampedBuffer()
@@ -41,9 +38,17 @@ def main():
# Prepare to buffer timestamped head rotation data stream
tobii_ts_head_rotations = DataStructures.TimeStampedBuffer()
+ # Start streaming
+ tobii_controller.start_streaming()
+
# Live video and data stream capture loop
try:
+ # Assess temporal performance
+ fps = 0
+ current_time = time.time()
+ frame_counter = 0
+
while tobii_video_stream.is_alive():
# Read video stream
@@ -114,14 +119,23 @@ def main():
except KeyError:
pass
+ # Assess temporal performance
+ frame_counter += 1
+
+ if frame_counter == 25:
+ fps = 25 / (time.time() - current_time)
+ current_time = time.time()
+ frame_counter = 0
+
# Draw center
cv.line(video_frame.matrix, (int(video_frame.width/2) - 50, int(video_frame.height/2)), (int(video_frame.width/2) + 50, int(video_frame.height/2)), (255, 150, 150), 1)
cv.line(video_frame.matrix, (int(video_frame.width/2), int(video_frame.height/2) - 50), (int(video_frame.width/2), int(video_frame.height/2) + 50), (255, 150, 150), 1)
# Write stream timing
- cv.rectangle(video_frame.matrix, (0, 0), (950, 50), (63, 63, 63), -1)
+ cv.rectangle(video_frame.matrix, (0, 0), (1100, 50), (63, 63, 63), -1)
cv.putText(video_frame.matrix, f'Data stream time: {int(data_ts_ms)} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
cv.putText(video_frame.matrix, f'Video delay: {int(data_ts_ms - video_ts_ms)} ms', (550, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
+ cv.putText(video_frame.matrix, f'Fps: {int(fps)}', (950, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
# Close window using 'Esc' key
if cv.waitKey(1) == 27: