author Théo de la Hogue 2022-09-21 15:24:11 +0200
committer Théo de la Hogue 2022-09-21 15:24:11 +0200
commit 10882a064e2c85a73e0aa006409e6f49b11b6f97 (patch)
tree 00715b5cce01b0f70ec83141d636474e3c108ecb /src
parent 0bda6596ae6c765bb57e47c60994a3df6abfb429 (diff)
Updating empty scene handling. Removing visual scan processing.
Diffstat (limited to 'src')
-rw-r--r-- src/argaze/utils/tobii_segment_aruco_aoi_export.py | 156
1 file changed, 26 insertions(+), 130 deletions(-)
diff --git a/src/argaze/utils/tobii_segment_aruco_aoi_export.py b/src/argaze/utils/tobii_segment_aruco_aoi_export.py
index a3a31d0..b1fe4ed 100644
--- a/src/argaze/utils/tobii_segment_aruco_aoi_export.py
+++ b/src/argaze/utils/tobii_segment_aruco_aoi_export.py
@@ -18,8 +18,7 @@ def main():
"""
Track ArUco markers in a Tobii Glasses Pro 2 segment video file.
For each loaded AOI scene .obj file, position the scene virtually relative to each detected ArUco marker and project the scene into the camera frame.
- Then, detect if Tobii gaze point is inside any AOI.
- Export AOIs video and data.
+ Export AOIs video and data as aruco_aoi.csv and aruco_aoi.mp4 files.
"""
# Manage arguments
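
The projection step described in the docstring is, at its core, a pinhole projection of 3D AOI corners into the camera frame. A minimal sketch of that operation using OpenCV's cv.projectPoints, with hypothetical pose and calibration values (this is not ArGaze's actual API):

    import numpy
    import cv2 as cv

    # Hypothetical 3D AOI corners expressed in marker coordinates (cm)
    aoi_corners_3d = numpy.array([[0, 0, 0], [10, 0, 0], [10, 5, 0], [0, 5, 0]], dtype=numpy.float32)

    # Hypothetical marker pose and pinhole camera calibration
    rvec = numpy.zeros(3)                # rotation vector from pose estimation
    tvec = numpy.array([0., 0., 50.])    # translation: 50 cm in front of the camera
    K = numpy.array([[1000., 0., 960.], [0., 1000., 540.], [0., 0., 1.]])
    D = numpy.zeros(5)                   # no lens distortion

    # Project the 3D scene corners into the camera frame
    aoi_corners_2d, _ = cv.projectPoints(aoi_corners_3d, rvec, tvec, K, D)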
@@ -71,9 +70,8 @@ def main():
os.makedirs(destination_path)
print(f'{destination_path} folder created')
- vs_data_filepath = f'{destination_path}/visual_scan.csv'
- vs_visu_filepath = f'{destination_path}/visual_scan_marker_%d.jpg'
- vs_video_filepath = f'{destination_path}/visual_scan.mp4'
+ vs_data_filepath = f'{destination_path}/aruco_aoi.csv'
+ vs_video_filepath = f'{destination_path}/aruco_aoi.mp4'
# Load a tobii segment
tobii_segment = TobiiEntities.TobiiSegment(args.segment_path, int(args.time_range[0] * 1e6), int(args.time_range[1] * 1e6) if args.time_range[1] != None else None)
@@ -89,12 +87,6 @@ def main():
for name in tobii_segment_data.keys():
print(f'\t{name}: {len(tobii_segment_data[name])} data')
- # Access to timestamped gaze positions data buffer
- tobii_ts_gaze_positions = tobii_segment_data['GazePosition']
-
- # Access to timestamped gaze 3D positions data buffer
- tobii_ts_gaze_positions_3d = tobii_segment_data['GazePosition3D']
-
# Access to timestamped head rotations data buffer
tobii_ts_head_rotations = tobii_segment_data['Gyroscope']
@@ -111,7 +103,7 @@ def main():
else:
- raise ValueError('.json camera calibration filepath required. Use -c option.')
+ raise UserWarning('.json camera calibration filepath required. Use -c option.')
# Create aruco tracker
aruco_tracker = ArUcoTracker.ArUcoTracker(args.marker_dictionary, args.marker_size, aruco_camera)
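
For reference, loading such a .json camera calibration file could look like the sketch below; the key names are hypothetical, since the schema actually expected by ArUcoCamera is not shown in this diff:

    import json
    import numpy

    with open('tobii_camera.json') as f:   # hypothetical file name
        calibration = json.load(f)

    # Hypothetical keys; ArGaze's real loader may use a different schema
    camera_matrix = numpy.array(calibration['camera_matrix'])
    dist_coeffs = numpy.array(calibration['distortion_coefficients'])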
@@ -139,53 +131,14 @@ def main():
print(f'AOI in {os.path.basename(aoi_scene_filepath)} scene related to marker #{marker_id}:')
for aoi in aoi3D_scenes[marker_id].keys():
- # If a 'Visualisation_Plan' AOI exist
- # TODO: document this deep feature !!!
- if aoi == 'Visualisation_Plan':
-
- print(f'\tVisualisation_Plan detected: a visual scan picture will be output for this marker.')
-
- # Create a visual scan visualisation frame
- visu_width, visu_height = 1920, 1080
- scene_width, scene_height, __ = aoi3D_scenes[marker_id].size()
-
- aoi2D_visu_frames[marker_id] = numpy.full((visu_height, visu_width, 3), 255, dtype=numpy.uint8)
-
- if args.time_range != (0., None):
- cv.putText(aoi2D_visu_frames[marker_id], f'Segment time range: {int(args.time_range[0] * 1000)} - {int(args.time_range[1] * 1000)} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv.LINE_AA)
-
- # Project 3D scene onto the visualisation plan
- aoi3D_scene_rotation = numpy.array([[-numpy.pi, 0.0, 0.0]])
- aoi3D_scene_translation = aoi3D_scenes[marker_id].center()*[-1, 1, 0] + [0, 0, scene_height]
-
- # Edit a projection matrix for the reference frame
- K0 = numpy.array([[visu_height, 0.0, visu_width/2], [0.0, visu_height, visu_height/2], [0.0, 0.0, 1.0]])
-
- aoi2D_visu_scenes[marker_id] = aoi3D_scenes[marker_id].project(aoi3D_scene_translation, aoi3D_scene_rotation, K0)
-
- for name, aoi in aoi2D_visu_scenes[marker_id].items():
- if name != 'Visualisation_Plan':
- aoi.draw(aoi2D_visu_frames[marker_id], (0, 0, 0))
-
- else:
-
- print(f'\t{aoi}')
+ print(f'\t{aoi}')
def aoi3D_scene_selector(marker_id):
return aoi3D_scenes.get(marker_id, None)
- def aoi2D_visu_scene_selector(marker_id):
- return aoi2D_visu_scenes.get(marker_id, None)
-
- def aoi2D_visu_frame_selector(marker_id):
- return aoi2D_visu_frames.get(marker_id, None)
-
# Create timestamped buffer to store AOIs scene in time
ts_aois_scenes = AOIFeatures.TimeStampedAOIScenes()
- # Create timestamped buffer to store gaze positions in time
- ts_gaze_positions = GazeFeatures.TimeStampedGazePositions()
-
# !!! the parameters below are specific to the TobiiGlassesPro2 !!!
# Reference : https://www.biorxiv.org/content/10.1101/299925v1
tobii_accuracy = 1.42 # degree
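
These constants feed the angular-to-pixel conversion performed by the gaze accuracy code removed in the next hunk. A standalone sketch of that trigonometry, with hypothetical depth and field-of-view values:

    import numpy

    tobii_accuracy = 1.42       # degrees, per the reference above
    tobii_camera_hfov = 82.     # degrees -- hypothetical value for illustration
    frame_width = 1920          # pixels
    gaze_depth_mm = 650.        # hypothetical gaze distance from the camera

    # Same formula as the removed gaze accuracy code below
    gaze_accuracy_mm = numpy.tan(numpy.deg2rad(tobii_accuracy)) * gaze_depth_mm
    tobii_camera_hfov_mm = numpy.tan(numpy.deg2rad(tobii_camera_hfov / 2)) * gaze_depth_mm
    accuracy_px = round(frame_width * gaze_accuracy_mm / tobii_camera_hfov_mm)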
@@ -234,37 +187,10 @@ def main():
# When head is moving, ArUco tracking could return bad pose estimation and so bad AOI scene projection
if head_moving:
- raise AOIFeatures.AOISceneMissing('Head is moving')
-
- # Get nearest gaze position before video timestamp and remove all gaze positions before
- _, nearest_gaze_position = tobii_ts_gaze_positions.pop_first_until(video_ts)
-
- # Ignore frame when gaze position is not valid
- if nearest_gaze_position.validity == 1:
- raise GazeFeatures.GazePositionMissing('Unvalid gaze position')
-
- gaze_position_pixel = GazeFeatures.GazePosition( (int(nearest_gaze_position.value[0] * visu_frame.width), int(nearest_gaze_position.value[1] * visu_frame.height)) )
-
- # Draw gaze position
- cv.circle(visu_frame.matrix, gaze_position_pixel, 2, (0, 255, 255), -1)
-
- # Get nearest gaze position 3D before video timestamp and remove all gaze positions before
- _, nearest_gaze_position_3d = tobii_ts_gaze_positions_3d.pop_first_until(video_ts)
- # Ignore frame when gaze position 3D is not valid
- if nearest_gaze_position_3d.validity == 1:
- raise GazeFeatures.GazePositionMissing('Unvalid gaze position 3D')
-
- gaze_accuracy_mm = numpy.tan(numpy.deg2rad(tobii_accuracy)) * nearest_gaze_position_3d.value[2]
- tobii_camera_hfov_mm = numpy.tan(numpy.deg2rad(tobii_camera_hfov / 2)) * nearest_gaze_position_3d.value[2]
-
- gaze_position_pixel.accuracy = round(visu_frame.width * float(gaze_accuracy_mm) / float(tobii_camera_hfov_mm))
-
- # Draw gaze accuracy
- cv.circle(visu_frame.matrix, gaze_position_pixel, gaze_position_pixel.accuracy, (0, 255, 255), 1)
-
- # Store gaze position in millisecond for further visual scan processing
- ts_gaze_positions[round(video_ts_ms)] = gaze_position_pixel
+ ts_aois_scenes[round(video_ts_ms)] = AOIFeatures.EmptyAOIScene()
+
+ raise UserWarning('Head is moving')
# Hide frame left and right borders before tracking to ignore markers outside focus area
cv.rectangle(video_frame.matrix, (0, 0), (int(video_frame.width/6), int(video_frame.height)), (0, 0, 0), -1)
@@ -276,7 +202,10 @@ def main():
# When no marker is detected, no AOI scene projection can be done
if aruco_tracker.get_markers_number() == 0:
- raise AOIFeatures.AOISceneMissing('No marker detected')
+
+ ts_aois_scenes[round(video_ts_ms)] = AOIFeatures.EmptyAOIScene()
+
+ raise UserWarning('No marker detected')
# Store aoi 2D video for further scene merging
aoi2D_dict = {}
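
The new handling stores an EmptyAOIScene and raises a UserWarning that the except block at the end of the loop turns into an on-frame annotation. The control flow, reduced to a runnable sketch:

    import cv2 as cv
    import numpy

    frame = numpy.zeros((1080, 1920, 3), dtype=numpy.uint8)
    markers_number = 0    # hypothetical detection result

    try:
        if markers_number == 0:
            raise UserWarning('No marker detected')
    except UserWarning as w:
        # Annotate the frame instead of aborting the processing loop
        cv.rectangle(frame, (0, 50), (550, 100), (127, 127, 127), -1)
        cv.putText(frame, str(w), (20, 80), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)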
@@ -294,7 +223,7 @@ def main():
aoi3D_camera = aoi3D_scene.transform(aruco_tracker.get_marker_translation(i), aruco_tracker.get_marker_rotation(i))
# Get aoi inside vision cone field
- cone_vision_height_cm = nearest_gaze_position_3d.value[2]/10 # cm
+ cone_vision_height_cm = 200 # cm
cone_vision_radius_cm = numpy.tan(numpy.deg2rad(tobii_visual_hfov / 2)) * cone_vision_height_cm
aoi3D_inside, aoi3D_outside = aoi3D_camera.vision_cone(cone_vision_radius_cm, cone_vision_height_cm)
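
With the fixed 200 cm cone height, the cone radius follows directly from the visual half field of view. For example, assuming a hypothetical tobii_visual_hfov of 60 degrees (the real constant is defined outside this hunk):

    import numpy

    tobii_visual_hfov = 60.       # degrees -- hypothetical value
    cone_vision_height_cm = 200   # cm, fixed now that gaze depth is no longer read
    cone_vision_radius_cm = numpy.tan(numpy.deg2rad(tobii_visual_hfov / 2)) * cone_vision_height_cm
    # tan(30 deg) * 200 cm is roughly 115 cm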
@@ -314,51 +243,28 @@ def main():
aoi2D_dict[name].append(aoi.clockwise())
- # Select 2D visu scene if there is one for the detected marker
- aoi2D_visu_scene = aoi2D_visu_scene_selector(marker_id)
- aoi2D_visu_frame = aoi2D_visu_frame_selector(marker_id)
-
- if aoi2D_visu_scene == None:
- continue
-
- look_at = aoi2D_video_scene['Visualisation_Plan'].look_at(gaze_position_pixel)
-
- visu_gaze_pixel = aoi2D_visu_scene['Visualisation_Plan'].looked_pixel(look_at)
- cv.circle(aoi2D_visu_frame, visu_gaze_pixel, 4, (0, 0, 255), -1)
-
# Merge all 2D aoi into a single 2D scene
aoi2D_merged_scene = AOI2DScene.AOI2DScene()
for name, aoi_array in aoi2D_dict.items():
aoi2D_merged_scene[name] = numpy.sum(aoi_array, axis=0) / len(aoi_array)
- aoi2D_merged_scene.draw(visu_frame.matrix, gaze_position_pixel, exclude=['Visualisation_Plan'])
+ aoi2D_merged_scene.draw(visu_frame.matrix, (0, 0))
- # When the merged scene is empty
- if len(aoi2D_merged_scene.keys()) == 0:
- raise AOIFeatures.AOISceneMissing('Scene is empty')
-
# Store 2D merged scene at this time in millisecond
ts_aois_scenes[round(video_ts_ms)] = aoi2D_merged_scene
- # Raised when gaze data is missing
- except GazeFeatures.GazePositionMissing as e:
-
- # Store missing gaze data exception
- ts_gaze_positions[round(video_ts_ms)] = e
-
- cv.rectangle(visu_frame.matrix, (0, 50), (550, 100), (127, 127, 127), -1)
- cv.putText(visu_frame.matrix, str(e), (20, 80), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
+ # Warn user when the merged scene is empty
+ if len(aoi2D_merged_scene.keys()) == 0:
- # Raised when aoi scene is missing
- except AOIFeatures.AOISceneMissing as e:
+ raise UserWarning('Scene is empty')
- # Store missing scene exception
- ts_aois_scenes[round(video_ts_ms)] = e
+ # Write warning
+ except UserWarning as w:
cv.rectangle(visu_frame.matrix, (0, 50), (550, 100), (127, 127, 127), -1)
- cv.putText(visu_frame.matrix, str(e), (20, 80), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
+ cv.putText(visu_frame.matrix, str(w), (20, 80), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
- # Raised when buffer is empty
+ # Raised when timestamped buffer is empty
except ValueError:
pass
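
Earlier in this hunk, AOIs seen from several markers are merged by vertex-wise averaging of their projected polygons. A self-contained sketch of that step:

    import numpy

    # Two hypothetical projections of the same AOI, one per detected marker
    projections = [
        numpy.array([[0., 0.], [100., 0.], [100., 50.], [0., 50.]]),
        numpy.array([[2., 2.], [102., 2.], [102., 52.], [2., 52.]]),
    ]

    # Vertex-wise average, as in the aoi2D_merged_scene assignment above
    merged = numpy.sum(projections, axis=0) / len(projections)
    print(merged)   # [[1. 1.] [101. 1.] [101. 51.] [1. 51.]]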
@@ -413,22 +319,12 @@ def main():
for marker_id, rejected_count in rejected_counts.items():
print(f'Marker {marker_id} has been rejected in {rejected_count} / {try_count} frames ({round(100 * rejected_count / try_count, 2)} %)')
- # Build visual scan based on a pointer position
- visual_scan = GazeFeatures.PointerBasedVisualScan(ts_aois_scenes, ts_gaze_positions)
- print(f'{len(visual_scan.steps())} visual scan steps found')
-
- # Export visual scan data
- visual_scan.export_as_csv(vs_data_filepath)
- print(f'Visual scan data saved into {vs_data_filepath}')
-
- # Export each visual scan picture
- for marker_id, aoi2D_visu_frame in aoi2D_visu_frames.items():
- cv.imwrite(vs_visu_filepath % marker_id, visu_frame.matrix)
- print(f'Visual scan picture for marker {marker_id} saved into {vs_visu_filepath % marker_id}')
-
- # Notify when the visual scan video has been exported
- print(f'Visual scan video saved into {vs_video_filepath}')
+ # Export ArUco AOI data
+ ts_aois_scenes.export_as_csv(vs_data_filepath, exclude=['dimension'])
+ print(f'ArUco AOI data saved into {vs_data_filepath}')
+ # Notify when the ArUco AOI video has been exported
+ print(f'ArUco AOI video saved into {vs_video_filepath}')
if __name__ == '__main__':
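
As a quick sanity check on the export, the resulting CSV can be read back, for example with pandas; the column names are assumptions, since they depend on AOIFeatures.TimeStampedAOIScenes.export_as_csv and are not shown in this diff:

    import pandas

    # Hypothetical inspection of the exported ArUco AOI data
    aoi_data = pandas.read_csv('aruco_aoi.csv')
    print(aoi_data.head())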