-rw-r--r--  src/argaze/utils/tobii_segment_arscene_export.py        178
-rw-r--r--  src/argaze/utils/tobii_segment_gaze_metrics_export.py    44
-rw-r--r--  src/argaze/utils/tobii_segment_gaze_movements_export.py  53
-rw-r--r--  src/argaze/utils/tobii_stream_arscene_display.py         29
4 files changed, 132 insertions, 172 deletions
diff --git a/src/argaze/utils/tobii_segment_arscene_export.py b/src/argaze/utils/tobii_segment_arscene_export.py
index cc180a2..b2cc0e0 100644
--- a/src/argaze/utils/tobii_segment_arscene_export.py
+++ b/src/argaze/utils/tobii_segment_arscene_export.py
@@ -23,10 +23,10 @@ def main():
parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0])
parser.add_argument('-s', '--segment_path', metavar='SEGMENT_PATH', type=str, default=None, help='segment path')
parser.add_argument('-t', '--time_range', metavar=('START_TIME', 'END_TIME'), nargs=2, type=float, default=(0., None), help='start and end time (in second)')
- parser.add_argument('-p', '--project_path', metavar='ARGAZE_PROJECT', type=str, default=None, help='json argaze project filepath')
+ parser.add_argument('-p', '--env_path', metavar='ENVIRONMENT_PATH', type=str, default=None, help='json argaze environment filepath')
parser.add_argument('-b', '--borders', metavar='BORDERS', type=float, default=16.666, help='define left and right borders mask (%) to not detect aruco out of these borders')
parser.add_argument('-o', '--output', metavar='OUT', type=str, default=None, help='destination folder path (segment folder by default)')
- parser.add_argument('-w', '--window', metavar='DISPLAY', type=bool, default=True, help='enable window display', action=argparse.BooleanOptionalAction)
+ parser.add_argument('-d', '--debug', metavar='DEBUG', type=bool, default=False, help='Enable visualisation and console outputs')
args = parser.parse_args()
if args.segment_path != None:
@@ -99,16 +99,23 @@ def main():
if tobii_gaze_position.validity == 0:
gaze_position_px = (int(tobii_gaze_position.value[0] * tobii_segment_video.width), int(tobii_gaze_position.value[1] * tobii_segment_video.height))
- ts_gaze_positions[ts] = GazeFeatures.GazePosition(gaze_position_px)
+ ts_gaze_positions[ts] = GazeFeatures.GazePosition(gaze_position_px)
- # Prepare video exportation at the same format than segment video
- output_video = TobiiVideo.TobiiVideoOutput(aoi_mp4_filepath, tobii_segment_video.stream)
+ print('\n')
- # Load ar scene
- ar_scene = ArScene.ArScene.from_json(args.project_path)
+ if args.debug:
- print('\n')
- print(ar_scene)
+ # Prepare video export in the same format as the segment video
+ output_video = TobiiVideo.TobiiVideoOutput(aoi_mp4_filepath, tobii_segment_video.stream)
+
+ # Load ArEnvironment
+ ar_env = ArFeatures.ArEnvironment.from_json(args.env_path)
+
+ if args.debug:
+ print(ar_env)
+
+ # Work with first scene only
+ _, ar_scene = next(iter(ar_env.items()))
# Create timestamped buffer to store AOIs and primary time stamp offset
ts_offset_aois = DataStructures.TimeStampedBuffer()
@@ -117,7 +124,7 @@ def main():
try:
# Initialise progress bar
- MiscFeatures.printProgressBar(0, tobii_segment_video.duration/1e3, prefix = 'Progress:', suffix = 'Complete', length = 100)
+ MiscFeatures.printProgressBar(0, tobii_segment_video.duration/1e3, prefix = 'ArUco detection & AOI projection:', suffix = 'Complete', length = 100)
# Iterate on video frames
for video_ts, video_frame in tobii_segment_video.frames():
@@ -145,8 +152,14 @@ def main():
cv.rectangle(video_frame.matrix, (0, 0), (int(video_frame.width*args.borders/100), int(video_frame.height)), (0, 0, 0), -1)
cv.rectangle(video_frame.matrix, (int(video_frame.width*(1 - args.borders/100)), 0), (int(video_frame.width), int(video_frame.height)), (0, 0, 0), -1)
+ # Detect ArUco markers in the frame
+ ar_env.aruco_detector.detect_markers(video_frame.matrix)
+
+ # Estimate marker poses
+ ar_env.aruco_detector.estimate_markers_pose()
+
# Estimate scene pose from ArUco markers into frame.
- tvec, rmat, _ = ar_scene.estimate_pose(video_frame.matrix)
+ tvec, rmat, _ = ar_scene.estimate_pose(ar_env.aruco_detector.detected_markers)
# Project AOI scene into frame according estimated pose
aoi_scene_projection = ar_scene.project(tvec, rmat, visual_hfov=TobiiSpecifications.VISUAL_HFOV)
@@ -156,17 +169,16 @@ def main():
projected_aois[aoi_name] = numpy.rint(aoi_scene_projection[aoi_name]).astype(int)
- # Draw detected markers
- ar_scene.aruco_detector.draw_detected_markers(visu_frame.matrix)
+ if args.debug:
- # Draw AOI
- aoi_scene_projection.draw(visu_frame.matrix, (0, 0), color=(0, 255, 255))
+ # Draw detected markers
+ ar_env.aruco_detector.draw_detected_markers(visu_frame.matrix)
- # Catch exceptions raised by estimate_pose and project methods
- except (ArScene.PoseEstimationFailed, ArScene.SceneProjectionFailed) as e:
+ # Draw AOI
+ aoi_scene_projection.draw(visu_frame.matrix, (0, 0), color=(0, 255, 255))
- # Draw detected markers
- ar_scene.aruco_detector.draw_detected_markers(visu_frame.matrix)
+ # Catch exceptions raised by estimate_pose and project methods
+ except (ArFeatures.PoseEstimationFailed, ArFeatures.SceneProjectionFailed) as e:
if str(e) == 'Unconsistent marker poses':
@@ -176,8 +188,13 @@ def main():
projected_aois['error'] = str(e)
- cv.rectangle(visu_frame.matrix, (0, 100), (550, 150), (127, 127, 127), -1)
- cv.putText(visu_frame.matrix, str(e), (20, 130), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
+ if args.debug:
+
+ # Draw detected markers
+ ar_env.aruco_detector.draw_detected_markers(visu_frame.matrix)
+
+ cv.rectangle(visu_frame.matrix, (0, 100), (550, 150), (127, 127, 127), -1)
+ cv.putText(visu_frame.matrix, str(e), (20, 130), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
# Raised when timestamped buffer is empty
except KeyError as e:
@@ -187,86 +204,91 @@ def main():
projected_aois['offset'] = 0
projected_aois['error'] = e
- cv.rectangle(visu_frame.matrix, (0, 100), (550, 150), (127, 127, 127), -1)
- cv.putText(visu_frame.matrix, str(e), (20, 130), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv.LINE_AA)
+ if args.debug:
+
+ cv.rectangle(visu_frame.matrix, (0, 100), (550, 150), (127, 127, 127), -1)
+ cv.putText(visu_frame.matrix, str(e), (20, 130), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv.LINE_AA)
# Store projected AOI
ts_offset_aois[video_ts] = projected_aois
- # Draw gaze positions until next frame
- try:
-
- # Get next gaze position
- ts_start, start_gaze_position = ts_gaze_positions.first
- ts_next, next_gaze_position = ts_gaze_positions.first
-
- # Check next gaze position is not after next frame time
- while ts_next < next_video_ts:
+ if args.debug:
+ # Draw gaze positions until next frame
+ try:
- ts_start, start_gaze_position = ts_gaze_positions.pop_first()
+ # Get next gaze position
+ ts_start, start_gaze_position = ts_gaze_positions.first
ts_next, next_gaze_position = ts_gaze_positions.first
+
+ # Check next gaze position is not after next frame time
+ while ts_next < next_video_ts:
- # Draw start gaze
- start_gaze_position.draw(visu_frame.matrix)
+ ts_start, start_gaze_position = ts_gaze_positions.pop_first()
+ ts_next, next_gaze_position = ts_gaze_positions.first
- if start_gaze_position.valid and next_gaze_position.valid:
+ # Draw start gaze
+ start_gaze_position.draw(visu_frame.matrix)
- # Draw movement from start to next
- cv.line(visu_frame.matrix, start_gaze_position, next_gaze_position, (0, 255, 255), 1)
+ if start_gaze_position.valid and next_gaze_position.valid:
- if start_gaze_position.valid:
+ # Draw movement from start to next
+ cv.line(visu_frame.matrix, start_gaze_position, next_gaze_position, (0, 255, 255), 1)
- # Write last start gaze position
- cv.putText(visu_frame.matrix, str(start_gaze_position.value), start_gaze_position.value, cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
-
- # Write last start gaze position timing
- cv.rectangle(visu_frame.matrix, (0, 50), (550, 100), (31, 31, 31), -1)
- cv.putText(visu_frame.matrix, f'Gaze time: {ts_start*1e-3:.3f} ms', (20, 85), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
-
- # Empty gaze position
- except IndexError:
- pass
-
- # Draw focus area
- cv.rectangle(visu_frame.matrix, (int(video_frame.width*args.borders/100.), 0), (int(visu_frame.width*(1-args.borders/100)), int(visu_frame.height)), (255, 150, 150), 1)
-
- # Draw center
- cv.line(visu_frame.matrix, (int(visu_frame.width/2) - 50, int(visu_frame.height/2)), (int(visu_frame.width/2) + 50, int(visu_frame.height/2)), (255, 150, 150), 1)
- cv.line(visu_frame.matrix, (int(visu_frame.width/2), int(visu_frame.height/2) - 50), (int(visu_frame.width/2), int(visu_frame.height/2) + 50), (255, 150, 150), 1)
-
- # Write segment timing
- cv.rectangle(visu_frame.matrix, (0, 0), (550, 50), (63, 63, 63), -1)
- cv.putText(visu_frame.matrix, f'Video time: {video_ts*1e-3:.3f} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
-
- if args.window:
-
- # Close window using 'Esc' key
- if cv.waitKey(1) == 27:
- break
-
- # Display visualisation
- cv.imshow(f'Segment {tobii_segment.id} ArUco AOI', visu_frame.matrix)
-
- # Write video
- output_video.write(visu_frame.matrix)
+ if start_gaze_position.valid:
+
+ # Write last start gaze position
+ cv.putText(visu_frame.matrix, str(start_gaze_position.value), start_gaze_position.value, cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
+
+ # Write last start gaze position timing
+ cv.rectangle(visu_frame.matrix, (0, 50), (550, 100), (31, 31, 31), -1)
+ cv.putText(visu_frame.matrix, f'Gaze time: {ts_start*1e-3:.3f} ms', (20, 85), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
+
+ # Empty gaze position
+ except IndexError:
+ pass
+
+ # Draw focus area
+ cv.rectangle(visu_frame.matrix, (int(video_frame.width*args.borders/100.), 0), (int(visu_frame.width*(1-args.borders/100)), int(visu_frame.height)), (255, 150, 150), 1)
+
+ # Draw center
+ cv.line(visu_frame.matrix, (int(visu_frame.width/2) - 50, int(visu_frame.height/2)), (int(visu_frame.width/2) + 50, int(visu_frame.height/2)), (255, 150, 150), 1)
+ cv.line(visu_frame.matrix, (int(visu_frame.width/2), int(visu_frame.height/2) - 50), (int(visu_frame.width/2), int(visu_frame.height/2) + 50), (255, 150, 150), 1)
+
+ # Write segment timing
+ cv.rectangle(visu_frame.matrix, (0, 0), (550, 50), (63, 63, 63), -1)
+ cv.putText(visu_frame.matrix, f'Video time: {video_ts*1e-3:.3f} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
+
+ if args.debug:
+
+ # Close window using 'Esc' key
+ if cv.waitKey(1) == 27:
+ break
+
+ # Display visualisation
+ cv.imshow(f'Segment {tobii_segment.id} ArUco AOI', visu_frame.matrix)
+
+ # Write video
+ output_video.write(visu_frame.matrix)
# Update Progress Bar
progress = video_ts*1e-3 - int(args.time_range[0] * 1e3)
- MiscFeatures.printProgressBar(progress, tobii_segment_video.duration*1e-3, prefix = 'Progress:', suffix = 'Complete', length = 100)
+ MiscFeatures.printProgressBar(progress, tobii_segment_video.duration*1e-3, prefix = 'ArUco detection & AOI projection:', suffix = 'Complete', length = 100)
# Exit on 'ctrl+C' interruption
except KeyboardInterrupt:
pass
- # Stop frame display
- cv.destroyAllWindows()
+ if args.debug:
+
+ # Stop frame display
+ cv.destroyAllWindows()
- # End output video file
- output_video.close()
+ # End output video file
+ output_video.close()
# Print aruco detection metrics
print('\n\nAruco marker detection metrics')
- try_count, detected_counts = ar_scene.aruco_detector.detection_metrics
+ try_count, detected_counts = ar_env.aruco_detector.detection_metrics
for marker_id, detected_count in detected_counts.items():
print(f'\tMarkers {marker_id} has been detected in {detected_count} / {try_count} frames ({round(100 * detected_count / try_count, 2)} %)')
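
Note on the hunks above: marker detection moves from the scene to the environment level. Instead of handing the raw frame to ar_scene.estimate_pose(), the script now detects markers and estimates their poses through ar_env.aruco_detector, then passes the detected markers to the scene. A minimal sketch of the new call sequence, assuming the scripts' import style and an environment JSON describing at least one scene (the file names are illustrative):

    import cv2 as cv

    from argaze import ArFeatures  # import path assumed to match the scripts

    # Load the environment description (file name is illustrative)
    ar_env = ArFeatures.ArEnvironment.from_json('environment.json')

    # Work with first scene only, as the script does
    _, ar_scene = next(iter(ar_env.items()))

    # Any BGR frame works here; the script uses the Tobii video frame matrix
    frame = cv.imread('frame.png')

    try:

        # Detection and pose estimation now happen once, at environment level
        ar_env.aruco_detector.detect_markers(frame)
        ar_env.aruco_detector.estimate_markers_pose()

        # The scene consumes detected markers instead of the raw frame
        tvec, rmat, _ = ar_scene.estimate_pose(ar_env.aruco_detector.detected_markers)

        # Projection is unchanged; the script also passes
        # visual_hfov=TobiiSpecifications.VISUAL_HFOV here
        aoi_scene_projection = ar_scene.project(tvec, rmat)

    except (ArFeatures.PoseEstimationFailed, ArFeatures.SceneProjectionFailed) as e:

        print(e)
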
diff --git a/src/argaze/utils/tobii_segment_gaze_metrics_export.py b/src/argaze/utils/tobii_segment_gaze_metrics_export.py
index a138087..1e530e0 100644
--- a/src/argaze/utils/tobii_segment_gaze_metrics_export.py
+++ b/src/argaze/utils/tobii_segment_gaze_metrics_export.py
@@ -60,7 +60,6 @@ def main():
fixations_json_filepath = f'{destination_path}/gaze_fixations.json'
saccades_json_filepath = f'{destination_path}/gaze_saccades.json'
- movements_json_filepath = f'{destination_path}/gaze_movements.json'
gaze_status_json_filepath = f'{destination_path}/gaze_status.json'
gaze_metrics_period_filepath = f'{destination_path}/gaze_metrics_{int(args.period)}s.csv'
@@ -72,13 +71,11 @@ def main():
# Load gaze movements
ts_fixations = GazeFeatures.TimeStampedGazeMovements.from_json(fixations_json_filepath)
ts_saccades = GazeFeatures.TimeStampedGazeMovements.from_json(saccades_json_filepath)
- ts_movements = GazeFeatures.TimeStampedGazeMovements.from_json(movements_json_filepath)
ts_status = GazeFeatures.TimeStampedGazeStatus.from_json(gaze_status_json_filepath)
print(f'\nLoaded gaze movements count:')
print(f'\tFixations: {len(ts_fixations)}')
print(f'\tSaccades: {len(ts_saccades)}')
- print(f'\tMovements: {len(ts_movements)}')
# Load tobii segment
tobii_segment = TobiiEntities.TobiiSegment(args.segment_path, int(args.time_range[0] * 1e6), int(args.time_range[1] * 1e6) if args.time_range[1] != None else None)
@@ -98,7 +95,6 @@ def main():
positions_exist = len(ts_gaze_positions) > 0
fixations_exist = len(ts_fixations) > 0
saccades_exist = len(ts_saccades) > 0
- movements_exist = len(ts_movements) > 0
status_exist = len(ts_status) > 0
if positions_exist:
@@ -131,17 +127,6 @@ def main():
# Add 'end' column
saccades_dataframe['end'] = saccades_dataframe.index + saccades_dataframe.duration
- if movements_exist:
-
- # Create pandas dataframe
- movements_dataframe = ts_movements.as_dataframe()
-
- # Reset time range offset
- movements_dataframe.index = movements_dataframe.index - movements_dataframe.index[0]
-
- # Add 'end' column
- movements_dataframe['end'] = movements_dataframe.index + movements_dataframe.duration
-
# Define a function to export metrics for a period of time
def metrics_for_period(period_start_ts, period_end_ts):
@@ -220,35 +205,10 @@ def main():
period_metrics['saccades_duration_ratio (%)'] = saccades_duration_sum / period_duration * 100
period_metrics['saccades_distance_mean (px)'] = saccades_period_dataframe.distance.mean()
- # Default movements movement analysis
- movements_duration_sum = 0.0
- period_metrics['movements_number'] = 0
- period_metrics['movements_duration_mean (ms)'] = None
- period_metrics['movements_duration_sum (ms)'] = None
- period_metrics['movements_duration_ratio (%)'] = None
- period_metrics['movements_distance_mean (px)'] = None
-
- # Analyse movements movements
- if movements_exist:
-
- # Select period
- movements_period_dataframe = movements_dataframe[(movements_dataframe.index >= period_start_ts) & (movements_dataframe.end < period_end_ts)]
-
- if not movements_period_dataframe.empty:
-
- #print('\n* Unknown movements:\n', movements_period_dataframe)
-
- movements_duration_sum = movements_period_dataframe.duration.sum()
- period_metrics['movements_number'] = movements_period_dataframe.shape[0]
- period_metrics['movements_duration_mean (ms)'] = movements_period_dataframe.duration.mean() * 1e-3
- period_metrics['movements_duration_sum (ms)'] = movements_duration_sum * 1e-3
- period_metrics['movements_duration_ratio (%)'] = movements_duration_sum / period_duration * 100
- period_metrics['movements_distance_mean (px)'] = movements_period_dataframe.distance.mean()
-
# Analyse exploit/explore
- if saccades_duration_sum != 0.0 or movements_duration_sum != 0.0:
+ if saccades_duration_sum != 0.0:
- period_metrics['exploit_explore_ratio'] = fixations_duration_sum / (saccades_duration_sum + movements_duration_sum)
+ period_metrics['exploit_explore_ratio'] = fixations_duration_sum / saccades_duration_sum
else:
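
Note on the hunks above: with the 'movements' (unknown movement) category removed, every identified movement is either a fixation or a saccade, so the exploit/explore ratio reduces to total fixation time over total saccade time. A minimal sketch of that computation, assuming dataframes shaped as the script builds them (indexed by start timestamp, with duration and end columns); the script's else branch is cut off in the hunk and is assumed here to leave the metric undefined:

    import pandas as pd

    def exploit_explore_ratio(fixations_dataframe: pd.DataFrame,
                              saccades_dataframe: pd.DataFrame,
                              period_start_ts, period_end_ts):

        # Select movements inside the period, as metrics_for_period does
        f = fixations_dataframe[(fixations_dataframe.index >= period_start_ts)
                                & (fixations_dataframe.end < period_end_ts)]
        s = saccades_dataframe[(saccades_dataframe.index >= period_start_ts)
                               & (saccades_dataframe.end < period_end_ts)]

        saccades_duration_sum = s.duration.sum()

        # The metric stays undefined when no saccade occurred in the period
        if saccades_duration_sum == 0.0:
            return None

        return f.duration.sum() / saccades_duration_sum
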
diff --git a/src/argaze/utils/tobii_segment_gaze_movements_export.py b/src/argaze/utils/tobii_segment_gaze_movements_export.py
index f8bdb48..85fe74b 100644
--- a/src/argaze/utils/tobii_segment_gaze_movements_export.py
+++ b/src/argaze/utils/tobii_segment_gaze_movements_export.py
@@ -26,9 +26,8 @@ def main():
parser.add_argument('-t', '--time_range', metavar=('START_TIME', 'END_TIME'), nargs=2, type=float, default=(0., None), help='start and end time (in second)')
parser.add_argument('-dev', '--deviation_max_threshold', metavar='DISPERSION_THRESHOLD', type=int, default=None, help='maximal distance for fixation identification in pixel')
parser.add_argument('-dmin', '--duration_min_threshold', metavar='DURATION_MIN_THRESHOLD', type=int, default=200, help='minimal duration for fixation identification in millisecond')
- parser.add_argument('-v', '--visu', metavar='VISU', type=bool, default=False, help='enable data visualisation')
parser.add_argument('-o', '--output', metavar='OUT', type=str, default=None, help='destination folder path (segment folder by default)')
- parser.add_argument('-w', '--window', metavar='DISPLAY', type=bool, default=True, help='enable window display', action=argparse.BooleanOptionalAction)
+ parser.add_argument('-d', '--debug', metavar='DEBUG', type=bool, default=False, help='Enable visualisation and console outputs')
args = parser.parse_args()
# Manage destination path
@@ -291,13 +290,14 @@ def main():
# Start movement identification
ts_fixations = GazeFeatures.TimeStampedGazeMovements()
ts_saccades = GazeFeatures.TimeStampedGazeMovements()
- ts_movements = GazeFeatures.TimeStampedGazeMovements()
ts_status = GazeFeatures.TimeStampedGazeStatus()
# Initialise progress bar
MiscFeatures.printProgressBar(0, tobii_segment_video.duration, prefix = '\nGazeMovements identification:', suffix = 'Complete', length = 100)
- for gaze_movement in movement_identifier(ts_gaze_positions):
+ for ts, gaze_position in ts_gaze_positions.items():
+
+ gaze_movement = movement_identifier.identify(ts, gaze_position)
if isinstance(gaze_movement, DispersionBasedGazeMovementIdentifier.Fixation):
@@ -319,24 +319,13 @@ def main():
ts_status[ts] = GazeFeatures.GazeStatus.from_position(position, 'Saccade', len(ts_saccades))
- else:
-
- start_ts, start_position = gaze_movement.positions.first
-
- ts_movements[start_ts] = gaze_movement
-
- for ts, position in gaze_movement.positions.items():
-
- ts_status[ts] = GazeFeatures.GazeStatus.from_position(position, 'GazeMovement', len(ts_movements))
-
# Update Progress Bar
- progress = start_ts - int(args.time_range[0] * 1e6)
+ progress = ts - int(args.time_range[0] * 1e6)
MiscFeatures.printProgressBar(progress, tobii_segment_video.duration, prefix = 'Gaze movements identification:', suffix = 'Complete', length = 100)
print(f'\nGazeMovements identification metrics:')
print(f'\t{len(ts_fixations)} fixations found')
print(f'\t{len(ts_saccades)} saccades found')
- print(f'\t{len(ts_movements)} movements found')
ts_fixations.to_json(fixations_json_filepath)
print(f'\nGaze fixations saved into {fixations_json_filepath}')
@@ -344,9 +333,6 @@ def main():
ts_saccades.to_json(saccades_json_filepath)
print(f'Gaze saccades saved into {saccades_json_filepath}')
- ts_movements.to_json(movements_json_filepath)
- print(f'Gaze movements saved into {movements_json_filepath}')
-
ts_status.to_json(gaze_status_json_filepath)
print(f'Gaze status saved into {gaze_status_json_filepath}')
@@ -354,7 +340,7 @@ def main():
ts_status.as_dataframe().to_csv(f'{destination_path}/gaze_status.csv')
# Edit data visualisation
- if args.visu:
+ if args.debug:
# Prepare video exportation at the same format than segment video
output_video = TobiiVideo.TobiiVideoOutput(gaze_status_video_filepath, tobii_segment_video.stream)
@@ -373,7 +359,6 @@ def main():
fixations_exist = len(ts_fixations) > 0
saccades_exist = len(ts_saccades) > 0
- movements_exist = len(ts_movements) > 0
status_exist = len(ts_status) > 0
if fixations_exist:
@@ -383,9 +368,6 @@ def main():
if saccades_exist:
current_saccade_ts, current_saccade = ts_saccades.pop_first()
- if movements_exist:
- current_movements_ts, current_movements = ts_movements.pop_first()
-
# Iterate on video frames
for video_ts, video_frame in tobii_segment_video.frames():
@@ -461,17 +443,6 @@ def main():
if video_ts >= current_saccade_ts and video_ts < current_saccade_ts + current_saccade.duration:
pass
- if movements_exist:
-
- # Check next movements movement
- if video_ts >= current_movements_ts + current_movements.duration and len(ts_movements) > 0:
-
- current_movements_ts, current_movements = ts_movements.pop_first()
-
- # While current time belongs to the current movements movement
- if video_ts >= current_movements_ts and video_ts < current_movements_ts + current_movements.duration:
- pass
-
# Draw gaze status until next frame
try:
@@ -569,14 +540,12 @@ def main():
cv.circle(visu_matrix, (selected_deviation_max_threshold + 400, 230), 2, (0, 255, 255), -1)
cv.circle(visu_matrix, (selected_deviation_max_threshold + 400, 230), selected_deviation_max_threshold, (255, 150, 150), 1)
- if args.window:
-
- # Close window using 'Esc' key
- if cv.waitKey(1) == 27:
- break
+ # Close window using 'Esc' key
+ if cv.waitKey(1) == 27:
+ break
- # Display video
- cv.imshow(f'Segment {tobii_segment.id} movements', visu_matrix)
+ # Display video
+ cv.imshow(f'Segment {tobii_segment.id} movements', visu_matrix)
# Write video
output_video.write(visu_matrix)
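
Note on the hunks above: gaze movement identification switches from iterating the identifier over the whole position buffer to feeding positions one at a time through identify(). A minimal sketch of the new loop, assuming identify() returns a completed movement (or nothing) after each sample and that the 'Fixation' status label mirrors the 'Saccade' one shown in the hunk; import paths are assumptions:

    from argaze import GazeFeatures  # import paths assumed
    from argaze.GazeAnalysis import DispersionBasedGazeMovementIdentifier

    def identify_movements(movement_identifier, ts_gaze_positions):
        """Per-sample identification loop introduced by this diff.
        'movement_identifier' is the identifier instance the script
        builds earlier from its threshold arguments."""

        ts_fixations = GazeFeatures.TimeStampedGazeMovements()
        ts_saccades = GazeFeatures.TimeStampedGazeMovements()
        ts_status = GazeFeatures.TimeStampedGazeStatus()

        for ts, gaze_position in ts_gaze_positions.items():

            # Feed one timestamped position; a movement comes back once complete
            gaze_movement = movement_identifier.identify(ts, gaze_position)

            if isinstance(gaze_movement, DispersionBasedGazeMovementIdentifier.Fixation):

                start_ts, _ = gaze_movement.positions.first
                ts_fixations[start_ts] = gaze_movement

                for pos_ts, position in gaze_movement.positions.items():
                    ts_status[pos_ts] = GazeFeatures.GazeStatus.from_position(position, 'Fixation', len(ts_fixations))

            elif isinstance(gaze_movement, DispersionBasedGazeMovementIdentifier.Saccade):

                start_ts, _ = gaze_movement.positions.first
                ts_saccades[start_ts] = gaze_movement

                for pos_ts, position in gaze_movement.positions.items():
                    ts_status[pos_ts] = GazeFeatures.GazeStatus.from_position(position, 'Saccade', len(ts_saccades))

        return ts_fixations, ts_saccades, ts_status
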
diff --git a/src/argaze/utils/tobii_stream_arscene_display.py b/src/argaze/utils/tobii_stream_arscene_display.py
index dabaa9b..e7a3bfb 100644
--- a/src/argaze/utils/tobii_stream_arscene_display.py
+++ b/src/argaze/utils/tobii_stream_arscene_display.py
@@ -20,9 +20,9 @@ def main():
# Manage arguments
parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0])
parser.add_argument('-t', '--tobii_ip', metavar='TOBII_IP', type=str, default=None, help='tobii glasses ip')
- parser.add_argument('-p', '--project_path', metavar='ARGAZE_PROJECT', type=str, default=None, help='json argaze project filepath')
+ parser.add_argument('-p', '--env_path', metavar='ENVIRONMENT_PATH', type=str, default=None, help='json argaze environment filepath')
parser.add_argument('-b', '--borders', metavar='BORDERS', type=float, default=16.666, help='define left and right borders mask (%) to not detect aruco out of these borders')
- parser.add_argument('-w', '--window', metavar='DISPLAY', type=bool, default=True, help='enable window display', action=argparse.BooleanOptionalAction)
+ parser.add_argument('-d', '--debug', metavar='DEBUG', type=bool, default=False, help='Enable visualisation and console outputs')
args = parser.parse_args()
# Create tobii controller (with auto discovery network process if no ip argument is provided)
@@ -52,11 +52,14 @@ def main():
# Enable tobii video stream
tobii_video_stream = tobii_controller.enable_video_stream()
- # Load ar scene
- ar_scene = ArScene.ArScene.from_json(args.project_path)
+ # Load ArEnvironment
+ ar_env = ArFeatures.ArEnvironment.from_json(args.env_path)
- print('\n')
- print(ar_scene)
+ if args.debug:
+ print(ar_env)
+
+ # Work with first scene only
+ _, ar_scene = next(iter(ar_env.items()))
# Start streaming
tobii_controller.start_streaming()
@@ -83,8 +86,14 @@ def main():
# Process video and data frame
try:
+ # Detect ArUco markers in the frame
+ ar_env.aruco_detector.detect_markers(video_frame.matrix)
+
+ # Estimate marker poses
+ ar_env.aruco_detector.estimate_markers_pose()
+
# Estimate scene pose from ArUco markers into frame.
- tvec, rmat, _ = ar_scene.estimate_pose(video_frame.matrix)
+ tvec, rmat, _ = ar_scene.estimate_pose(ar_env.aruco_detector.detected_markers)
# Project AOI scene into frame according estimated pose
aoi_scene_projection = ar_scene.project(tvec, rmat, visual_hfov=TobiiSpecifications.VISUAL_HFOV)
@@ -99,13 +108,13 @@ def main():
aoi_scene_projection.draw(visu_frame.matrix, (0, 0), color=(0, 255, 255))
# Draw detected markers
- ar_scene.aruco_detector.draw_detected_markers(visu_frame.matrix)
+ ar_env.aruco_detector.draw_detected_markers(visu_frame.matrix)
# Catch exceptions raised by estimate_pose and project methods
- except (ArScene.PoseEstimationFailed, ArScene.SceneProjectionFailed) as e:
+ except (ArFeatures.PoseEstimationFailed, ArFeatures.SceneProjectionFailed) as e:
# Draw detected markers
- ar_scene.aruco_detector.draw_detected_markers(visu_frame.matrix)
+ ar_env.aruco_detector.draw_detected_markers(visu_frame.matrix)
cv.rectangle(visu_frame.matrix, (0, 50), (550, 100), (127, 127, 127), -1)
cv.putText(visu_frame.matrix, str(e), (20, 80), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
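
A side note on the recurring argument change: all three scripts replace the --window flag (argparse.BooleanOptionalAction, available only since Python 3.9) with a --debug option declared as type=bool. Because argparse applies bool() to the raw string, any non-empty value parses as True, so --debug False still enables debug output; only omitting the option keeps the default. A minimal sketch of that behaviour, with the store_true idiom usually used for on/off switches shown for comparison:

    import argparse

    parser = argparse.ArgumentParser()

    # As declared in the diff: the option takes a value, and bool('False') is True
    parser.add_argument('-d', '--debug', metavar='DEBUG', type=bool, default=False,
                        help='Enable visualisation and console outputs')

    print(parser.parse_args(['--debug', 'False']).debug)  # True: any non-empty string is truthy
    print(parser.parse_args([]).debug)                    # False: omitting the option keeps the default

    # The usual idiom for an on/off switch takes no value at all
    flag_parser = argparse.ArgumentParser()
    flag_parser.add_argument('-d', '--debug', action='store_true',
                             help='enable visualisation and console outputs')
    print(flag_parser.parse_args(['-d']).debug)  # True
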