From 8520630a08803a3b0962cedee53c3047d15bc3dd Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Wed, 21 Sep 2022 11:52:36 +0200 Subject: Renaming utils scripts for better consistency. --- src/argaze/utils/README.md | 28 +- src/argaze/utils/aruco_calibration_board_export.py | 35 ++ src/argaze/utils/aruco_markers_export.py | 31 ++ src/argaze/utils/calibrate_tobii_camera.py | 120 ------ src/argaze/utils/edit_tobii_segment_aruco_pose.py | 430 -------------------- src/argaze/utils/explore_tobii_sdcard.py | 65 --- src/argaze/utils/export_aruco_markers.py | 31 -- src/argaze/utils/export_calibration_board.py | 35 -- .../export_tobii_segment_aruco_visual_scan.py | 435 --------------------- src/argaze/utils/export_tobii_segment_movements.py | 255 ------------ src/argaze/utils/export_tobii_segment_plots.py | 128 ------ .../utils/live_tobii_aruco_aoi_ivy_application.py | 122 ------ .../utils/live_tobii_aruco_aoi_ivy_controller.py | 286 -------------- src/argaze/utils/live_tobii_session.py | 84 ---- src/argaze/utils/record_tobii_session.py | 78 ---- src/argaze/utils/replay_tobii_session.py | 116 ------ src/argaze/utils/tobii_camera_calibrate.py | 120 ++++++ src/argaze/utils/tobii_sdcard_explore.py | 65 +++ src/argaze/utils/tobii_segment_aruco_aoi_edit.py | 430 ++++++++++++++++++++ src/argaze/utils/tobii_segment_aruco_aoi_export.py | 435 +++++++++++++++++++++ src/argaze/utils/tobii_segment_display.py | 116 ++++++ .../utils/tobii_segment_gaze_movements_export.py | 255 ++++++++++++ src/argaze/utils/tobii_segment_gaze_plot_export.py | 128 ++++++ src/argaze/utils/tobii_segment_record.py | 78 ++++ src/argaze/utils/tobii_stream_aruco_aoi_display.py | 104 +++++ .../tobii_stream_aruco_aoi_ivy_application.py | 122 ++++++ .../utils/tobii_stream_aruco_aoi_ivy_controller.py | 286 ++++++++++++++ src/argaze/utils/tobii_stream_display.py | 84 ++++ 28 files changed, 2303 insertions(+), 2199 deletions(-) create mode 100644 src/argaze/utils/aruco_calibration_board_export.py create mode 100644 src/argaze/utils/aruco_markers_export.py delete mode 100644 src/argaze/utils/calibrate_tobii_camera.py delete mode 100644 src/argaze/utils/edit_tobii_segment_aruco_pose.py delete mode 100644 src/argaze/utils/explore_tobii_sdcard.py delete mode 100644 src/argaze/utils/export_aruco_markers.py delete mode 100644 src/argaze/utils/export_calibration_board.py delete mode 100644 src/argaze/utils/export_tobii_segment_aruco_visual_scan.py delete mode 100644 src/argaze/utils/export_tobii_segment_movements.py delete mode 100644 src/argaze/utils/export_tobii_segment_plots.py delete mode 100644 src/argaze/utils/live_tobii_aruco_aoi_ivy_application.py delete mode 100644 src/argaze/utils/live_tobii_aruco_aoi_ivy_controller.py delete mode 100644 src/argaze/utils/live_tobii_session.py delete mode 100644 src/argaze/utils/record_tobii_session.py delete mode 100644 src/argaze/utils/replay_tobii_session.py create mode 100644 src/argaze/utils/tobii_camera_calibrate.py create mode 100644 src/argaze/utils/tobii_sdcard_explore.py create mode 100644 src/argaze/utils/tobii_segment_aruco_aoi_edit.py create mode 100644 src/argaze/utils/tobii_segment_aruco_aoi_export.py create mode 100644 src/argaze/utils/tobii_segment_display.py create mode 100644 src/argaze/utils/tobii_segment_gaze_movements_export.py create mode 100644 src/argaze/utils/tobii_segment_gaze_plot_export.py create mode 100644 src/argaze/utils/tobii_segment_record.py create mode 100644 src/argaze/utils/tobii_stream_aruco_aoi_display.py create mode 100644
src/argaze/utils/tobii_stream_aruco_aoi_ivy_application.py create mode 100644 src/argaze/utils/tobii_stream_aruco_aoi_ivy_controller.py create mode 100644 src/argaze/utils/tobii_stream_display.py diff --git a/src/argaze/utils/README.md b/src/argaze/utils/README.md index 321e83a..32056d9 100644 --- a/src/argaze/utils/README.md +++ b/src/argaze/utils/README.md @@ -15,78 +15,78 @@ python ./src/argaze/utils/UTILS_SCRIPT.py -h - Export 50 4x4 markers at 300 dpi into an export/markers folder: ``` -python ./src/argaze/utils/export_aruco_markers.py -o export/markers +python ./src/argaze/utils/aruco_markers_export.py -o export/markers ``` - Export a 7-column, 5-row calibration board with 5cm squares and 3cm markers inside at 50 dpi into an export folder: ``` -python ./src/argaze/utils/export_calibration_board.py 7 5 5 3 -o export +python ./src/argaze/utils/aruco_calibration_board_export.py 7 5 5 3 -o export ``` - Calibrate the Tobii Glasses Pro 2 camera (-t IP_ADDRESS) using a 7-column, 5-row calibration board with 5cm squares and 3cm markers inside. Then, export its optical parameters into a tobii_camera.json file: ``` -python ./src/argaze/utils/calibrate_tobii_camera.py 7 5 5 3 -t IP_ADDRESS -o export/tobii_camera.json +python ./src/argaze/utils/tobii_camera_calibrate.py 7 5 5 3 -t IP_ADDRESS -o export/tobii_camera.json ``` - Display the Tobii Glasses Pro 2 camera video stream (-t IP_ADDRESS) with a live gaze pointer: ``` -python ./src/argaze/utils/live_tobii_session.py -t IP_ADDRESS +python ./src/argaze/utils/tobii_stream_display.py -t IP_ADDRESS ``` - Record a Tobii Glasses Pro 2 'myProject' session for a 'myUser' participant on the Tobii interface's SD card (-t IP_ADDRESS): ``` -python ./src/argaze/utils/record_tobii_session.py -t IP_ADDRESS -p myProject -u myUser +python ./src/argaze/utils/tobii_segment_record.py -t IP_ADDRESS -p myProject -u myUser ``` - Explore the Tobii Glasses Pro 2 interface's SD card (-d DRIVE_PATH, -p PROJECT_PATH, -r RECORDING_PATH, -s SEGMENT_PATH): ``` -python ./src/argaze/utils/explore_tobii_sdcard.py -d DRIVE_PATH +python ./src/argaze/utils/tobii_sdcard_explore.py -d DRIVE_PATH ``` ``` -python ./src/argaze/utils/explore_tobii_sdcard.py -p PROJECT_PATH +python ./src/argaze/utils/tobii_sdcard_explore.py -p PROJECT_PATH ``` ``` -python ./src/argaze/utils/explore_tobii_sdcard.py -r RECORDING_PATH +python ./src/argaze/utils/tobii_sdcard_explore.py -r RECORDING_PATH ``` ``` -python ./src/argaze/utils/explore_tobii_sdcard.py -s SEGMENT_PATH +python ./src/argaze/utils/tobii_sdcard_explore.py -s SEGMENT_PATH ``` - Replay a time range selection (-r IN OUT) of a Tobii Glasses Pro 2 session (-s SEGMENT_PATH), synchronizing video and gaze data together: ``` -python ./src/argaze/utils/replay_tobii_session.py -s SEGMENT_PATH -r IN OUT +python ./src/argaze/utils/tobii_segment_display.py -s SEGMENT_PATH -r IN OUT ``` - Export Tobii segment fixations and saccades (-s SEGMENT_PATH) from a time range selection (-r IN OUT) as fixations.csv and saccades.csv files saved into the segment folder (a sketch of the underlying dispersion-based identification follows the command below): ``` -python ./src/argaze/utils/export_tobii_segment_movements.py -s SEGMENT_PATH -r IN OUT +python ./src/argaze/utils/tobii_segment_gaze_movements_export.py -s SEGMENT_PATH -r IN OUT ```
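The fixation/saccade export relies on a dispersion-based identifier, driven by the -d dispersion (10 px) and -t duration (100 ms) defaults of tobii_segment_gaze_movements_export.py shown further down. As a minimal sketch of that I-DT idea, assuming timestamped (x, y) pixel positions — an illustration, not the argaze GazeFeatures implementation:

```
# Minimal I-DT (dispersion-threshold identification) sketch.
# Assumes parallel lists of timestamps (ms) and (x, y) gaze positions (px).

def dispersion(points):
    xs, ys = [p[0] for p in points], [p[1] for p in points]
    return (max(xs) - min(xs)) + (max(ys) - min(ys))

def identify_fixations(timestamps, points, max_dispersion=10, min_duration=100):
    fixations = []  # (start_ts, end_ts, centroid) triplets
    i = 0
    while i < len(points):
        # Grow an initial window covering at least min_duration milliseconds
        j = i
        while j < len(points) and timestamps[j] - timestamps[i] < min_duration:
            j += 1
        if j == len(points):
            break
        if dispersion(points[i:j + 1]) <= max_dispersion:
            # Expand the window while gaze stays within the dispersion threshold
            while j + 1 < len(points) and dispersion(points[i:j + 2]) <= max_dispersion:
                j += 1
            window = points[i:j + 1]
            centroid = (sum(p[0] for p in window) / len(window),
                        sum(p[1] for p in window) / len(window))
            fixations.append((timestamps[i], timestamps[j], centroid))
            i = j + 1  # samples between consecutive fixations form the saccades
        else:
            i += 1
    return fixations
```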
- Track ArUco markers into a Tobii camera video segment (-s SEGMENT_PATH) within a time range selection (-r IN OUT). Load the AOI scene .obj file related to each marker (-mi MARKER_ID, PATH_TO_AOI_SCENE), position each scene virtually relative to its detected ArUco marker, then project the scene into the camera frame. Then, detect if the Tobii gaze point focuses on AOIs to build the segment visual scan and export it as visual_scan.csv, visual_scan.jpg and visual_scan.mp4 files: ``` -python ./src/argaze/utils/export_tobii_segment_aruco_visual_scan.py -s SEGMENT_PATH -c export/tobii_camera.json -r IN OUT -ms 5 -mi '{"MARKER_ID":"PATH_TO_AOI_SCENE.obj",...}' +python ./src/argaze/utils/tobii_segment_aruco_aoi_export.py -s SEGMENT_PATH -c export/tobii_camera.json -r IN OUT -ms 5 -mi '{"MARKER_ID":"PATH_TO_AOI_SCENE.obj",...}' ``` - Track ArUco markers into the Tobii camera video stream (-t IP_ADDRESS). Load the AOI scene .obj file related to each marker (-mi MARKER_ID, PATH_TO_AOI_SCENE), position each scene virtually relative to its detected ArUco marker, then project the scene into the camera frame. Then, detect if the Tobii gaze point is inside any AOI and send the 'look at' pointer over the default Ivy bus: ``` -python ./src/argaze/utils/live_tobii_aruco_aoi_ivy_controller.py -t IP_ADDRESS -c export/tobii_camera.json -ms 5 -mi '{"MARKER_ID":"PATH_TO_AOI_SCENE.obj",...}' +python ./src/argaze/utils/tobii_stream_aruco_aoi_ivy_controller.py -t IP_ADDRESS -c export/tobii_camera.json -ms 5 -mi '{"MARKER_ID":"PATH_TO_AOI_SCENE.obj",...}' ``` - Define an AOI scene from an ArUco marker (-a AOI_SCENE) and bind to the default Ivy bus to receive live 'look at' pointer data: ``` -python ./src/argaze/utils/live_tobii_aruco_aoi_ivy_application.py.py -a AOI_SCENE -i MARKERS_ID +python ./src/argaze/utils/tobii_stream_aruco_aoi_ivy_application.py -a AOI_SCENE -i MARKERS_ID ```
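Since every utility keeps its command-line interface and only its filename changes, existing shell scripts can be migrated mechanically. A throwaway sketch of such a migration (hypothetical helper, not part of argaze; the old-to-new mapping is taken from the diffstat above):

```
#!/usr/bin/env python
# Hypothetical helper: rewrite old argaze utils script names to the renamed ones.
import pathlib
import sys

RENAMES = {
    'export_aruco_markers.py': 'aruco_markers_export.py',
    'export_calibration_board.py': 'aruco_calibration_board_export.py',
    'calibrate_tobii_camera.py': 'tobii_camera_calibrate.py',
    'explore_tobii_sdcard.py': 'tobii_sdcard_explore.py',
    'edit_tobii_segment_aruco_pose.py': 'tobii_segment_aruco_aoi_edit.py',
    'export_tobii_segment_aruco_visual_scan.py': 'tobii_segment_aruco_aoi_export.py',
    'replay_tobii_session.py': 'tobii_segment_display.py',
    'export_tobii_segment_movements.py': 'tobii_segment_gaze_movements_export.py',
    'export_tobii_segment_plots.py': 'tobii_segment_gaze_plot_export.py',
    'record_tobii_session.py': 'tobii_segment_record.py',
    'live_tobii_aruco_aoi_ivy_application.py': 'tobii_stream_aruco_aoi_ivy_application.py',
    'live_tobii_aruco_aoi_ivy_controller.py': 'tobii_stream_aruco_aoi_ivy_controller.py',
    'live_tobii_session.py': 'tobii_stream_display.py',
}

# Rewrite each file given on the command line in place
for path in sys.argv[1:]:
    p = pathlib.Path(path)
    text = p.read_text()
    for old, new in RENAMES.items():
        text = text.replace(old, new)
    p.write_text(text)
```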
diff --git a/src/argaze/utils/aruco_calibration_board_export.py b/src/argaze/utils/aruco_calibration_board_export.py new file mode 100644 index 0000000..6d925bd --- /dev/null +++ b/src/argaze/utils/aruco_calibration_board_export.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python + +import argparse +import os + +from argaze.ArUcoMarkers import ArUcoBoard + +def main(): + """Generates an ArUco board to calibrate a camera.""" + + # manage arguments + parser = argparse.ArgumentParser(description=main.__doc__) + parser.add_argument('columns', metavar='COLS_NUMBER', type=int, default=7, help='number of columns') + parser.add_argument('rows', metavar='ROWS_NUMBER', type=int, default=5, help='number of rows') + parser.add_argument('square_size', metavar='SQUARE_SIZE', type=int, default=5, help='square size (cm)') + parser.add_argument('marker_size', metavar='MARKER_SIZE', type=int, default=3, help='marker size (cm)') + parser.add_argument('-o', '--output', metavar='OUT', type=str, default='.', help='destination path') + parser.add_argument('-d', '--dictionary', metavar='DICT', type=str, default='DICT_ARUCO_ORIGINAL', help='aruco marker dictionary (DICT_4X4_50, DICT_4X4_100, DICT_4X4_250, DICT_4X4_1000, DICT_5X5_50, DICT_5X5_100, DICT_5X5_250, DICT_5X5_1000, DICT_6X6_50, DICT_6X6_100, DICT_6X6_250, DICT_6X6_1000, DICT_7X7_50, DICT_7X7_100, DICT_7X7_250, DICT_7X7_1000, DICT_ARUCO_ORIGINAL, DICT_APRILTAG_16h5, DICT_APRILTAG_25h9, DICT_APRILTAG_36h10, DICT_APRILTAG_36h11)') + parser.add_argument('-r', '--resolution', metavar='RES', type=int, default=50, help='picture resolution in dpi') + args = parser.parse_args() + + # manage destination folder + if not os.path.exists(args.output): + os.makedirs(args.output) + print(f'{args.output} folder created') + + # create aruco board + aruco_board = ArUcoBoard.ArUcoBoard(args.dictionary, args.columns, args.rows, args.square_size, args.marker_size) + + # export aruco board + aruco_board.export(args.output, args.resolution) + +if __name__ == '__main__': + + main() \ No newline at end of file diff --git a/src/argaze/utils/aruco_markers_export.py b/src/argaze/utils/aruco_markers_export.py new file mode 100644 index 0000000..78c996f --- /dev/null +++ b/src/argaze/utils/aruco_markers_export.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python + +import argparse +import os + +from argaze.ArUcoMarkers import ArUcoMarkersDictionary + +def main(): + """Generates ArUco markers to place into a scene.""" + + # manage arguments + parser = argparse.ArgumentParser(description=main.__doc__) + parser.add_argument('-o', '--output', metavar='OUT', type=str, default='.', help='destination path') + parser.add_argument('-d', '--dictionary', metavar='DICT', type=str, default='DICT_ARUCO_ORIGINAL', help='aruco marker dictionary (DICT_4X4_50, DICT_4X4_100, DICT_4X4_250, DICT_4X4_1000, DICT_5X5_50, DICT_5X5_100, DICT_5X5_250, DICT_5X5_1000, DICT_6X6_50, DICT_6X6_100, DICT_6X6_250, DICT_6X6_1000, DICT_7X7_50, DICT_7X7_100, DICT_7X7_250, DICT_7X7_1000, DICT_ARUCO_ORIGINAL, DICT_APRILTAG_16h5, DICT_APRILTAG_25h9, DICT_APRILTAG_36h10, DICT_APRILTAG_36h11)') + parser.add_argument('-r', '--resolution', metavar='RES', type=int, default=300, help='picture resolution in dpi') + args = parser.parse_args() + + # manage destination folder + if not os.path.exists(args.output): + os.makedirs(args.output) + print(f'{args.output} folder created') + + # create aruco markers dictionary + aruco_markers_dict = ArUcoMarkersDictionary.ArUcoMarkersDictionary(args.dictionary) + + # export markers + aruco_markers_dict.export_all(args.output, args.resolution) + +if __name__ == '__main__': + + main() \ No newline at end of file diff --git a/src/argaze/utils/calibrate_tobii_camera.py b/src/argaze/utils/calibrate_tobii_camera.py deleted file mode 100644 index 61fc56c..0000000 --- a/src/argaze/utils/calibrate_tobii_camera.py +++ /dev/null @@ -1,120 +0,0 @@ -#!/usr/bin/env python - -import argparse -import os -import time - -from argaze.TobiiGlassesPro2 import TobiiController, TobiiVideo -from argaze.ArUcoMarkers import ArUcoBoard, ArUcoTracker, ArUcoCamera - -import cv2 as cv - -def main(): - """ - Captures board pictures and finally outputs camera calibration data into a .json file. - - - Export and print a calibration board using - - Place the calibration board in order to view it entirely on screen and move the camera in many configurations (orientation and distance) : the script will automatically take pictures. Do this step with a good lighting and a clear background. - - Once enough pictures have been captured (~20), press Esc key then, wait for the camera calibration processing. - - Finally, check rms parameter: it should be between 0. and 1. if the calibration suceeded (lower is better).
- - ### Reference: - - [Camera calibration using ArUco marker tutorial](https://automaticaddison.com/how-to-perform-camera-calibration-using-opencv/) - """ - - # manage arguments - parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0]) - parser.add_argument('columns', metavar='COLS_NUMBER', type=int, default=7, help='number of columns') - parser.add_argument('rows', metavar='ROWS_NUMBER', type=int, default=5, help='number of rows') - parser.add_argument('square_size', metavar='SQUARE_SIZE', type=float, default=5, help='square size (cm)') - parser.add_argument('marker_size', metavar='MARKER_SIZE', type=float, default=3, help='marker size (cm)') - parser.add_argument('-t', '--tobii_ip', metavar='TOBII_IP', type=str, default='192.168.1.10', help='tobii glasses ip') - parser.add_argument('-o', '--output', metavar='OUT', type=str, default='camera.json', help='destination filepath') - parser.add_argument('-d', '--dictionary', metavar='DICT', type=str, default='DICT_ARUCO_ORIGINAL', help='aruco marker dictionnary (DICT_4X4_50, DICT_4X4_100, DICT_4X4_250, DICT_4X4_1000, DICT_5X5_50, DICT_5X5_100, DICT_5X5_250, DICT_5X5_1000, DICT_6X6_50, DICT_6X6_100, DICT_6X6_250, DICT_6X6_1000, DICT_7X7_50, DICT_7X7_100, DICT_7X7_250, DICT_7X7_1000, DICT_ARUCO_ORIGINAL,DICT_APRILTAG_16h5, DICT_APRILTAG_25h9, DICT_APRILTAG_36h10, DICT_APRILTAG_36h11)') - args = parser.parse_args() - - # Create tobii controller - tobii_controller = TobiiController.TobiiController(args.tobii_ip, 'myProject', 'mySelf') - - tobii_controller.set_video_freq_25() - - # Enable tobii video stream - tobii_video_stream = tobii_controller.enable_video_stream() - - # Create aruco camera - aruco_camera = ArUcoCamera.ArUcoCamera() - - # Create aruco board - aruco_board = ArUcoBoard.ArUcoBoard(args.dictionary, args.columns, args.rows, args.square_size, args.marker_size) - - # Create aruco tracker - aruco_tracker = ArUcoTracker.ArUcoTracker(args.dictionary, args.marker_size, aruco_camera) - - # Start tobii glasses streaming - tobii_controller.start_streaming() - - print("Camera calibration starts") - print("Waiting for calibration board...") - - expected_markers_number = aruco_board.get_markers_number() - expected_corners_number = aruco_board.get_corners_number() - - # capture loop - try: - - while tobii_video_stream.is_alive(): - - # capture frame with a full displayed board - video_ts, video_frame = tobii_video_stream.read() - - # track all markers in the board - aruco_tracker.track_board(video_frame.matrix, aruco_board, expected_markers_number) - - # draw only markers - aruco_tracker.draw(video_frame.matrix) - - # draw current calibration data count - cv.putText(video_frame.matrix, f'Capture: {aruco_camera.get_calibration_data_count()}', (50, 50), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv.LINE_AA) - cv.imshow('Tobii Camera Calibration', video_frame.matrix) - - # if all board corners are detected - if aruco_tracker.get_board_corners_number() == expected_corners_number: - - # draw board corners to notify a capture is done - aruco_tracker.draw_board(video_frame.matrix) - - # append data - aruco_camera.store_calibration_data(aruco_tracker.get_board_corners(), aruco_tracker.get_board_corners_ids()) - - cv.imshow('Tobii Camera Calibration', video_frame.matrix) - - # close window using 'Esc' key - if cv.waitKey(1) == 27: - break - - # exit on 'ctrl+C' interruption - except KeyboardInterrupt: - pass - - # stop frame display - cv.destroyAllWindows() - - # Stop tobii glasses streaming - tobii_controller.stop_streaming() - - 
print('\nCalibrating camera...') - aruco_camera.calibrate(aruco_board, video_frame.width, video_frame.height) - - print('\nCalibration succeeded!') - print(f'\nRMS:\n{aruco_camera.get_rms()}') - print(f'\nDimensions:\n{video_frame.width}x{video_frame.height}') - print(f'\nCamera matrix:\n{aruco_camera.get_K()}') - print(f'\nDistortion coefficients:\n{aruco_camera.get_D()}') - - aruco_camera.save_calibration_file(args.output) - - print(f'\nCalibration data exported into {args.output} file') - -if __name__ == '__main__': - - main() diff --git a/src/argaze/utils/edit_tobii_segment_aruco_pose.py b/src/argaze/utils/edit_tobii_segment_aruco_pose.py deleted file mode 100644 index 61d695d..0000000 --- a/src/argaze/utils/edit_tobii_segment_aruco_pose.py +++ /dev/null @@ -1,430 +0,0 @@ -#!/usr/bin/env python - -import argparse -import os -import json -import time - -from argaze import DataStructures -from argaze import GazeFeatures -from argaze.TobiiGlassesPro2 import TobiiEntities, TobiiVideo -from argaze.ArUcoMarkers import * -from argaze.AreaOfInterest import * -from argaze.utils import MiscFeatures - -import numpy -import cv2 as cv - -def main(): - """ - Open video file with ArUco marker scene inside - """ - - # Manage arguments - parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0]) - parser.add_argument('-s', '--segment_path', metavar='SEGMENT_PATH', type=str, default=None, help='segment path') - parser.add_argument('-r', '--time_range', metavar=('START_TIME', 'END_TIME'), nargs=2, type=float, default=(0., None), help='start and end time (in second)') - parser.add_argument('-c', '--camera_calibration', metavar='CAM_CALIB', type=str, default=None, help='json camera calibration filepath') - parser.add_argument('-p', '--aruco_tracker_configuration', metavar='TRACK_CONFIG', type=str, default=None, help='json aruco tracker configuration filepath') - parser.add_argument('-md', '--marker_dictionary', metavar='MARKER_DICT', type=str, default='DICT_ARUCO_ORIGINAL', help='aruco marker dictionnary (DICT_4X4_50, DICT_4X4_100, DICT_4X4_250, DICT_4X4_1000, DICT_5X5_50, DICT_5X5_100, DICT_5X5_250, DICT_5X5_1000, DICT_6X6_50, DICT_6X6_100, DICT_6X6_250, DICT_6X6_1000, DICT_7X7_50, DICT_7X7_100, DICT_7X7_250, DICT_7X7_1000, DICT_ARUCO_ORIGINAL,DICT_APRILTAG_16h5, DICT_APRILTAG_25h9, DICT_APRILTAG_36h10, DICT_APRILTAG_36h11)') - parser.add_argument('-ms', '--marker_size', metavar='MARKER_SIZE', type=float, default=6, help='aruco marker size (cm)') - parser.add_argument('-mi', '--marker_id_scene', metavar='MARKER_ID_SCENE', type=json.loads, help='{"marker": "aoi scene filepath"} dictionary') - parser.add_argument('-o', '--output', metavar='OUT', type=str, default=None, help='destination folder path (segment folder by default)') - parser.add_argument('-w', '--window', metavar='DISPLAY', type=bool, default=True, help='enable window display', action=argparse.BooleanOptionalAction) - args = parser.parse_args() - - if args.segment_path != None: - - # Manage markers id to track - if args.marker_id_scene == None: - print(f'Track any Aruco markers from the {args.marker_dictionary} dictionary') - else: - print(f'Track Aruco markers {list(args.marker_id_scene.keys())} from the {args.marker_dictionary} dictionary') - - # Manage destination path - destination_path = '.' 
- if args.output != None: - - if not os.path.exists(os.path.dirname(args.output)): - - os.makedirs(os.path.dirname(args.output)) - print(f'{os.path.dirname(args.output)} folder created') - - destination_path = args.output - - else: - - destination_path = args.segment_path - - # Export into a dedicated time range folder - if args.time_range[1] != None: - timerange_path = f'[{int(args.time_range[0])}s - {int(args.time_range[1])}s]' - else: - timerange_path = f'[all]' - - destination_path = f'{destination_path}/{timerange_path}' - - if not os.path.exists(destination_path): - - os.makedirs(destination_path) - print(f'{destination_path} folder created') - - #vs_data_filepath = f'{destination_path}/visual_scan.csv' - - # Load a tobii segment - tobii_segment = TobiiEntities.TobiiSegment(args.segment_path, int(args.time_range[0] * 1e6), int(args.time_range[1] * 1e6) if args.time_range[1] != None else None) - - # Load a tobii segment video - tobii_segment_video = tobii_segment.load_video() - print(f'Video properties:\n\tduration: {tobii_segment_video.get_duration()/1e6} s\n\twidth: {tobii_segment_video.get_width()} px\n\theight: {tobii_segment_video.get_height()} px') - - # Create aruco camera - aruco_camera = ArUcoCamera.ArUcoCamera() - - # Load calibration file - if args.camera_calibration != None: - - aruco_camera.load_calibration_file(args.camera_calibration) - - else: - - raise ValueError('.json camera calibration filepath required. Use -c option.') - - # Create aruco tracker - aruco_tracker = ArUcoTracker.ArUcoTracker(args.marker_dictionary, args.marker_size, aruco_camera) - - # Load specific configuration file - def load_configuration_file(): - - if args.aruco_tracker_configuration != None: - - aruco_tracker.load_configuration_file(args.aruco_tracker_configuration) - - print(f'ArUcoTracker configuration for {aruco_tracker.get_markers_dictionay().get_markers_format()} markers detection:') - aruco_tracker.print_configuration() - - load_configuration_file() - - # Load AOI 3D scene for each marker - aoi3D_scenes = {} - aoi3D_scene_edits = {} - - for marker_id, aoi_scene_filepath in args.marker_id_scene.items(): - - marker_id = int(marker_id) - - aoi3D_scenes[marker_id] = AOI3DScene.AOI3DScene() - aoi3D_scenes[marker_id].load(aoi_scene_filepath) - - aoi3D_scene_edits[marker_id] = { - 'rotation': numpy.array([0.0, 0.0, 0.0]), - 'translation': numpy.array([0.0, 0.0, 0.0]) - } - - print(f'AOI in {os.path.basename(aoi_scene_filepath)} scene related to marker #{marker_id}:') - for aoi in aoi3D_scenes[marker_id].keys(): - print(f'\t{aoi}') - - def aoi3D_scene_selector(marker_id): - return aoi3D_scenes.get(marker_id, None) - - def aoi3D_scene_edit_selector(marker_id): - return aoi3D_scene_edits.get(marker_id, None) - - # Display first frame - video_ts, video_frame = tobii_segment_video.get_frame(0) - cv.imshow(f'Segment {tobii_segment.get_id()} ArUco marker editor', video_frame.matrix) - - # Init mouse interaction variables - pointer = (0, 0) - left_click = (0, 0) - right_click = (0, 0) - right_button = False - edit_trans = False # translate - edit_coord = 0 # x - - # On mouse left left_click : update pointer position - def on_mouse_event(event, x, y, flags, param): - - nonlocal pointer - nonlocal left_click - nonlocal right_click - nonlocal right_button - - # Update pointer - pointer = (x, y) - - # Update left_click - if event == cv.EVENT_LBUTTONUP: - - left_click = pointer - - # Udpate right_button - elif event == cv.EVENT_RBUTTONDOWN: - - right_button = True - - elif event == cv.EVENT_RBUTTONUP: - - 
right_button = False - - # Udpate right_click - if right_button: - - right_click = pointer - - cv.setMouseCallback(f'Segment {tobii_segment.get_id()} ArUco marker editor', on_mouse_event) - - # Frame selector loop - frame_index = 0 - last_frame_index = -1 - last_frame = video_frame.copy() - force_update = False - - selected_marker_id = -1 - - try: - - while True: - - # Select a frame on change - if frame_index != last_frame_index or force_update: - - video_ts, video_frame = tobii_segment_video.get_frame(frame_index) - video_ts_ms = video_ts / 1000 - - last_frame_index = frame_index - last_frame = video_frame.copy() - - # Hide frame left and right borders before tracking to ignore markers outside focus area - cv.rectangle(video_frame.matrix, (0, 0), (int(video_frame.width/6), int(video_frame.height)), (0, 0, 0), -1) - cv.rectangle(video_frame.matrix, (int(video_frame.width*(1 - 1/6)), 0), (int(video_frame.width), int(video_frame.height)), (0, 0, 0), -1) - - # Track markers with pose estimation - aruco_tracker.track(video_frame.matrix) - - else: - - video_frame = last_frame.copy() - - # Edit fake gaze position from pointer - gaze_position = GazeFeatures.GazePosition(pointer, accuracy=2) - - # Copy video frame to edit visualisation on it with out disrupting aruco tracking - visu_frame = video_frame.copy() - - # Draw markers and pose estimation - aruco_tracker.draw(visu_frame.matrix) - - # Project 3D scene on each video frame and the visualisation frame - if aruco_tracker.get_markers_number(): - - # Write detected marker ids - cv.putText(visu_frame.matrix, f'Detected markers: {aruco_tracker.get_markers_ids()}', (20, visu_frame.height - 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA) - - # Update selected marker id by left_clicking on marker - for (i, marker_id) in enumerate(aruco_tracker.get_markers_ids()): - - marker_aoi = numpy.array(aruco_tracker.get_marker_corners(i)).view(AOIFeatures.AreaOfInterest) - - if marker_aoi.looked(left_click): - - selected_marker_id = marker_id - - # Select 3D scene related to selected marker - aoi3D_scene = aoi3D_scene_selector(selected_marker_id) - - # If a marker is selected - try: - - # Retreive marker index - selected_marker_index = aruco_tracker.get_marker_index(selected_marker_id) - - if aoi3D_scene == None: - raise UserWarning('No AOI 3D scene') - - # Select scene edit - aoi3D_scene_edit = aoi3D_scene_edit_selector(selected_marker_id) - - # Edit scene - if aoi3D_scene_edit != None: - - marker_x, marker_y = aruco_tracker.get_marker_center(selected_marker_index) - - if right_button: - - pointer_delta_x, pointer_delta_y = (right_click[0] - marker_x) / (visu_frame.width/3), (marker_y - right_click[1]) / (visu_frame.width/3) - - if edit_trans: - - # Edit scene rotation - if edit_coord == 0: - aoi3D_scene_edit['rotation'] = numpy.array([pointer_delta_y, aoi3D_scene_edit['rotation'][1], aoi3D_scene_edit['rotation'][2]]) - - elif edit_coord == 1: - aoi3D_scene_edit['rotation'] = numpy.array([aoi3D_scene_edit['rotation'][0], pointer_delta_x, aoi3D_scene_edit['rotation'][2]]) - - elif edit_coord == 2: - aoi3D_scene_edit['rotation'] = numpy.array([aoi3D_scene_edit['rotation'][0], aoi3D_scene_edit['rotation'][1], -1*pointer_delta_y]) - - else: - - # Edit scene translation - if edit_coord == 0: - aoi3D_scene_edit['translation'] = numpy.array([pointer_delta_x, aoi3D_scene_edit['translation'][1], aoi3D_scene_edit['translation'][2]]) - - elif edit_coord == 1: - aoi3D_scene_edit['translation'] = numpy.array([aoi3D_scene_edit['translation'][0], 
pointer_delta_y, aoi3D_scene_edit['translation'][2]]) - - elif edit_coord == 2: - aoi3D_scene_edit['translation'] = numpy.array([aoi3D_scene_edit['translation'][0], aoi3D_scene_edit['translation'][1], 2*pointer_delta_y]) - - # Apply transformation - aoi3D_scene_edited = aoi3D_scene.transform(aoi3D_scene_edit['translation'], aoi3D_scene_edit['rotation']) - - cv.rectangle(visu_frame.matrix, (0, 130), (460, 450), (127, 127, 127), -1) - - # Write rotation matrix - R, _ = cv.Rodrigues(aoi3D_scene_edit['rotation']) - cv.putText(visu_frame.matrix, f'Rotation matrix:', (20, 160), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA) - cv.putText(visu_frame.matrix, f'{R[0][0]:.3f} {R[0][1]:.3f} {R[0][2]:.3f}', (40, 200), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv.LINE_AA) - cv.putText(visu_frame.matrix, f'{R[1][0]:.3f} {R[1][1]:.3f} {R[1][2]:.3f}', (40, 240), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv.LINE_AA) - cv.putText(visu_frame.matrix, f'{R[2][0]:.3f} {R[2][1]:.3f} {R[2][2]:.3f}', (40, 280), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv.LINE_AA) - - # Write translation vector - T = aoi3D_scene_edit['translation'] - cv.putText(visu_frame.matrix, f'Translation vector:', (20, 320), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA) - cv.putText(visu_frame.matrix, f'{T[0]:.3f}', (40, 360), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv.LINE_AA) - cv.putText(visu_frame.matrix, f'{T[1]:.3f}', (40, 400), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv.LINE_AA) - cv.putText(visu_frame.matrix, f'{T[2]:.3f}', (40, 440), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv.LINE_AA) - - # DON'T APPLY CAMERA DISTORSION : it projects points which are far from the frame into it - # This hack isn't realistic but as the gaze will mainly focus on centered AOI, where the distorsion is low, it is acceptable. 
- aoi2D_video_scene = aoi3D_scene_edited.project(aruco_tracker.get_marker_translation(selected_marker_index), aruco_tracker.get_marker_rotation(selected_marker_index), aruco_camera.get_K()) - - # Draw scene - aoi2D_video_scene.draw(visu_frame.matrix, gaze_position, exclude=['Visualisation_Plan']) - - # Write warning related to marker pose processing - except UserWarning as e: - - cv.putText(visu_frame.matrix, f'Marker {selected_marker_id}: {e}', (20, 120), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv.LINE_AA) - - except ValueError: - - # Write error - if selected_marker_id >= 0: - cv.putText(visu_frame.matrix, f'Marker {selected_marker_id} not found', (20, 120), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv.LINE_AA) - - # Draw focus area - cv.rectangle(visu_frame.matrix, (int(visu_frame.width/6), 0), (int(visu_frame.width*(1-1/6)), int(visu_frame.height)), (255, 150, 150), 1) - - # Draw center - cv.line(visu_frame.matrix, (int(visu_frame.width/2) - 50, int(visu_frame.height/2)), (int(visu_frame.width/2) + 50, int(visu_frame.height/2)), (255, 150, 150), 1) - cv.line(visu_frame.matrix, (int(visu_frame.width/2), int(visu_frame.height/2) - 50), (int(visu_frame.width/2), int(visu_frame.height/2) + 50), (255, 150, 150), 1) - - # Draw pointer - cv.circle(visu_frame.matrix, gaze_position, gaze_position.accuracy, (0, 255, 255), -1) - - # Write segment timing - cv.rectangle(visu_frame.matrix, (0, 0), (550, 50), (63, 63, 63), -1) - cv.putText(visu_frame.matrix, f'Segment time: {int(video_ts_ms)} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA) - - # Write selected marker id - if selected_marker_id >= 0: - - cv.rectangle(visu_frame.matrix, (0, 50), (550, 90), (127, 127, 127), -1) - - # Select color - if edit_coord == 0: - color_axis = (0, 0, 255) - - elif edit_coord == 1: - color_axis = (0, 255, 0) - - elif edit_coord == 2: - color_axis = (255, 0, 0) - - if edit_trans: - cv.putText(visu_frame.matrix, f'Rotate marker {selected_marker_id} around axis {edit_coord + 1}', (20, 80), cv.FONT_HERSHEY_SIMPLEX, 1, color_axis, 1, cv.LINE_AA) - else: - cv.putText(visu_frame.matrix, f'Translate marker {selected_marker_id} along axis {edit_coord + 1}', (20, 80), cv.FONT_HERSHEY_SIMPLEX, 1, color_axis, 1, cv.LINE_AA) - - # Write documentation - else: - cv.rectangle(visu_frame.matrix, (0, 50), (650, 250), (127, 127, 127), -1) - cv.putText(visu_frame.matrix, f'> Left click on marker: select scene', (20, 80), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA) - cv.putText(visu_frame.matrix, f'> T: translate, R: rotate', (20, 120), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA) - cv.putText(visu_frame.matrix, f'> Shift + 0/1/2: select axis', (20, 160), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA) - cv.putText(visu_frame.matrix, f'> Right click and drag: edit axis', (20, 200), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA) - cv.putText(visu_frame.matrix, f'> Ctrl + S: save scene', (20, 240), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA) - - # Reset left_click - left_click = (0, 0) - - if args.window: - - key_pressed = cv.waitKey(1) - - #if key_pressed != -1: - # print(key_pressed) - - # Select previous frame with left arrow - if key_pressed == 2: - frame_index -= 1 - - # Select next frame with right arrow - if key_pressed == 3: - frame_index += 1 - - # Clip frame index - if frame_index < 0: - frame_index = 0 - - # Edit rotation with r key - if key_pressed == 114: - edit_trans = True - - # Edit translation with t key - if key_pressed == 
116: - edit_trans = False - - # Select coordinate to edit with Shift + 0, 1 or 2 - if key_pressed == 49 or key_pressed == 50 or key_pressed == 51: - edit_coord = key_pressed - 49 - - # Save selected marker edition using 'Ctrl + s' - if key_pressed == 19: - - if selected_marker_id > 0 and aoi3D_scene_edit != None: - - aoi_scene_filepath = args.marker_id_scene[f'{selected_marker_id}'] - aoi3D_scene_edited.save(aoi_scene_filepath) - - print(f'Saving scene related to marker #{selected_marker_id} into {aoi_scene_filepath}') - - # Close window using 'Esc' key - if key_pressed == 27: - break - - # Reload tracker configuration on 'c' key - if key_pressed == 99: - load_configuration_file() - force_update = True - - # Display video - cv.imshow(f'Segment {tobii_segment.get_id()} ArUco marker editor', visu_frame.matrix) - - # Wait 1 second - time.sleep(1) - - # Exit on 'ctrl+C' interruption - except KeyboardInterrupt: - pass - - # Stop frame display - cv.destroyAllWindows() - -if __name__ == '__main__': - - main() \ No newline at end of file diff --git a/src/argaze/utils/explore_tobii_sdcard.py b/src/argaze/utils/explore_tobii_sdcard.py deleted file mode 100644 index e80057f..0000000 --- a/src/argaze/utils/explore_tobii_sdcard.py +++ /dev/null @@ -1,65 +0,0 @@ -#!/usr/bin/env python - -import argparse - -from argaze.TobiiGlassesPro2 import TobiiEntities, TobiiData, TobiiVideo - -def main(): - """ - Explore Tobii Glasses Pro 2 interface's SD Card - """ - - # manage arguments - parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0]) - parser.add_argument('-d', '--drive_path', metavar='DRIVE_PATH', type=str, default=None, help='drive path') - parser.add_argument('-p', '--project_path', metavar='PROJECT_PATH', type=str, default=None, help='project path') - parser.add_argument('-r', '--recording_path', metavar='RECORDING_PATH', type=str, default=None, help='recording path') - parser.add_argument('-s', '--segment_path', metavar='SEGMENT_PATH', type=str, default=None, help='segment path') - args = parser.parse_args() - - if args.drive_path != None: - - # Load all projects from a tobii drive - tobii_drive = TobiiEntities.TobiiDrive(args.drive_path) - - for project in tobii_drive.get_all_projects(): - print(f'Project id: {project.get_id()}, name: {project.get_name()}') - - elif args.project_path != None: - - # Load one tobii project - tobii_project = TobiiEntities.TobiiProject(args.project_path) - - for participant in tobii_project.get_all_participants(): - print(f'Participant id: {participant.get_id()}, name: {participant.get_name()}') - - for recording in tobii_project.get_all_recordings(): - print(f'Recording id: {recording.get_id()}, name: {recording.get_name()}') - - elif args.recording_path != None: - - # Load a tobii segment - tobii_recording = TobiiEntities.TobiiRecording(args.recording_path) - - for segment in tobii_recording.get_all_segments(): - print(f'Segment id: {segment.get_id()}') - - elif args.segment_path != None: - - # Load a tobii segment - tobii_segment = TobiiEntities.TobiiSegment(args.segment_path) - - tobii_segment_video = tobii_segment.get_video() - print(f'Video width: {tobii_segment_video.get_width()}, height: {tobii_segment_video.get_height()}, fps: {tobii_segment_video.get_fps()}') - - tobii_segment_data = tobii_segment.get_data() - - data = tobii_segment_data.load() - - for key in data.keys(): - print(f'{key}: {len(data[key])} items') - print(f'{key} first item: {data[key].popitem()} items') - -if __name__ == '__main__': - - main() \ No newline at end of file diff 
--git a/src/argaze/utils/export_aruco_markers.py b/src/argaze/utils/export_aruco_markers.py deleted file mode 100644 index 78c996f..0000000 --- a/src/argaze/utils/export_aruco_markers.py +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/env python - -import argparse -import os - -from argaze.ArUcoMarkers import ArUcoMarkersDictionary - -def main(): - """Generates ArUco markers to place into a scene.""" - - # manage arguments - parser = argparse.ArgumentParser(description=main.__doc__) - parser.add_argument('-o', '--output', metavar='OUT', type=str, default='.', help='destination path') - parser.add_argument('-d', '--dictionary', metavar='DICT', type=str, default='DICT_ARUCO_ORIGINAL', help='aruco marker dictionnary (DICT_4X4_50, DICT_4X4_100, DICT_4X4_250, DICT_4X4_1000, DICT_5X5_50, DICT_5X5_100, DICT_5X5_250, DICT_5X5_1000, DICT_6X6_50, DICT_6X6_100, DICT_6X6_250, DICT_6X6_1000, DICT_7X7_50, DICT_7X7_100, DICT_7X7_250, DICT_7X7_1000, DICT_ARUCO_ORIGINAL,DICT_APRILTAG_16h5, DICT_APRILTAG_25h9, DICT_APRILTAG_36h10, DICT_APRILTAG_36h11)') - parser.add_argument('-r', '--resolution', metavar='RES', type=int, default=300, help='picture resolution in dpi') - args = parser.parse_args() - - # manage destination folder - if not os.path.exists(args.output): - os.makedirs(args.output) - print(f'{args.output} folder created') - - # create aruco markers dictionary - aruco_markers_dict = ArUcoMarkersDictionary.ArUcoMarkersDictionary(args.dictionary) - - # export markers - aruco_markers_dict.export_all(args.output, args.resolution) - -if __name__ == '__main__': - - main() \ No newline at end of file diff --git a/src/argaze/utils/export_calibration_board.py b/src/argaze/utils/export_calibration_board.py deleted file mode 100644 index 6d925bd..0000000 --- a/src/argaze/utils/export_calibration_board.py +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env python - -import argparse -import os - -from argaze.ArUcoMarkers import ArUcoBoard - -def main(): - """Generates ArUco board to calibrate a camera.""" - - # manage arguments - parser = argparse.ArgumentParser(description=main.__doc__) - parser.add_argument('columns', metavar='COLS_NUMBER', type=int, default=7, help='number of columns') - parser.add_argument('rows', metavar='ROWS_NUMBER', type=int, default=5, help='number of rows') - parser.add_argument('square_size', metavar='SQUARE_SIZE', type=int, default=5, help='square size (cm)') - parser.add_argument('marker_size', metavar='MARKER_SIZE', type=int, default=3, help='marker size (cm)') - parser.add_argument('-o', '--output', metavar='OUT', type=str, default='.', help='destination path') - parser.add_argument('-d', '--dictionary', metavar='DICT', type=str, default='DICT_ARUCO_ORIGINAL', help='aruco marker dictionnary (DICT_4X4_50, DICT_4X4_100, DICT_4X4_250, DICT_4X4_1000, DICT_5X5_50, DICT_5X5_100, DICT_5X5_250, DICT_5X5_1000, DICT_6X6_50, DICT_6X6_100, DICT_6X6_250, DICT_6X6_1000, DICT_7X7_50, DICT_7X7_100, DICT_7X7_250, DICT_7X7_1000, DICT_ARUCO_ORIGINAL,DICT_APRILTAG_16h5, DICT_APRILTAG_25h9, DICT_APRILTAG_36h10, DICT_APRILTAG_36h11)') - parser.add_argument('-r', '--resolution', metavar='RES', type=int, default=50, help='picture resolution in dpi') - args = parser.parse_args() - - # manage destination folder - if not os.path.exists(args.output): - os.makedirs(args.output) - print(f'{args.output} folder created') - - # create aruco board - aruco_board = ArUcoBoard.ArUcoBoard(args.dictionary, args.columns, args.rows, args.square_size, args.marker_size) - - # export aruco board - aruco_board.export(args.output, 
args.resolution) - -if __name__ == '__main__': - - main() \ No newline at end of file diff --git a/src/argaze/utils/export_tobii_segment_aruco_visual_scan.py b/src/argaze/utils/export_tobii_segment_aruco_visual_scan.py deleted file mode 100644 index a3a31d0..0000000 --- a/src/argaze/utils/export_tobii_segment_aruco_visual_scan.py +++ /dev/null @@ -1,435 +0,0 @@ -#!/usr/bin/env python - -import argparse -import os -import json - -from argaze import DataStructures -from argaze import GazeFeatures -from argaze.TobiiGlassesPro2 import TobiiEntities, TobiiVideo -from argaze.ArUcoMarkers import * -from argaze.AreaOfInterest import * -from argaze.utils import MiscFeatures - -import numpy -import cv2 as cv - -def main(): - """ - Track ArUco markers into Tobii Glasses Pro 2 segment video file. - For each loaded AOI scene .obj file, position the scene virtually relatively to each detected ArUco markers and project the scene into camera frame. - Then, detect if Tobii gaze point is inside any AOI. - Export AOIs video and data. - """ - - # Manage arguments - parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0]) - parser.add_argument('-s', '--segment_path', metavar='SEGMENT_PATH', type=str, default=None, help='segment path') - parser.add_argument('-r', '--time_range', metavar=('START_TIME', 'END_TIME'), nargs=2, type=float, default=(0., None), help='start and end time (in second)') - parser.add_argument('-c', '--camera_calibration', metavar='CAM_CALIB', type=str, default=None, help='json camera calibration filepath') - parser.add_argument('-p', '--aruco_tracker_configuration', metavar='TRACK_CONFIG', type=str, default=None, help='json aruco tracker configuration filepath') - parser.add_argument('-md', '--marker_dictionary', metavar='MARKER_DICT', type=str, default='DICT_ARUCO_ORIGINAL', help='aruco marker dictionnary (DICT_4X4_50, DICT_4X4_100, DICT_4X4_250, DICT_4X4_1000, DICT_5X5_50, DICT_5X5_100, DICT_5X5_250, DICT_5X5_1000, DICT_6X6_50, DICT_6X6_100, DICT_6X6_250, DICT_6X6_1000, DICT_7X7_50, DICT_7X7_100, DICT_7X7_250, DICT_7X7_1000, DICT_ARUCO_ORIGINAL,DICT_APRILTAG_16h5, DICT_APRILTAG_25h9, DICT_APRILTAG_36h10, DICT_APRILTAG_36h11)') - parser.add_argument('-ms', '--marker_size', metavar='MARKER_SIZE', type=float, default=6, help='aruco marker size (cm)') - parser.add_argument('-mi', '--marker_id_scene', metavar='MARKER_ID_SCENE', type=json.loads, help='{"marker": "aoi scene filepath"} dictionary') - parser.add_argument('-o', '--output', metavar='OUT', type=str, default=None, help='destination folder path (segment folder by default)') - parser.add_argument('-w', '--window', metavar='DISPLAY', type=bool, default=True, help='enable window display', action=argparse.BooleanOptionalAction) - args = parser.parse_args() - - if args.segment_path != None: - - # Manage markers id to track - if args.marker_id_scene == None: - print(f'Track any Aruco markers from the {args.marker_dictionary} dictionary') - else: - print(f'Track Aruco markers {list(args.marker_id_scene.keys())} from the {args.marker_dictionary} dictionary') - - # Manage destination path - destination_path = '.' 
- if args.output != None: - - if not os.path.exists(os.path.dirname(args.output)): - - os.makedirs(os.path.dirname(args.output)) - print(f'{os.path.dirname(args.output)} folder created') - - destination_path = args.output - - else: - - destination_path = args.segment_path - - # Export into a dedicated time range folder - if args.time_range[1] != None: - timerange_path = f'[{int(args.time_range[0])}s - {int(args.time_range[1])}s]' - else: - timerange_path = f'[all]' - - destination_path = f'{destination_path}/{timerange_path}' - - if not os.path.exists(destination_path): - - os.makedirs(destination_path) - print(f'{destination_path} folder created') - - vs_data_filepath = f'{destination_path}/visual_scan.csv' - vs_visu_filepath = f'{destination_path}/visual_scan_marker_%d.jpg' - vs_video_filepath = f'{destination_path}/visual_scan.mp4' - - # Load a tobii segment - tobii_segment = TobiiEntities.TobiiSegment(args.segment_path, int(args.time_range[0] * 1e6), int(args.time_range[1] * 1e6) if args.time_range[1] != None else None) - - # Load a tobii segment video - tobii_segment_video = tobii_segment.load_video() - print(f'Video properties:\n\tduration: {tobii_segment_video.get_duration()/1e6} s\n\twidth: {tobii_segment_video.get_width()} px\n\theight: {tobii_segment_video.get_height()} px') - - # Load a tobii segment data - tobii_segment_data = tobii_segment.load_data() - - print(f'Loaded data count:') - for name in tobii_segment_data.keys(): - print(f'\t{name}: {len(tobii_segment_data[name])} data') - - # Access to timestamped gaze positions data buffer - tobii_ts_gaze_positions = tobii_segment_data['GazePosition'] - - # Access to timestamped gaze 3D positions data buffer - tobii_ts_gaze_positions_3d = tobii_segment_data['GazePosition3D'] - - # Access to timestamped head rotations data buffer - tobii_ts_head_rotations = tobii_segment_data['Gyroscope'] - - # Prepare video exportation at the same format than segment video - output_video = TobiiVideo.TobiiVideoOutput(vs_video_filepath, tobii_segment_video.get_stream()) - - # Create aruco camera - aruco_camera = ArUcoCamera.ArUcoCamera() - - # Load calibration file - if args.camera_calibration != None: - - aruco_camera.load_calibration_file(args.camera_calibration) - - else: - - raise ValueError('.json camera calibration filepath required. Use -c option.') - - # Create aruco tracker - aruco_tracker = ArUcoTracker.ArUcoTracker(args.marker_dictionary, args.marker_size, aruco_camera) - - # Load specific configuration file - if args.aruco_tracker_configuration != None: - - aruco_tracker.load_configuration_file(args.aruco_tracker_configuration) - - print(f'ArUcoTracker configuration for {aruco_tracker.get_markers_dictionay().get_markers_format()} markers detection:') - aruco_tracker.print_configuration() - - # Load AOI 3D scene for each marker and create a AOI 2D scene and frame when a 'Visualisation_Plan' AOI exist - aoi3D_scenes = {} - aoi2D_visu_scenes = {} - aoi2D_visu_frames = {} - - for marker_id, aoi_scene_filepath in args.marker_id_scene.items(): - - marker_id = int(marker_id) - - aoi3D_scenes[marker_id] = AOI3DScene.AOI3DScene() - aoi3D_scenes[marker_id].load(aoi_scene_filepath) - - print(f'AOI in {os.path.basename(aoi_scene_filepath)} scene related to marker #{marker_id}:') - for aoi in aoi3D_scenes[marker_id].keys(): - - # If a 'Visualisation_Plan' AOI exist - # TODO: document this deep feature !!! 
- if aoi == 'Visualisation_Plan': - - print(f'\tVisualisation_Plan detected: a visual scan picture will be output for this marker.') - - # Create a visual scan visualisation frame - visu_width, visu_height = 1920, 1080 - scene_width, scene_height, __ = aoi3D_scenes[marker_id].size() - - aoi2D_visu_frames[marker_id] = numpy.full((visu_height, visu_width, 3), 255, dtype=numpy.uint8) - - if args.time_range != (0., None): - cv.putText(aoi2D_visu_frames[marker_id], f'Segment time range: {int(args.time_range[0] * 1000)} - {int(args.time_range[1] * 1000)} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv.LINE_AA) - - # Project 3D scene onto the visualisation plan - aoi3D_scene_rotation = numpy.array([[-numpy.pi, 0.0, 0.0]]) - aoi3D_scene_translation = aoi3D_scenes[marker_id].center()*[-1, 1, 0] + [0, 0, scene_height] - - # Edit a projection matrix for the reference frame - K0 = numpy.array([[visu_height, 0.0, visu_width/2], [0.0, visu_height, visu_height/2], [0.0, 0.0, 1.0]]) - - aoi2D_visu_scenes[marker_id] = aoi3D_scenes[marker_id].project(aoi3D_scene_translation, aoi3D_scene_rotation, K0) - - for name, aoi in aoi2D_visu_scenes[marker_id].items(): - if name != 'Visualisation_Plan': - aoi.draw(aoi2D_visu_frames[marker_id], (0, 0, 0)) - - else: - - print(f'\t{aoi}') - - def aoi3D_scene_selector(marker_id): - return aoi3D_scenes.get(marker_id, None) - - def aoi2D_visu_scene_selector(marker_id): - return aoi2D_visu_scenes.get(marker_id, None) - - def aoi2D_visu_frame_selector(marker_id): - return aoi2D_visu_frames.get(marker_id, None) - - # Create timestamped buffer to store AOIs scene in time - ts_aois_scenes = AOIFeatures.TimeStampedAOIScenes() - - # Create timestamped buffer to store gaze positions in time - ts_gaze_positions = GazeFeatures.TimeStampedGazePositions() - - # !!! the parameters below are specific to the TobiiGlassesPro2 !!! - # Reference : https://www.biorxiv.org/content/10.1101/299925v1 - tobii_accuracy = 1.42 # degree - tobii_precision = 0.34 # degree - tobii_camera_hfov = 82 # degree - tobii_visual_hfov = 160 # degree - - # Video and data replay loop - try: - - # Initialise progress bar - MiscFeatures.printProgressBar(0, tobii_segment_video.get_duration()/1000, prefix = 'Progress:', suffix = 'Complete', length = 100) - - head_moving = False - head_movement_last = 0. 
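The replay loop below turns the Tobii angular accuracy (1.42°) into a pixel radius by comparing it to the camera's horizontal field of view; both terms scale with gaze depth, so the depth cancels out. A standalone check of that tangent-based conversion (the frame width and depth values here are illustrative, not an argaze API):

```
import numpy

tobii_accuracy = 1.42    # degree
tobii_camera_hfov = 82   # degree
frame_width = 1920       # px (illustrative)
gaze_depth_mm = 650      # mm (illustrative; cancels out below)

gaze_accuracy_mm = numpy.tan(numpy.deg2rad(tobii_accuracy)) * gaze_depth_mm
tobii_camera_hfov_mm = numpy.tan(numpy.deg2rad(tobii_camera_hfov / 2)) * gaze_depth_mm

accuracy_px = round(frame_width * gaze_accuracy_mm / tobii_camera_hfov_mm)
print(accuracy_px)  # ~55 px, whatever gaze_depth_mm is
```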
- - # Iterate on video frames - for video_ts, video_frame in tobii_segment_video.frames(): - - video_ts_ms = video_ts / 1000 - - # Copy video frame to edit visualisation on it without disrupting aruco tracking - visu_frame = video_frame.copy() - - # Process video and data frame - try: - - # Get nearest head rotation before video timestamp and remove all head rotations before - _, nearest_head_rotation = tobii_ts_head_rotations.pop_first_until(video_ts) - - # Calculate head movement considering only head yaw and pitch - head_movement = numpy.array(nearest_head_rotation.value) - head_movement_px = head_movement.astype(int) - head_movement_norm = numpy.linalg.norm(head_movement[0:2]) - - # Draw movement vector - cv.line(visu_frame.matrix, (int(visu_frame.width/2), int(visu_frame.height/2)), (int(visu_frame.width/2) + head_movement_px[1], int(visu_frame.height/2) - head_movement_px[0]), (150, 150, 150), 3) - - # Head movement detection hysteresis - # TODO : pass the threshold value as argument - if not head_moving and head_movement_norm > 50: - head_moving = True - - if head_moving and head_movement_norm < 10: - head_moving = False - - # When head is moving, ArUco tracking could return bad pose estimation and so bad AOI scene projection - if head_moving: - raise AOIFeatures.AOISceneMissing('Head is moving') - - # Get nearest gaze position before video timestamp and remove all gaze positions before - _, nearest_gaze_position = tobii_ts_gaze_positions.pop_first_until(video_ts) - - # Ignore frame when gaze position is not valid - if nearest_gaze_position.validity == 1: - raise GazeFeatures.GazePositionMissing('Unvalid gaze position') - - gaze_position_pixel = GazeFeatures.GazePosition( (int(nearest_gaze_position.value[0] * visu_frame.width), int(nearest_gaze_position.value[1] * visu_frame.height)) ) - - # Draw gaze position - cv.circle(visu_frame.matrix, gaze_position_pixel, 2, (0, 255, 255), -1) - - # Get nearest gaze position 3D before video timestamp and remove all gaze positions before - _, nearest_gaze_position_3d = tobii_ts_gaze_positions_3d.pop_first_until(video_ts) - - # Ignore frame when gaze position 3D is not valid - if nearest_gaze_position_3d.validity == 1: - raise GazeFeatures.GazePositionMissing('Unvalid gaze position 3D') - - gaze_accuracy_mm = numpy.tan(numpy.deg2rad(tobii_accuracy)) * nearest_gaze_position_3d.value[2] - tobii_camera_hfov_mm = numpy.tan(numpy.deg2rad(tobii_camera_hfov / 2)) * nearest_gaze_position_3d.value[2] - - gaze_position_pixel.accuracy = round(visu_frame.width * float(gaze_accuracy_mm) / float(tobii_camera_hfov_mm)) - - # Draw gaze accuracy - cv.circle(visu_frame.matrix, gaze_position_pixel, gaze_position_pixel.accuracy, (0, 255, 255), 1) - - # Store gaze position in millisecond for further visual scan processing - ts_gaze_positions[round(video_ts_ms)] = gaze_position_pixel - - # Hide frame left and right borders before tracking to ignore markers outside focus area - cv.rectangle(video_frame.matrix, (0, 0), (int(video_frame.width/6), int(video_frame.height)), (0, 0, 0), -1) - cv.rectangle(video_frame.matrix, (int(video_frame.width*(1 - 1/6)), 0), (int(video_frame.width), int(video_frame.height)), (0, 0, 0), -1) - - # Track markers with pose estimation and draw them - aruco_tracker.track(video_frame.matrix) - aruco_tracker.draw(visu_frame.matrix) - - # When no marker is detected, no AOI scene projection can't be done - if aruco_tracker.get_markers_number() == 0: - raise AOIFeatures.AOISceneMissing('No marker detected') - - # Store aoi 2D video for further 
scene merging - aoi2D_dict = {} - - # Project 3D scene on each video frame and the visualisation frame - for (i, marker_id) in enumerate(aruco_tracker.get_markers_ids()): - - # Copy 3D scene related to detected marker - aoi3D_scene = aoi3D_scene_selector(marker_id) - - if aoi3D_scene == None: - continue - - # Transform scene into camera referential - aoi3D_camera = aoi3D_scene.transform(aruco_tracker.get_marker_translation(i), aruco_tracker.get_marker_rotation(i)) - - # Get aoi inside vision cone field - cone_vision_height_cm = nearest_gaze_position_3d.value[2]/10 # cm - cone_vision_radius_cm = numpy.tan(numpy.deg2rad(tobii_visual_hfov / 2)) * cone_vision_height_cm - - aoi3D_inside, aoi3D_outside = aoi3D_camera.vision_cone(cone_vision_radius_cm, cone_vision_height_cm) - - # Keep only aoi inside vision cone field - aoi3D_scene = aoi3D_scene.copy(exclude=aoi3D_outside.keys()) - - # DON'T APPLY CAMERA DISTORSION : it projects points which are far from the frame into it - # This hack isn't realistic but as the gaze will mainly focus on centered AOI, where the distorsion is low, it is acceptable. - aoi2D_video_scene = aoi3D_scene.project(aruco_tracker.get_marker_translation(i), aruco_tracker.get_marker_rotation(i), aruco_camera.get_K()) - - # Store each 2D aoi for further scene merging - for name, aoi in aoi2D_video_scene.items(): - - if name not in aoi2D_dict.keys(): - aoi2D_dict[name] = [] - - aoi2D_dict[name].append(aoi.clockwise()) - - # Select 2D visu scene if there is one for the detected marker - aoi2D_visu_scene = aoi2D_visu_scene_selector(marker_id) - aoi2D_visu_frame = aoi2D_visu_frame_selector(marker_id) - - if aoi2D_visu_scene == None: - continue - - look_at = aoi2D_video_scene['Visualisation_Plan'].look_at(gaze_position_pixel) - - visu_gaze_pixel = aoi2D_visu_scene['Visualisation_Plan'].looked_pixel(look_at) - cv.circle(aoi2D_visu_frame, visu_gaze_pixel, 4, (0, 0, 255), -1) - - # Merge all 2D aoi into a single 2D scene - aoi2D_merged_scene = AOI2DScene.AOI2DScene() - for name, aoi_array in aoi2D_dict.items(): - aoi2D_merged_scene[name] = numpy.sum(aoi_array, axis=0) / len(aoi_array) - - aoi2D_merged_scene.draw(visu_frame.matrix, gaze_position_pixel, exclude=['Visualisation_Plan']) - - # When the merged scene is empty - if len(aoi2D_merged_scene.keys()) == 0: - raise AOIFeatures.AOISceneMissing('Scene is empty') - - # Store 2D merged scene at this time in millisecond - ts_aois_scenes[round(video_ts_ms)] = aoi2D_merged_scene - - # Raised when gaze data is missing - except GazeFeatures.GazePositionMissing as e: - - # Store missing gaze data exception - ts_gaze_positions[round(video_ts_ms)] = e - - cv.rectangle(visu_frame.matrix, (0, 50), (550, 100), (127, 127, 127), -1) - cv.putText(visu_frame.matrix, str(e), (20, 80), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA) - - # Raised when aoi scene is missing - except AOIFeatures.AOISceneMissing as e: - - # Store missing scene exception - ts_aois_scenes[round(video_ts_ms)] = e - - cv.rectangle(visu_frame.matrix, (0, 50), (550, 100), (127, 127, 127), -1) - cv.putText(visu_frame.matrix, str(e), (20, 80), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA) - - # Raised when buffer is empty - except ValueError: - pass - - # Draw focus area - cv.rectangle(visu_frame.matrix, (int(video_frame.width/6), 0), (int(visu_frame.width*(1-1/6)), int(visu_frame.height)), (255, 150, 150), 1) - - # Draw center - cv.line(visu_frame.matrix, (int(visu_frame.width/2) - 50, int(visu_frame.height/2)), (int(visu_frame.width/2) + 50, 
int(visu_frame.height/2)), (255, 150, 150), 1) - cv.line(visu_frame.matrix, (int(visu_frame.width/2), int(visu_frame.height/2) - 50), (int(visu_frame.width/2), int(visu_frame.height/2) + 50), (255, 150, 150), 1) - - # Write segment timing - cv.rectangle(visu_frame.matrix, (0, 0), (550, 50), (63, 63, 63), -1) - cv.putText(visu_frame.matrix, f'Segment time: {int(video_ts_ms)} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA) - - if args.window: - - # Close window using 'Esc' key - if cv.waitKey(1) == 27: - break - - # Display visualisation - cv.imshow(f'Segment {tobii_segment.get_id()} ArUco AOI', visu_frame.matrix) - - # Display each visual scan frame - for marker_id, aoi2D_visu_frame in aoi2D_visu_frames.items(): - cv.imshow(f'Segment {tobii_segment.get_id()} visual scan for marker {marker_id}', visu_frame.matrix) - - # Write video - output_video.write(visu_frame.matrix) - - # Update Progress Bar - progress = video_ts_ms - int(args.time_range[0] * 1000) - MiscFeatures.printProgressBar(progress, tobii_segment_video.get_duration()/1000, prefix = 'Progress:', suffix = 'Complete', length = 100) - - # Exit on 'ctrl+C' interruption - except KeyboardInterrupt: - pass - - # Stop frame display - cv.destroyAllWindows() - - # End output video file - output_video.close() - - # Print aruco tracking metrics - print('\nAruco marker tracking metrics') - try_count, tracked_counts, rejected_counts = aruco_tracker.get_track_metrics() - - for marker_id, tracked_count in tracked_counts.items(): - print(f'Markers {marker_id} has been detected in {tracked_count} / {try_count} frames ({round(100 * tracked_count / try_count, 2)} %)') - - for marker_id, rejected_count in rejected_counts.items(): - print(f'Markers {marker_id} has been rejected in {rejected_count} / {try_count} frames ({round(100 * rejected_count / try_count, 2)} %)') - - # Build visual scan based on a pointer position - visual_scan = GazeFeatures.PointerBasedVisualScan(ts_aois_scenes, ts_gaze_positions) - print(f'{len(visual_scan.steps())} visual scan steps found') - - # Export visual scan data - visual_scan.export_as_csv(vs_data_filepath) - print(f'Visual scan data saved into {vs_data_filepath}') - - # Export each visual scan picture - for marker_id, aoi2D_visu_frame in aoi2D_visu_frames.items(): - cv.imwrite(vs_visu_filepath % marker_id, visu_frame.matrix) - print(f'Visual scan picture for marker {marker_id} saved into {vs_visu_filepath % marker_id}') - - # Notify when the visual scan video has been exported - print(f'Visual scan video saved into {vs_video_filepath}') - - -if __name__ == '__main__': - - main() \ No newline at end of file diff --git a/src/argaze/utils/export_tobii_segment_movements.py b/src/argaze/utils/export_tobii_segment_movements.py deleted file mode 100644 index b0c273a..0000000 --- a/src/argaze/utils/export_tobii_segment_movements.py +++ /dev/null @@ -1,255 +0,0 @@ -#!/usr/bin/env python - -import argparse -import os - -from argaze import GazeFeatures -from argaze.TobiiGlassesPro2 import TobiiEntities, TobiiVideo -from argaze.utils import MiscFeatures - -import cv2 as cv -import numpy - -def main(): - """ - Analyse Tobii segment fixations - """ - - # Manage arguments - parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0]) - parser.add_argument('-s', '--segment_path', metavar='SEGMENT_PATH', type=str, default=None, help='path to a tobii segment folder') - parser.add_argument('-r', '--time_range', metavar=('START_TIME', 'END_TIME'), nargs=2, type=float, default=(0., None), help='start 
and end time (in second)') - parser.add_argument('-d', '--dispersion_threshold', metavar='DISPERSION_THRESHOLD', type=int, default=10, help='dispersion threshold in pixel') - parser.add_argument('-t', '--duration_threshold', metavar='DURATION_THRESHOLD', type=int, default=100, help='duration threshold in millisecond') - parser.add_argument('-o', '--output', metavar='OUT', type=str, default=None, help='destination folder path (segment folder by default)') - parser.add_argument('-w', '--window', metavar='DISPLAY', type=bool, default=True, help='enable window display', action=argparse.BooleanOptionalAction) - args = parser.parse_args() - - if args.segment_path != None: - - # Manage destination path - destination_path = '.' - if args.output != None: - - if not os.path.exists(os.path.dirname(args.output)): - - os.makedirs(os.path.dirname(args.output)) - print(f'{os.path.dirname(args.output)} folder created') - - destination_path = args.output - - else: - - destination_path = args.segment_path - - # Export into a dedicated time range folder - if args.time_range[1] != None: - timerange_path = f'[{int(args.time_range[0])}s - {int(args.time_range[1])}s]' - else: - timerange_path = f'[all]' - - destination_path = f'{destination_path}/{timerange_path}' - - if not os.path.exists(destination_path): - - os.makedirs(destination_path) - print(f'{destination_path} folder created') - - fixations_filepath = f'{destination_path}/movements_fixations.csv' - saccades_filepath = f'{destination_path}/movements_saccades.csv' - - gaze_status_filepath = f'{destination_path}/gaze_status.csv' - gaze_status_video_filepath = f'{destination_path}/gaze_status.mp4' - - # Load a tobii segment - tobii_segment = TobiiEntities.TobiiSegment(args.segment_path, int(args.time_range[0] * 1e6), int(args.time_range[1] * 1e6) if args.time_range[1] != None else None) - - # Load a tobii segment video - tobii_segment_video = tobii_segment.load_video() - print(f'Video properties:\n\tduration: {tobii_segment_video.get_duration()/1e6} s\n\twidth: {tobii_segment_video.get_width()} px\n\theight: {tobii_segment_video.get_height()} px') - - # Load a tobii segment data - tobii_segment_data = tobii_segment.load_data() - - print(f'Loaded data count:') - for name in tobii_segment_data.keys(): - print(f'\t{name}: {len(tobii_segment_data[name])} data') - - # Access to timestamped gaze position data buffer - tobii_ts_gaze_positions = tobii_segment_data['GazePosition'] - - # Access to timestamped gaze 3D positions data buffer - tobii_ts_gaze_positions_3d = tobii_segment_data['GazePosition3D'] - - # Format tobii gaze position in pixel and store them using millisecond unit timestamp - ts_gaze_positions = GazeFeatures.TimeStampedGazePositions() - - # !!! the parameters below are specific to the TobiiGlassesPro2 !!! 
- # Reference : https://www.biorxiv.org/content/10.1101/299925v1 - tobii_accuracy = 1.42 # degree - tobii_precision = 0.34 # degree - tobii_camera_hfov = 82 # degree - - for ts, tobii_gaze_position in tobii_ts_gaze_positions.items(): - - if tobii_gaze_position.validity == 0: - - gaze_position_pixel = GazeFeatures.GazePosition( (int(tobii_gaze_position.value[0] * tobii_segment_video.get_width()), int(tobii_gaze_position.value[1] * tobii_segment_video.get_height())) ) - - ts_gaze_positions[ts/1000] = gaze_position_pixel - - for ts, tobii_ts_gaze_position_3d in tobii_ts_gaze_positions_3d.items(): - - if tobii_ts_gaze_position_3d.validity == 0: - - gaze_accuracy_mm = numpy.sin(numpy.deg2rad(tobii_accuracy)) * tobii_ts_gaze_position_3d.value[2] - tobii_camera_hfov_mm = numpy.sin(numpy.deg2rad(tobii_camera_hfov)) * tobii_ts_gaze_position_3d.value[2] - - ts_gaze_positions[ts/1000].accuracy = round(tobii_segment_video.get_width() * float(gaze_accuracy_mm) / float(tobii_camera_hfov_mm)) - - print(f'Dispersion threshold: {args.dispersion_threshold}') - print(f'Duration threshold: {args.duration_threshold}') - - # Start movement identification - movement_identifier = GazeFeatures.DispersionBasedMovementIdentifier(ts_gaze_positions, args.dispersion_threshold, args.duration_threshold) - fixations = GazeFeatures.TimeStampedMovements() - saccades = GazeFeatures.TimeStampedMovements() - gaze_status = GazeFeatures.TimeStampedGazeStatus() - - # Initialise progress bar - MiscFeatures.printProgressBar(0, int(tobii_segment_video.get_duration()/1000), prefix = 'Movements identification:', suffix = 'Complete', length = 100) - - for item in movement_identifier: - - if isinstance(item, GazeFeatures.DispersionBasedMovementIdentifier.DispersionBasedFixation): - - start_ts, start_position = item.positions.get_first() - - fixations[start_ts] = item - - for ts, position in item.positions.items(): - - gaze_status[ts] = GazeFeatures.GazeStatus(position, 'Fixation', len(fixations)) - - elif isinstance(item, GazeFeatures.DispersionBasedMovementIdentifier.DispersionBasedSaccade): - - start_ts, start_position = item.positions.get_first() - end_ts, end_position = item.positions.get_last() - - saccades[start_ts] = item - - gaze_status[start_ts] = GazeFeatures.GazeStatus(start_position, 'Saccade', len(saccades)) - gaze_status[end_ts] = GazeFeatures.GazeStatus(end_position, 'Saccade', len(saccades)) - - else: - continue - - # Update Progress Bar - progress = ts - int(args.time_range[0] * 1000) - MiscFeatures.printProgressBar(progress, int(tobii_segment_video.get_duration()/1000), prefix = 'Movements identification:', suffix = 'Complete', length = 100) - - print(f'\n{len(fixations)} fixations and {len(saccades)} saccades found') - - # Export fixations analysis - fixations.export_as_csv(fixations_filepath) - print(f'Fixations saved into {fixations_filepath}') - - # Export saccades analysis - saccades.export_as_csv(saccades_filepath) - print(f'Saccades saved into {saccades_filepath}') - - # Export gaze status analysis - gaze_status.export_as_csv(gaze_status_filepath) - print(f'Gaze status saved into {gaze_status_filepath}') - - # Prepare video exportation at the same format than segment video - output_video = TobiiVideo.TobiiVideoOutput(gaze_status_video_filepath, tobii_segment_video.get_stream()) - - # Video and data loop - try: - - # Initialise progress bar - MiscFeatures.printProgressBar(0, tobii_segment_video.get_duration()/1000, prefix = 'Video with movements processing:', suffix = 'Complete', length = 100) - - 
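# A minimal, self-contained sketch of the dispersion-based (I-DT) identification idea
# used by DispersionBasedMovementIdentifier above, without the argaze API; thresholds
# are expressed in pixels and samples here, purely for illustration.
def identify_fixations(positions, dispersion_threshold=10, duration_threshold=3):

    fixations = []
    window = []

    for position in positions:

        window.append(position)
        xs, ys = zip(*window)

        # Dispersion as horizontal plus vertical spread of the current window
        dispersion = (max(xs) - min(xs)) + (max(ys) - min(ys))

        if dispersion > dispersion_threshold:

            # Close the fixation before the sample that broke the dispersion limit
            if len(window) - 1 >= duration_threshold:
                fixations.append(window[:-1])

            window = [position]

    if len(window) >= duration_threshold:
        fixations.append(window)

    return fixations

# Five stable samples followed by a jump: exactly one fixation is expected
print(identify_fixations([(10, 10), (12, 11), (11, 9), (10, 12), (12, 10), (200, 200)]))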
current_fixation_ts, current_fixation = fixations.pop_first() - current_fixation_time_counter = 0 - - current_saccade_ts, current_saccade = saccades.pop_first() - - # Iterate on video frames - for video_ts, video_frame in tobii_segment_video.frames(): - - video_ts_ms = video_ts / 1000 - - # write segment timing - cv.putText(video_frame.matrix, f'Segment time: {int(video_ts_ms)} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA) - - # write movement identification parameters - cv.putText(video_frame.matrix, f'Dispersion threshold: {args.dispersion_threshold} px', (20, 100), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA) - cv.putText(video_frame.matrix, f'Duration threshold: {args.duration_threshold} ms', (20, 140), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA) - - # Draw current fixation - if len(fixations) > 0: - - if video_ts_ms > current_fixation_ts + current_fixation.duration: - - current_fixation_ts, current_fixation = fixations.pop_first() - current_fixation_time_counter = 0 - - # Draw saccade - if len(saccades) > 0: - - if video_ts_ms > current_saccade_ts + current_saccade.duration: - - current_saccade_ts, current_saccade = saccades.pop_first() - start_ts, start_position = current_saccade.positions.pop_first() - end_ts, end_position = current_saccade.positions.pop_first() - - cv.line(video_frame.matrix, start_position, end_position, (0, 0, 255), 2) - - else: - - current_fixation_time_counter += 1 - - cv.circle(video_frame.matrix, current_fixation.centroid, current_fixation.dispersion + current_fixation_time_counter, (0, 255, 0), 1) - - try: - - # Get closest gaze position before video timestamp and remove all gaze positions before - _, nearest_gaze_position = ts_gaze_positions.pop_first_until(video_ts_ms) - - # Draw gaze position and precision - cv.circle(video_frame.matrix, nearest_gaze_position, 2, (0, 255, 255), -1) - cv.circle(video_frame.matrix, nearest_gaze_position, nearest_gaze_position.accuracy, (0, 255, 255), 1) - - # Wait for gaze position - except ValueError: - pass - - if args.window: - - # Close window using 'Esc' key - if cv.waitKey(1) == 27: - break - - # Display video - cv.imshow(f'Segment {tobii_segment.get_id()} movements', video_frame.matrix) - - # Write video - output_video.write(video_frame.matrix) - - # Update Progress Bar - progress = video_ts_ms - int(args.time_range[0] * 1000) - MiscFeatures.printProgressBar(progress, tobii_segment_video.get_duration()/1000, prefix = 'Video with movements processing:', suffix = 'Complete', length = 100) - - # Exit on 'ctrl+C' interruption - except KeyboardInterrupt: - pass - - # End output video file - output_video.close() - print(f'\nVideo with movements saved into {gaze_status_video_filepath}') - -if __name__ == '__main__': - - main() \ No newline at end of file diff --git a/src/argaze/utils/export_tobii_segment_plots.py b/src/argaze/utils/export_tobii_segment_plots.py deleted file mode 100644 index d28bafb..0000000 --- a/src/argaze/utils/export_tobii_segment_plots.py +++ /dev/null @@ -1,128 +0,0 @@ -#!/usr/bin/env python - -import argparse -import os -import json - -from argaze import DataStructures -from argaze import GazeFeatures -from argaze.TobiiGlassesPro2 import TobiiEntities, TobiiVideo -from argaze.utils import MiscFeatures - -import pandas -import matplotlib.pyplot as mpyplot -import matplotlib.patches as mpatches - -def main(): - """ - """ - - # Manage arguments - parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0]) - 
parser.add_argument('-s', '--segment_path', metavar='SEGMENT_PATH', type=str, default=None, help='segment path') - parser.add_argument('-r', '--time_range', metavar=('START_TIME', 'END_TIME'), nargs=2, type=float, default=(0., None), help='start and end time (in second)') - parser.add_argument('-o', '--output', metavar='OUT', type=str, default=None, help='destination folder path (segment folder by default)') - args = parser.parse_args() - - if args.segment_path != None: - - # Manage destination path - destination_path = '.' - if args.output != None: - - if not os.path.exists(os.path.dirname(args.output)): - - os.makedirs(os.path.dirname(args.output)) - print(f'{os.path.dirname(args.output)} folder created') - - destination_path = args.output - - else: - - destination_path = args.segment_path - - data_plots_filepath = f'{destination_path}/plots.svg' - - # Load a tobii segment - tobii_segment = TobiiEntities.TobiiSegment(args.segment_path, int(args.time_range[0] * 1e6), int(args.time_range[1] * 1e6) if args.time_range[1] != None else None) - - # Load a tobii segment video - tobii_segment_video = tobii_segment.load_video() - print(f'Video properties:\n\tduration: {tobii_segment_video.get_duration() / 1e6} s\n\twidth: {tobii_segment_video.get_width()} px\n\theight: {tobii_segment_video.get_height()} px') - - # Load a tobii segment data - tobii_segment_data = tobii_segment.load_data() - - print(f'Loaded data count:') - for name in tobii_segment_data.keys(): - print(f'\t{name}: {len(tobii_segment_data[name])} data') - - # Edit figure - figure_width = min( 4 * tobii_segment_video.get_duration() / 1e6, 56) # maximal width to display: 56 inches at 144 dpi < 2^16 pixels - data_sample = 8064 # 56 inches * 144 dpi = 8064 data can be displayed at max - figure = mpyplot.figure(figsize=(figure_width, 35), dpi=144) - - # Plot pupil diameter data - subplot = figure.add_subplot(711) - subplot.set_title('Pupil diameter', loc='left') - subplot.set_ylim(0, 10) - patches = tobii_segment_data['PupilDiameter'].plot(names=['value'], colors=['#FFD800'], samples=data_sample) - subplot.legend(handles=patches, loc='upper left') - - # Annotate events - df_ts_events = tobii_segment_data['Event'].as_dataframe() - - if len(df_ts_events) > 0: - - for ts, event_type, event_tag in zip(df_ts_events.index, df_ts_events.type, df_ts_events.tag): - subplot.annotate(f'{event_type}\n{event_tag}', xy=(ts, 7), horizontalalignment="left", verticalalignment="top") - subplot.vlines(ts, 0, 6, color="tab:red", linewidth=1) - - # Plot pupil center data - subplot = figure.add_subplot(712) - subplot.set_title('Pupil center', loc='left') - subplot.set_ylim(-40, -20) - patches = tobii_segment_data['PupilCenter'].plot(names=['x','y','z'], colors=['#276FB6','#9427B6','#888888'], split={'value':['x','y','z']}, samples=data_sample) - subplot.legend(handles=patches, loc='upper left') - - # Plot gaze position data - subplot = figure.add_subplot(713) - subplot.set_title('Gaze position', loc='left') - subplot.set_ylim(0., 1.) 
- patches = tobii_segment_data['GazePosition'].plot(names=['x','y'], colors=['#276FB6','#9427B6'], split={'value':['x','y']}, samples=data_sample)
- subplot.legend(handles=patches, loc='upper left')
-
- # Plot gaze direction data
- subplot = figure.add_subplot(714)
- subplot.set_title('Gaze direction', loc='left')
- patches = tobii_segment_data['GazeDirection'].plot(names=['x','y','z'], colors=['#276FB6','#9427B6','#888888'], split={'value':['x','y','z']}, samples=data_sample)
- subplot.legend(handles=patches, loc='upper left')
-
- # Plot gaze position 3D data
- subplot = figure.add_subplot(715)
- subplot.set_title('Gaze position 3D', loc='left')
- patches = tobii_segment_data['GazePosition3D'].plot(names=['x','y','z'], colors=['#276FB6','#9427B6','#888888'], split={'value':['x','y','z']}, samples=data_sample)
- subplot.legend(handles=patches, loc='upper left')
-
- # Plot accelerometer data
- subplot = figure.add_subplot(716)
- subplot.set_title('Accelerometer', loc='left')
- patches = tobii_segment_data['Accelerometer'].plot(names=['x','y','z'], colors=['#276FB6','#9427B6','#888888'], split={'value':['x','y','z']}, samples=data_sample)
- subplot.legend(handles=patches, loc='upper left')
-
- # Plot gyroscope data
- subplot = figure.add_subplot(717)
- subplot.set_title('Gyroscope', loc='left')
- patches = tobii_segment_data['Gyroscope'].plot(names=['x','y','z'], colors=['#276FB6','#9427B6','#888888'], split={'value':['x','y','z']}, samples=data_sample)
- subplot.legend(handles=patches, loc='upper left')
-
- # Export figure
- mpyplot.tight_layout()
- mpyplot.savefig(data_plots_filepath)
- mpyplot.close('all')
-
- print(f'\nData plots saved into {data_plots_filepath}')
-
-if __name__ == '__main__':
-
- main()
diff --git a/src/argaze/utils/live_tobii_aruco_aoi_ivy_application.py b/src/argaze/utils/live_tobii_aruco_aoi_ivy_application.py
deleted file mode 100644
index 70190e2..0000000
--- a/src/argaze/utils/live_tobii_aruco_aoi_ivy_application.py
+++ /dev/null
@@ -1,122 +0,0 @@
- #!/usr/bin/env python
-
-import argparse
-import os
-
-from argaze import DataStructures, GazeFeatures
-from argaze.ArUcoMarkers import ArUcoMarkersDictionary
-from argaze.AreaOfInterest import *
-
-import cv2 as cv
-import numpy
-
-from ivy.std_api import *
-
-def main():
- """
- Define an AOI scene from an ArUco marker and bind to the Ivy default bus to receive live look-at pointer data.
- """ - - # Manage arguments - parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0]) - parser.add_argument('-y', '--ivy_bus', metavar='IVY_BUS', type=str, default='0.0.0.0:2010', help='Ivy bus ip and port') - parser.add_argument('-a', '--aoi_scene', metavar='AOI_SCENE', type=str, default='aoi3D_scene.obj', help='obj aoi scene filepath') - parser.add_argument('-d', '--dictionary', metavar='DICT', type=str, default='DICT_ARUCO_ORIGINAL', help='aruco marker dictionnary (DICT_4X4_50, DICT_4X4_100, DICT_4X4_250, DICT_4X4_1000, DICT_5X5_50, DICT_5X5_100, DICT_5X5_250, DICT_5X5_1000, DICT_6X6_50, DICT_6X6_100, DICT_6X6_250, DICT_6X6_1000, DICT_7X7_50, DICT_7X7_100, DICT_7X7_250, DICT_7X7_1000, DICT_ARUCO_ORIGINAL,DICT_APRILTAG_16h5, DICT_APRILTAG_25h9, DICT_APRILTAG_36h10, DICT_APRILTAG_36h11)') - parser.add_argument('-m', '--marker_size', metavar='MKR', type=float, default=6, help='aruco marker size (cm)') - parser.add_argument('-i', '--marker_id', metavar='MARKER_ID', type=int, default=0, help='marker id to display') - args = parser.parse_args() - - # Enable Ivy bus - IvyInit(os.path.basename(__file__)) - IvyStart(args.ivy_bus) - - def on_looking_message(*args): - - look_at = numpy.fromstring(args[2].replace('[','').replace(']',''), dtype=float, count=2, sep=', ') - - visu_gaze_pixel = aoi2D_visu_scene[args[1]].looked_pixel(look_at) - - cv.circle(visu_frame, visu_gaze_pixel, 4, (0, 0, 255), -1) - - IvyBindMsg(on_looking_message, 'looking (.*) at (.*)') - - # Create AOIs 3D scene - aoi3D_scene = AOI3DScene.AOI3DScene() - aoi3D_scene.load(args.aoi_scene) - print(f'AOIs names: {aoi3D_scene.keys()}') - - # Create a visual scan visualisation frame - visu_width = 1920 - visu_height = 1080 - visu_ratio = visu_height - visu_frame = numpy.full((visu_height, visu_width, 3), 255, dtype=numpy.uint8) - - cv.imshow('Scene', visu_frame) - - # Project 3D scene on the reference frame - # TODO : center projection on a reference AOI - ref_aoi = 'Scene_Plan' - - # TODO: pass the reference AOI in argument - aoi3D_scene_rotation = numpy.array([[-numpy.pi, 0.0, 0.0]]) - aoi3D_scene_translation = numpy.array([[19.0, 8.0, 25.0]]) - - # Edit a projection matrix for the reference frame - K0 = numpy.array([[visu_ratio, 0.0, visu_width/2], [0.0, visu_ratio, visu_height/2], [0.0, 0.0, 1.0]]) - - aoi2D_visu_scene = aoi3D_scene.project(aoi3D_scene_translation, aoi3D_scene_rotation, K0) - - # Create aruco markers dictionary - aruco_markers_dict = ArUcoMarkersDictionary.ArUcoMarkersDictionary(args.dictionary) - - # Create aruco marker - marker_box = aoi2D_visu_scene['Marker_Plan'].bounding_box().astype(int) - marker_size = marker_box[2] - marker_box[0] - marker = aruco_markers_dict.create_marker(args.marker_id, int(marker_size[0])) - print(f'Creating Aruco marker {args.marker_id} from the {args.dictionary} dictionary') - - def draw_scene(): - - # Clear frame - visu_frame[:] = 255 - - # Display AOI 2D scene - for name, aoi in aoi2D_visu_scene.items(): - aoi.draw(visu_frame, (0, 0, 0)) - - # Display aruco marker - visu_frame[marker_box[0][1]:marker_box[2][1], marker_box[0][0]:marker_box[2][0], :] = marker - - # On mouse over : redraw scene and draw target - def on_mouse_event(event, x, y, flags, param): - - draw_scene() - - # Draw target - cv.circle(visu_frame, (x, y), 40, (0, 255, 255), -1) - - cv.setMouseCallback('Scene', on_mouse_event) - - # Screen display loop - try: - - draw_scene() - - while True: - - # Close window using 'Esc' key - if cv.waitKey(1) == 27: - break - - cv.imshow('Scene', visu_frame) - - 
# Exit on 'ctrl+C' interruption
- except KeyboardInterrupt:
- pass
-
- # Stop frame display
- cv.destroyAllWindows()
-
-if __name__ == '__main__':
-
- main()
\ No newline at end of file
diff --git a/src/argaze/utils/live_tobii_aruco_aoi_ivy_controller.py b/src/argaze/utils/live_tobii_aruco_aoi_ivy_controller.py
deleted file mode 100644
index 070e3ee..0000000
--- a/src/argaze/utils/live_tobii_aruco_aoi_ivy_controller.py
+++ /dev/null
@@ -1,286 +0,0 @@
- #!/usr/bin/env python
-
-import argparse
-import os
-import json
-
-from argaze import DataStructures, GazeFeatures
-from argaze.TobiiGlassesPro2 import *
-from argaze.ArUcoMarkers import ArUcoTracker, ArUcoCamera
-from argaze.AreaOfInterest import *
-
-import cv2 as cv
-import numpy
-
-from ivy.std_api import *
-
-def main():
- """
- Track any ArUco marker in the Tobii Glasses Pro 2 camera video stream.
- For each loaded AOI scene .obj file, position the scene virtually relative to each detected ArUco marker and project the scene into the camera frame.
- Then, detect whether the Tobii gaze point is inside any AOI and send the look-at pointer over the Ivy default bus.
- """
-
- # Manage arguments
- parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0])
- parser.add_argument('-t', '--tobii_ip', metavar='TOBII_IP', type=str, default='192.168.1.10', help='tobii glasses ip')
- parser.add_argument('-p', '--project_name', metavar='PROJECT_NAME', type=str, default=TobiiController.DEFAULT_PROJECT_NAME, help='project name')
- parser.add_argument('-u', '--participant_name', metavar='PARTICIPANT_NAME', type=str, default=TobiiController.DEFAULT_PARTICIPANT_NAME, help='participant name')
- parser.add_argument('-c', '--camera_calibration', metavar='CAM_CALIB', type=str, default='tobii_camera.json', help='json camera calibration filepath')
- parser.add_argument('-y', '--ivy_bus', metavar='IVY_BUS', type=str, default='0.0.0.0:2010', help='Ivy bus ip and port')
- parser.add_argument('-md', '--marker_dictionary', metavar='DICT', type=str, default='DICT_ARUCO_ORIGINAL', help='aruco marker dictionary (DICT_4X4_50, DICT_4X4_100, DICT_4X4_250, DICT_4X4_1000, DICT_5X5_50, DICT_5X5_100, DICT_5X5_250, DICT_5X5_1000, DICT_6X6_50, DICT_6X6_100, DICT_6X6_250, DICT_6X6_1000, DICT_7X7_50, DICT_7X7_100, DICT_7X7_250, DICT_7X7_1000, DICT_ARUCO_ORIGINAL, DICT_APRILTAG_16h5, DICT_APRILTAG_25h9, DICT_APRILTAG_36h10, DICT_APRILTAG_36h11)')
- parser.add_argument('-ms', '--marker_size', metavar='MKR', type=float, default=6, help='aruco marker size (cm)')
- parser.add_argument('-mi', '--marker_id_scene', metavar='MARKER_ID_SCENE', type=json.loads, help='{"marker": "aoi scene filepath"} dictionary')
- args = parser.parse_args()
-
- # Manage markers id to track
- if args.marker_id_scene == None:
- print(f'Track any Aruco markers from the {args.marker_dictionary} dictionary')
- else:
- print(f'Track Aruco markers {args.marker_id_scene.keys()} from the {args.marker_dictionary} dictionary')
-
- # Enable Ivy bus
- IvyInit(os.path.basename(__file__))
- IvyStart(args.ivy_bus)
-
- # Create tobii controller
- tobii_controller = TobiiController.TobiiController(args.tobii_ip, args.project_name, args.participant_name)
-
- # Calibrate tobii glasses
- tobii_controller.calibrate()
-
- # Enable tobii data stream
- tobii_data_stream = tobii_controller.enable_data_stream()
-
- # Enable tobii video stream
- tobii_video_stream = tobii_controller.enable_video_stream()
-
- # Create aruco camera
- aruco_camera = ArUcoCamera.ArUcoCamera()
- aruco_camera.load_calibration_file(args.camera_calibration)
-
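# A minimal sketch of a peer that listens on the same Ivy bus for the
# 'looking ... at ...' messages sent by this controller; the bus address mirrors
# the -y default above and the agent name is an arbitrary assumption.
import time

from ivy.std_api import *

def on_looking_message(agent, aoi_name, look_at):

    # aoi_name and look_at are the two groups captured by the binding regex
    print(f'{agent} is looking at {aoi_name} at {look_at}')

IvyInit('looking_listener')
IvyStart('0.0.0.0:2010')
IvyBindMsg(on_looking_message, 'looking (.*) at (.*)')

while True:
    time.sleep(1)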
- # Create aruco tracker
- aruco_tracker = ArUcoTracker.ArUcoTracker(args.marker_dictionary, args.marker_size, aruco_camera)
-
- # Load AOI 3D scene for each marker
- aoi3D_scenes = {}
-
- for marker_id, aoi_scene_filepath in args.marker_id_scene.items():
-
- marker_id = int(marker_id)
-
- aoi3D_scenes[marker_id] = AOI3DScene.AOI3DScene()
- aoi3D_scenes[marker_id].load(aoi_scene_filepath)
-
- print(f'AOI in {os.path.basename(aoi_scene_filepath)} scene related to marker #{marker_id}')
- for aoi in aoi3D_scenes[marker_id].keys():
- print(f'\t{aoi}')
-
- def aoi3D_scene_selector(marker_id):
- return aoi3D_scenes.get(marker_id, None)
-
- # !!! the parameters below are specific to the TobiiGlassesPro2 !!!
- # Reference : https://www.biorxiv.org/content/10.1101/299925v1
- tobii_accuracy = 1.42 # degree
- tobii_precision = 0.34 # degree
- tobii_camera_hfov = 82 # degree
- tobii_visual_hfov = 160 # degree
-
- # Start streaming
- tobii_controller.start_streaming()
-
- # Live video stream capture loop
- try:
-
- past_gaze_positions = DataStructures.TimeStampedBuffer()
- past_gaze_positions_3d = DataStructures.TimeStampedBuffer()
- past_head_rotations = DataStructures.TimeStampedBuffer()
-
- head_moving = False
- head_movement_last = 0.
-
- while tobii_video_stream.is_alive():
-
- video_ts, video_frame = tobii_video_stream.read()
-
- # Copy video frame to edit visualisation on it without disrupting aruco tracking
- visu_frame = video_frame.copy()
-
- # Process video and data frame
- try:
-
- # Read data stream
- data_stream = tobii_data_stream.read()
-
- # Store last received data
- past_head_rotations.append(data_stream['Gyroscope'])
- past_gaze_positions.append(data_stream['GazePosition'])
- past_gaze_positions_3d.append(data_stream['GazePosition3D'])
-
- # Get nearest head rotation before video timestamp and remove all head rotations before
- _, nearest_head_rotation = past_head_rotations.pop_first_until(video_ts)
-
- # Calculate head movement considering only head yaw and pitch
- head_movement = numpy.array(nearest_head_rotation.value)
- head_movement_px = head_movement.astype(int)
- head_movement_norm = numpy.linalg.norm(head_movement[0:2])
-
- # Draw movement vector
- cv.line(visu_frame.matrix, (int(visu_frame.width/2), int(visu_frame.height/2)), (int(visu_frame.width/2) + head_movement_px[1], int(visu_frame.height/2) - head_movement_px[0]), (150, 150, 150), 3)
-
- # Head movement detection hysteresis
- # TODO : pass the threshold value as argument
- if not head_moving and head_movement_norm > 50:
- head_moving = True
-
- if head_moving and head_movement_norm < 10:
- head_moving = False
-
- # When head is moving, ArUco tracking could return bad pose estimation and so bad AOI scene projection
- if head_moving:
- raise AOIFeatures.AOISceneMissing('Head is moving')
-
- # Get nearest gaze position before video timestamp and remove all gaze positions before
- _, nearest_gaze_position = past_gaze_positions.pop_first_until(video_ts)
-
- # Ignore frame when gaze position is not valid
- if nearest_gaze_position.validity == 1:
- raise GazeFeatures.GazePositionMissing('Invalid gaze position')
-
- gaze_position_pixel = GazeFeatures.GazePosition( (int(nearest_gaze_position.value[0] * visu_frame.width), int(nearest_gaze_position.value[1] * visu_frame.height)) )
-
- # Draw gaze position
- cv.circle(visu_frame.matrix, gaze_position_pixel, 2, (0, 255, 255), -1)
-
- # Get nearest gaze position 3D before video timestamp and remove all gaze positions before
- _, nearest_gaze_position_3d = past_gaze_positions_3d.pop_first_until(video_ts)
-
- # Ignore frame when gaze position 3D is not valid
- if nearest_gaze_position_3d.validity == 1:
- raise GazeFeatures.GazePositionMissing('Invalid gaze position 3D')
-
- gaze_position_pixel = GazeFeatures.GazePosition( (int(nearest_gaze_position.value[0] * visu_frame.width), int(nearest_gaze_position.value[1] * visu_frame.height)) )
-
- gaze_accuracy_mm = numpy.tan(numpy.deg2rad(tobii_accuracy)) * nearest_gaze_position_3d.value[2]
- tobii_camera_hfov_mm = numpy.tan(numpy.deg2rad(tobii_camera_hfov / 2)) * nearest_gaze_position_3d.value[2]
-
- gaze_position_pixel.accuracy = round(visu_frame.width * float(gaze_accuracy_mm) / float(tobii_camera_hfov_mm))
-
- # Draw gaze position and accuracy
- cv.circle(visu_frame.matrix, gaze_position_pixel, 2, (0, 255, 255), -1)
- cv.circle(visu_frame.matrix, gaze_position_pixel, gaze_position_pixel.accuracy, (0, 255, 255), 1)
-
- # Hide frame left and right borders before tracking to ignore markers outside focus area
- cv.rectangle(video_frame.matrix, (0, 0), (int(video_frame.width/6), int(video_frame.height)), (0, 0, 0), -1)
- cv.rectangle(video_frame.matrix, (int(video_frame.width*(1 - 1/6)), 0), (int(video_frame.width), int(video_frame.height)), (0, 0, 0), -1)
-
- # Track markers with pose estimation and draw them
- aruco_tracker.track(video_frame.matrix)
- aruco_tracker.draw(visu_frame.matrix)
-
- # When no marker is detected, no AOI scene projection can be done
- if aruco_tracker.get_markers_number() == 0:
- raise AOIFeatures.AOISceneMissing('No marker detected')
-
- # Store aoi 2D video for further scene merging
- aoi2D_dict = {}
-
- # Project 3D scenes related to each aruco marker
- for (i, marker_id) in enumerate(aruco_tracker.get_markers_ids()):
-
- # Select 3D scene related to detected marker
- aoi3D_scene = aoi3D_scene_selector(marker_id)
-
- if aoi3D_scene == None:
- continue
-
- # Transform scene into camera referential
- aoi3D_camera = aoi3D_scene.transform(aruco_tracker.get_marker_translation(i), aruco_tracker.get_marker_rotation(i))
-
- # Get aoi inside vision cone field
- cone_vision_height_cm = nearest_gaze_position_3d.value[2]/10 # cm
- cone_vision_radius_cm = numpy.tan(numpy.deg2rad(tobii_visual_hfov / 2)) * cone_vision_height_cm
-
- aoi3D_inside, aoi3D_outside = aoi3D_camera.vision_cone(cone_vision_radius_cm, cone_vision_height_cm)
-
- # Keep only aoi inside vision cone field
- aoi3D_scene = aoi3D_scene.copy(exclude=aoi3D_outside.keys())
-
- # DON'T APPLY CAMERA DISTORTION: it projects points which are far from the frame into it
- # This hack isn't realistic but, as the gaze mainly focuses on centered AOI where the distortion is low, it is acceptable.
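# A small sketch of the projection trick described in the comment above: project
# 3D points with the camera matrix K but no distortion coefficients, using plain
# OpenCV; K, the pose and the AOI corners are illustrative stand-ins.
import cv2 as cv
import numpy

K = numpy.array([[800., 0., 640.], [0., 800., 360.], [0., 0., 1.]])
aoi_corners = numpy.array([[0., 0., 0.], [10., 0., 0.], [10., 10., 0.], [0., 10., 0.]])
rvec = numpy.zeros(3)              # marker rotation as a Rodrigues vector
tvec = numpy.array([0., 0., 50.])  # marker placed 50 cm in front of the camera

# Passing None as distortion coefficients keeps the plain pinhole model
projected, _ = cv.projectPoints(aoi_corners, rvec, tvec, K, None)
print(projected.reshape(-1, 2))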
- aoi2D_video_scene = aoi3D_scene.project(aruco_tracker.get_marker_translation(i), aruco_tracker.get_marker_rotation(i), aruco_camera.get_K())
-
- # Store each 2D aoi for further scene merging
- for name, aoi in aoi2D_video_scene.items():
-
- if name not in aoi2D_dict.keys():
- aoi2D_dict[name] = []
-
- aoi2D_dict[name].append(aoi.clockwise())
-
- # Merge all 2D aoi into a single 2D scene
- aoi2D_merged_scene = AOI2DScene.AOI2DScene()
- for name, aoi_array in aoi2D_dict.items():
- aoi2D_merged_scene[name] = numpy.sum(aoi_array, axis=0) / len(aoi_array)
-
- aoi2D_merged_scene.draw(visu_frame.matrix, gaze_position_pixel, gaze_position_pixel.accuracy, exclude=['Visualisation_Plan'])
-
- # When the merged scene is empty
- if len(aoi2D_merged_scene.keys()) == 0:
- raise AOIFeatures.AOISceneMissing('Scene is empty')
-
- # Send look at aoi pointer
- for name, aoi in aoi2D_merged_scene.items():
-
- if aoi.looked(gaze_position_pixel):
-
- # 4 corners aoi
- if len(aoi) == 4:
- IvySendMsg(f'looking {name} at {aoi.look_at(gaze_position_pixel)}')
- else:
- IvySendMsg(f'looking {name}')
-
- # Raised when gaze data can't be processed
- except GazeFeatures.GazeDataMissing as e:
-
- cv.rectangle(visu_frame.matrix, (0, 50), (550, 100), (127, 127, 127), -1)
- cv.putText(visu_frame.matrix, str(e), (20, 80), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
-
- # Raised when aoi scene is missing
- except AOIFeatures.AOISceneMissing as e:
-
- cv.rectangle(visu_frame.matrix, (0, 50), (550, 100), (127, 127, 127), -1)
- cv.putText(visu_frame.matrix, str(e), (20, 80), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
-
- # Raised when buffer is empty
- except ValueError:
- pass
-
- # Draw focus area
- cv.rectangle(visu_frame.matrix, (int(video_frame.width/6), 0), (int(visu_frame.width*(1-1/6)), int(visu_frame.height)), (255, 150, 150), 1)
-
- # Draw center
- cv.line(visu_frame.matrix, (int(visu_frame.width/2) - 50, int(visu_frame.height/2)), (int(visu_frame.width/2) + 50, int(visu_frame.height/2)), (255, 150, 150), 1)
- cv.line(visu_frame.matrix, (int(visu_frame.width/2), int(visu_frame.height/2) - 50), (int(visu_frame.width/2), int(visu_frame.height/2) + 50), (255, 150, 150), 1)
-
- # Write stream timing
- cv.rectangle(visu_frame.matrix, (0, 0), (550, 50), (63, 63, 63), -1)
- cv.putText(visu_frame.matrix, f'Stream time: {int(video_ts / 1e3)} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
-
- # Close window using 'Esc' key
- if cv.waitKey(1) == 27:
- break
-
- cv.imshow('Live Scene', visu_frame.matrix)
-
- # Exit on 'ctrl+C' interruption
- except KeyboardInterrupt:
- pass
-
- # Stop frame display
- cv.destroyAllWindows()
-
- # Stop streaming
- tobii_controller.stop_streaming()
-
-if __name__ == '__main__':
-
- main()
\ No newline at end of file
diff --git a/src/argaze/utils/live_tobii_session.py b/src/argaze/utils/live_tobii_session.py
deleted file mode 100644
index b849357..0000000
--- a/src/argaze/utils/live_tobii_session.py
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/usr/bin/env python
-
-import argparse
-import os, time
-
-from argaze import DataStructures, GazeFeatures
-from argaze.TobiiGlassesPro2 import *
-
-import cv2 as cv
-import numpy
-
-def main():
- """
- Capture camera video and gaze data streams and synchronise them.
- """ - - # Manage arguments - parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0]) - parser.add_argument('-t', '--tobii_ip', metavar='TOBII_IP', type=str, default='192.168.1.12', help='tobii glasses ip') - - args = parser.parse_args() - - # Create tobii controller - tobii_controller = TobiiController.TobiiController(args.tobii_ip, 'myProject', 'mySelf') - - # Calibrate tobii glasses - tobii_controller.calibrate() - - # Enable tobii data stream - tobii_data_stream = tobii_controller.enable_data_stream() - - # Enable tobii video stream - tobii_video_stream = tobii_controller.enable_video_stream() - - # Start streaming - tobii_controller.start_streaming() - - # Live video stream capture loop - try: - - past_gaze_positions = DataStructures.TimeStampedBuffer() - - while tobii_video_stream.is_alive(): - - video_ts, video_frame = tobii_video_stream.read() - - try: - - # Read data stream - data_stream = tobii_data_stream.read() - - # Store received gaze positions - past_gaze_positions.append(data_stream['GazePosition']) - - # Get last gaze position before video timestamp and remove all former gaze positions - earliest_ts, earliest_gaze_position = past_gaze_positions.pop_first_until(video_ts) - - # Draw gaze position - video_gaze_pixel = (int(earliest_gaze_position.value[0] * video_frame.width), int(earliest_gaze_position.value[1] * video_frame.height)) - cv.circle(video_frame.matrix, video_gaze_pixel, 4, (0, 255, 255), -1) - - # Wait for gaze position - except (AttributeError, ValueError): - continue - - # Close window using 'Esc' key - if cv.waitKey(1) == 27: - break - - cv.imshow(f'Live Tobii Camera', video_frame.matrix) - - # Exit on 'ctrl+C' interruption - except KeyboardInterrupt: - pass - - # Stop frame display - cv.destroyAllWindows() - - # Stop streaming - tobii_controller.stop_streaming() - -if __name__ == '__main__': - - main() \ No newline at end of file diff --git a/src/argaze/utils/record_tobii_session.py b/src/argaze/utils/record_tobii_session.py deleted file mode 100644 index a45727b..0000000 --- a/src/argaze/utils/record_tobii_session.py +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env python - -import argparse -import threading -import time -import random - -from argaze.TobiiGlassesPro2 import TobiiController -from argaze.utils import MiscFeatures - -def main(): - """ - Record a Tobii Glasses Pro 2 session on Tobii interface's SD Card - """ - - # Manage arguments - parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0]) - parser.add_argument('-t', '--tobii_ip', metavar='TOBII_IP', type=str, default='192.168.1.10', help='tobii glasses ip') - parser.add_argument('-p', '--project_name', metavar='PROJECT_NAME', type=str, default=TobiiController.DEFAULT_PROJECT_NAME, help='project name') - parser.add_argument('-u', '--participant_name', metavar='PARTICIPANT_NAME', type=str, default=TobiiController.DEFAULT_PARTICIPANT_NAME, help='participant name') - args = parser.parse_args() - - # Create tobii controller - tobii_controller = TobiiController.TobiiController(args.tobii_ip, args.project_name, args.participant_name) - - # Calibrate tobii glasses - tobii_controller.calibrate() - - # Create recording - recording_id = tobii_controller.create_recording(args.participant_name) - - # Start recording - tobii_controller.start_recording(recording_id) - print('Recording started') - - # Define loop - last_battery_level = 0 - time_count = 0 - - exit = MiscFeatures.ExitSignalHandler() - print('Waiting for Ctrl+C to quit...\n') - - while not exit.status(): - - # Print 
storage info each minutes - if time_count % 60 == 0: - - print(tobii_controller.get_storage_info()) - - # print battery level each time it changes - # send it as experimental variable - battery_level = tobii_controller.get_battery_level() - if battery_level != last_battery_level: - - print(tobii_controller.get_battery_info()) - - tobii_controller.send_variable('battery', battery_level) - - last_battery_level = battery_level - - # send random event each 3 - 10 seconds - if time_count % random.randint(3, 10) == 0: - - print('Send random event') - - tobii_controller.send_event('random') - - # Sleep 1 second - time.sleep(1) - time_count += 1 - - # Stop recording - tobii_controller.stop_recording(recording_id) - print('Recording stopped') - -if __name__ == '__main__': - - main() \ No newline at end of file diff --git a/src/argaze/utils/replay_tobii_session.py b/src/argaze/utils/replay_tobii_session.py deleted file mode 100644 index 86d0057..0000000 --- a/src/argaze/utils/replay_tobii_session.py +++ /dev/null @@ -1,116 +0,0 @@ -#!/usr/bin/env python - -import argparse - -from argaze import GazeFeatures -from argaze.TobiiGlassesPro2 import TobiiEntities, TobiiVideo, TobiiData -from argaze.utils import MiscFeatures - -import numpy - -import cv2 as cv - -def main(): - """ - Replay Tobii segment video - """ - - # manage arguments - parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0]) - parser.add_argument('-s', '--segment_path', metavar='SEGMENT_PATH', type=str, default=None, help='segment path') - parser.add_argument('-r', '--time_range', metavar=('START_TIME', 'END_TIME'), nargs=2, type=float, default=(0., None), help='start and end time (in second)') - parser.add_argument('-w', '--window', metavar='DISPLAY', type=bool, default=True, help='enable window display', action=argparse.BooleanOptionalAction) - args = parser.parse_args() - - if args.segment_path != None: - - # Load a tobii segment - tobii_segment = TobiiEntities.TobiiSegment(args.segment_path, int(args.time_range[0] * 1e6), int(args.time_range[1] * 1e6) if args.time_range[1] != None else None) - - # Load a tobii segment video - tobii_segment_video = tobii_segment.load_video() - print(f'Video properties:\n\tduration: {tobii_segment_video.get_duration() / 1e6} s\n\twidth: {tobii_segment_video.get_width()} px\n\theight: {tobii_segment_video.get_height()} px') - - # Load a tobii segment data - tobii_segment_data = tobii_segment.load_data() - - print(f'Loaded data count:') - for name in tobii_segment_data.keys(): - print(f'\t{name}: {len(tobii_segment_data[name])} data') - - # Access to timestamped gaze position data buffer - tobii_ts_gaze_positions = tobii_segment_data['GazePosition'] - - # Access to timestamped pupil diameter data buffer - tobii_ts_pupil_diameter = tobii_segment_data['PupilDiameter'] - - # Access to timestamped events data buffer - tobii_ts_events = tobii_segment_data['Event'] - - # Video and data replay loop - try: - - # Initialise progress bar - MiscFeatures.printProgressBar(0, tobii_segment_video.get_duration() / 1e3, prefix = 'Video progression:', suffix = 'Complete', length = 100) - - # Iterate on video frames - for video_ts, video_frame in tobii_segment_video.frames(): - - video_ts_ms = video_ts / 1e3 - - # Write segment timing - cv.putText(video_frame.matrix, f'Segment time: {int(video_ts_ms)} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA) - - try: - - # Get closest gaze position before video timestamp and remove all gaze positions before - closest_gaze_ts, 
closest_gaze_position = tobii_ts_gaze_positions.pop_first_until(video_ts)
-
- # Get closest pupil diameter before video timestamp and remove all pupil diameters before
- closest_pupil_ts, closest_pupil_diameter = tobii_ts_pupil_diameter.pop_first_until(video_ts)
-
- # Draw gaze position
- gaze_position = (int(closest_gaze_position.value[0] * video_frame.width), int(closest_gaze_position.value[1] * video_frame.height))
- pupil_diameter = int((10 - closest_pupil_diameter.value) / 2)
-
- cv.circle(video_frame.matrix, gaze_position, 10, (0, 255, 255), pupil_diameter)
-
- # Wait for gaze position
- except ValueError:
- continue
-
- try:
-
- # Get closest event before video timestamp and remove all events before
- closest_event_ts, closest_event = tobii_ts_events.pop_first_until(video_ts)
-
- print(closest_event_ts / 1e3, closest_event)
-
- # Write events
- cv.putText(video_frame.matrix, str(closest_event), (20, 140), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
-
- except ValueError:
- pass
-
- if args.window:
-
- # Close window using 'Esc' key
- if cv.waitKey(1) == 27:
- break
-
- cv.imshow(f'Segment {tobii_segment.get_id()} video', video_frame.matrix)
-
- # Update Progress Bar
- progress = video_ts_ms - int(args.time_range[0] * 1e3)
- MiscFeatures.printProgressBar(progress, tobii_segment_video.get_duration() / 1e3, prefix = 'Video progression:', suffix = 'Complete', length = 100)
-
- # Exit on 'ctrl+C' interruption
- except KeyboardInterrupt:
- pass
-
- # Stop frame display
- cv.destroyAllWindows()
-
-if __name__ == '__main__':
-
- main()
\ No newline at end of file
diff --git a/src/argaze/utils/tobii_camera_calibrate.py b/src/argaze/utils/tobii_camera_calibrate.py
new file mode 100644
index 0000000..61fc56c
--- /dev/null
+++ b/src/argaze/utils/tobii_camera_calibrate.py
@@ -0,0 +1,120 @@
+#!/usr/bin/env python
+
+import argparse
+import os
+import time
+
+from argaze.TobiiGlassesPro2 import TobiiController, TobiiVideo
+from argaze.ArUcoMarkers import ArUcoBoard, ArUcoTracker, ArUcoCamera
+
+import cv2 as cv
+
+def main():
+ """
+ Captures board pictures and finally outputs camera calibration data into a .json file.
+
+ - Export and print a calibration board using the aruco_calibration_board_export.py utility.
+ - Place the calibration board so it is entirely visible on screen and move the camera through many configurations (orientation and distance): the script will automatically take pictures. Do this step with good lighting and a clear background.
+ - Once enough pictures have been captured (~20), press the Esc key, then wait for the camera calibration processing.
+ - Finally, check the rms parameter: it should be between 0. and 1. if the calibration succeeded (lower is better).
+ + ### Reference: + - [Camera calibration using ArUco marker tutorial](https://automaticaddison.com/how-to-perform-camera-calibration-using-opencv/) + """ + + # manage arguments + parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0]) + parser.add_argument('columns', metavar='COLS_NUMBER', type=int, default=7, help='number of columns') + parser.add_argument('rows', metavar='ROWS_NUMBER', type=int, default=5, help='number of rows') + parser.add_argument('square_size', metavar='SQUARE_SIZE', type=float, default=5, help='square size (cm)') + parser.add_argument('marker_size', metavar='MARKER_SIZE', type=float, default=3, help='marker size (cm)') + parser.add_argument('-t', '--tobii_ip', metavar='TOBII_IP', type=str, default='192.168.1.10', help='tobii glasses ip') + parser.add_argument('-o', '--output', metavar='OUT', type=str, default='camera.json', help='destination filepath') + parser.add_argument('-d', '--dictionary', metavar='DICT', type=str, default='DICT_ARUCO_ORIGINAL', help='aruco marker dictionnary (DICT_4X4_50, DICT_4X4_100, DICT_4X4_250, DICT_4X4_1000, DICT_5X5_50, DICT_5X5_100, DICT_5X5_250, DICT_5X5_1000, DICT_6X6_50, DICT_6X6_100, DICT_6X6_250, DICT_6X6_1000, DICT_7X7_50, DICT_7X7_100, DICT_7X7_250, DICT_7X7_1000, DICT_ARUCO_ORIGINAL,DICT_APRILTAG_16h5, DICT_APRILTAG_25h9, DICT_APRILTAG_36h10, DICT_APRILTAG_36h11)') + args = parser.parse_args() + + # Create tobii controller + tobii_controller = TobiiController.TobiiController(args.tobii_ip, 'myProject', 'mySelf') + + tobii_controller.set_video_freq_25() + + # Enable tobii video stream + tobii_video_stream = tobii_controller.enable_video_stream() + + # Create aruco camera + aruco_camera = ArUcoCamera.ArUcoCamera() + + # Create aruco board + aruco_board = ArUcoBoard.ArUcoBoard(args.dictionary, args.columns, args.rows, args.square_size, args.marker_size) + + # Create aruco tracker + aruco_tracker = ArUcoTracker.ArUcoTracker(args.dictionary, args.marker_size, aruco_camera) + + # Start tobii glasses streaming + tobii_controller.start_streaming() + + print("Camera calibration starts") + print("Waiting for calibration board...") + + expected_markers_number = aruco_board.get_markers_number() + expected_corners_number = aruco_board.get_corners_number() + + # capture loop + try: + + while tobii_video_stream.is_alive(): + + # capture frame with a full displayed board + video_ts, video_frame = tobii_video_stream.read() + + # track all markers in the board + aruco_tracker.track_board(video_frame.matrix, aruco_board, expected_markers_number) + + # draw only markers + aruco_tracker.draw(video_frame.matrix) + + # draw current calibration data count + cv.putText(video_frame.matrix, f'Capture: {aruco_camera.get_calibration_data_count()}', (50, 50), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv.LINE_AA) + cv.imshow('Tobii Camera Calibration', video_frame.matrix) + + # if all board corners are detected + if aruco_tracker.get_board_corners_number() == expected_corners_number: + + # draw board corners to notify a capture is done + aruco_tracker.draw_board(video_frame.matrix) + + # append data + aruco_camera.store_calibration_data(aruco_tracker.get_board_corners(), aruco_tracker.get_board_corners_ids()) + + cv.imshow('Tobii Camera Calibration', video_frame.matrix) + + # close window using 'Esc' key + if cv.waitKey(1) == 27: + break + + # exit on 'ctrl+C' interruption + except KeyboardInterrupt: + pass + + # stop frame display + cv.destroyAllWindows() + + # Stop tobii glasses streaming + tobii_controller.stop_streaming() + + 
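# A sketch of how intrinsics like those computed just below can be used with plain
# OpenCV to undistort a frame; the K and D values are stand-ins, not calibration output.
import cv2 as cv
import numpy

K = numpy.array([[800., 0., 640.], [0., 800., 360.], [0., 0., 1.]])  # camera matrix
D = numpy.zeros(5)                                                   # distortion coefficients

frame = numpy.full((720, 1280, 3), 127, dtype=numpy.uint8)           # gray test frame
undistorted = cv.undistort(frame, K, D)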
print('\nCalibrating camera...')
+ aruco_camera.calibrate(aruco_board, video_frame.width, video_frame.height)
+
+ print('\nCalibration succeeded!')
+ print(f'\nRMS:\n{aruco_camera.get_rms()}')
+ print(f'\nDimensions:\n{video_frame.width}x{video_frame.height}')
+ print(f'\nCamera matrix:\n{aruco_camera.get_K()}')
+ print(f'\nDistortion coefficients:\n{aruco_camera.get_D()}')
+
+ aruco_camera.save_calibration_file(args.output)
+
+ print(f'\nCalibration data exported into {args.output} file')
+
+if __name__ == '__main__':
+
+ main()
diff --git a/src/argaze/utils/tobii_sdcard_explore.py b/src/argaze/utils/tobii_sdcard_explore.py
new file mode 100644
index 0000000..e80057f
--- /dev/null
+++ b/src/argaze/utils/tobii_sdcard_explore.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+
+import argparse
+
+from argaze.TobiiGlassesPro2 import TobiiEntities, TobiiData, TobiiVideo
+
+def main():
+ """
+ Explore Tobii Glasses Pro 2 interface's SD Card
+ """
+
+ # manage arguments
+ parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0])
+ parser.add_argument('-d', '--drive_path', metavar='DRIVE_PATH', type=str, default=None, help='drive path')
+ parser.add_argument('-p', '--project_path', metavar='PROJECT_PATH', type=str, default=None, help='project path')
+ parser.add_argument('-r', '--recording_path', metavar='RECORDING_PATH', type=str, default=None, help='recording path')
+ parser.add_argument('-s', '--segment_path', metavar='SEGMENT_PATH', type=str, default=None, help='segment path')
+ args = parser.parse_args()
+
+ if args.drive_path != None:
+
+ # Load all projects from a tobii drive
+ tobii_drive = TobiiEntities.TobiiDrive(args.drive_path)
+
+ for project in tobii_drive.get_all_projects():
+ print(f'Project id: {project.get_id()}, name: {project.get_name()}')
+
+ elif args.project_path != None:
+
+ # Load one tobii project
+ tobii_project = TobiiEntities.TobiiProject(args.project_path)
+
+ for participant in tobii_project.get_all_participants():
+ print(f'Participant id: {participant.get_id()}, name: {participant.get_name()}')
+
+ for recording in tobii_project.get_all_recordings():
+ print(f'Recording id: {recording.get_id()}, name: {recording.get_name()}')
+
+ elif args.recording_path != None:
+
+ # Load a tobii recording
+ tobii_recording = TobiiEntities.TobiiRecording(args.recording_path)
+
+ for segment in tobii_recording.get_all_segments():
+ print(f'Segment id: {segment.get_id()}')
+
+ elif args.segment_path != None:
+
+ # Load a tobii segment
+ tobii_segment = TobiiEntities.TobiiSegment(args.segment_path)
+
+ tobii_segment_video = tobii_segment.get_video()
+ print(f'Video width: {tobii_segment_video.get_width()}, height: {tobii_segment_video.get_height()}, fps: {tobii_segment_video.get_fps()}')
+
+ tobii_segment_data = tobii_segment.get_data()
+
+ data = tobii_segment_data.load()
+
+ for key in data.keys():
+ print(f'{key}: {len(data[key])} items')
+ print(f'{key} first item: {data[key].popitem()}')
+
+if __name__ == '__main__':
+
+ main()
\ No newline at end of file
diff --git a/src/argaze/utils/tobii_segment_aruco_aoi_edit.py b/src/argaze/utils/tobii_segment_aruco_aoi_edit.py
new file mode 100644
index 0000000..61d695d
--- /dev/null
+++ b/src/argaze/utils/tobii_segment_aruco_aoi_edit.py
@@ -0,0 +1,430 @@
+#!/usr/bin/env python
+
+import argparse
+import os
+import json
+import time
+
+from argaze import DataStructures
+from argaze import GazeFeatures
+from argaze.TobiiGlassesPro2 import TobiiEntities, TobiiVideo
+from argaze.ArUcoMarkers import *
+from argaze.AreaOfInterest import *
+from argaze.utils import MiscFeatures
+
+import numpy
+import cv2 as cv
+
+def main():
+ """
+ Open a video file with an ArUco marker scene inside
+ """
+
+ # Manage arguments
+ parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0])
+ parser.add_argument('-s', '--segment_path', metavar='SEGMENT_PATH', type=str, default=None, help='segment path')
+ parser.add_argument('-r', '--time_range', metavar=('START_TIME', 'END_TIME'), nargs=2, type=float, default=(0., None), help='start and end time (in second)')
+ parser.add_argument('-c', '--camera_calibration', metavar='CAM_CALIB', type=str, default=None, help='json camera calibration filepath')
+ parser.add_argument('-p', '--aruco_tracker_configuration', metavar='TRACK_CONFIG', type=str, default=None, help='json aruco tracker configuration filepath')
+ parser.add_argument('-md', '--marker_dictionary', metavar='MARKER_DICT', type=str, default='DICT_ARUCO_ORIGINAL', help='aruco marker dictionary (DICT_4X4_50, DICT_4X4_100, DICT_4X4_250, DICT_4X4_1000, DICT_5X5_50, DICT_5X5_100, DICT_5X5_250, DICT_5X5_1000, DICT_6X6_50, DICT_6X6_100, DICT_6X6_250, DICT_6X6_1000, DICT_7X7_50, DICT_7X7_100, DICT_7X7_250, DICT_7X7_1000, DICT_ARUCO_ORIGINAL, DICT_APRILTAG_16h5, DICT_APRILTAG_25h9, DICT_APRILTAG_36h10, DICT_APRILTAG_36h11)')
+ parser.add_argument('-ms', '--marker_size', metavar='MARKER_SIZE', type=float, default=6, help='aruco marker size (cm)')
+ parser.add_argument('-mi', '--marker_id_scene', metavar='MARKER_ID_SCENE', type=json.loads, help='{"marker": "aoi scene filepath"} dictionary')
+ parser.add_argument('-o', '--output', metavar='OUT', type=str, default=None, help='destination folder path (segment folder by default)')
+ parser.add_argument('-w', '--window', metavar='DISPLAY', type=bool, default=True, help='enable window display', action=argparse.BooleanOptionalAction)
+ args = parser.parse_args()
+
+ if args.segment_path != None:
+
+ # Manage markers id to track
+ if args.marker_id_scene == None:
+ print(f'Track any Aruco markers from the {args.marker_dictionary} dictionary')
+ else:
+ print(f'Track Aruco markers {list(args.marker_id_scene.keys())} from the {args.marker_dictionary} dictionary')
+
+ # Manage destination path
+ destination_path = '.'
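# A short illustration of the -mi / --marker_id_scene argument parsed above: it is a
# JSON object mapping marker ids to .obj scene filepaths (the filenames below are
# placeholders). json.loads yields string keys, which is why the scene loading loop
# below casts each marker id with int(marker_id).
import json

marker_id_scene = json.loads('{"0": "scene_a.obj", "1": "scene_b.obj"}')

for marker_id, aoi_scene_filepath in marker_id_scene.items():
    print(int(marker_id), aoi_scene_filepath)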
+ if args.output != None:
+
+ if not os.path.exists(os.path.dirname(args.output)):
+
+ os.makedirs(os.path.dirname(args.output))
+ print(f'{os.path.dirname(args.output)} folder created')
+
+ destination_path = args.output
+
+ else:
+
+ destination_path = args.segment_path
+
+ # Export into a dedicated time range folder
+ if args.time_range[1] != None:
+ timerange_path = f'[{int(args.time_range[0])}s - {int(args.time_range[1])}s]'
+ else:
+ timerange_path = f'[all]'
+
+ destination_path = f'{destination_path}/{timerange_path}'
+
+ if not os.path.exists(destination_path):
+
+ os.makedirs(destination_path)
+ print(f'{destination_path} folder created')
+
+ #vs_data_filepath = f'{destination_path}/visual_scan.csv'
+
+ # Load a tobii segment
+ tobii_segment = TobiiEntities.TobiiSegment(args.segment_path, int(args.time_range[0] * 1e6), int(args.time_range[1] * 1e6) if args.time_range[1] != None else None)
+
+ # Load a tobii segment video
+ tobii_segment_video = tobii_segment.load_video()
+ print(f'Video properties:\n\tduration: {tobii_segment_video.get_duration()/1e6} s\n\twidth: {tobii_segment_video.get_width()} px\n\theight: {tobii_segment_video.get_height()} px')
+
+ # Create aruco camera
+ aruco_camera = ArUcoCamera.ArUcoCamera()
+
+ # Load calibration file
+ if args.camera_calibration != None:
+
+ aruco_camera.load_calibration_file(args.camera_calibration)
+
+ else:
+
+ raise ValueError('.json camera calibration filepath required. Use -c option.')
+
+ # Create aruco tracker
+ aruco_tracker = ArUcoTracker.ArUcoTracker(args.marker_dictionary, args.marker_size, aruco_camera)
+
+ # Load specific configuration file
+ def load_configuration_file():
+
+ if args.aruco_tracker_configuration != None:
+
+ aruco_tracker.load_configuration_file(args.aruco_tracker_configuration)
+
+ print(f'ArUcoTracker configuration for {aruco_tracker.get_markers_dictionay().get_markers_format()} markers detection:')
+ aruco_tracker.print_configuration()
+
+ load_configuration_file()
+
+ # Load AOI 3D scene for each marker
+ aoi3D_scenes = {}
+ aoi3D_scene_edits = {}
+
+ for marker_id, aoi_scene_filepath in args.marker_id_scene.items():
+
+ marker_id = int(marker_id)
+
+ aoi3D_scenes[marker_id] = AOI3DScene.AOI3DScene()
+ aoi3D_scenes[marker_id].load(aoi_scene_filepath)
+
+ aoi3D_scene_edits[marker_id] = {
+ 'rotation': numpy.array([0.0, 0.0, 0.0]),
+ 'translation': numpy.array([0.0, 0.0, 0.0])
+ }
+
+ print(f'AOI in {os.path.basename(aoi_scene_filepath)} scene related to marker #{marker_id}:')
+ for aoi in aoi3D_scenes[marker_id].keys():
+ print(f'\t{aoi}')
+
+ def aoi3D_scene_selector(marker_id):
+ return aoi3D_scenes.get(marker_id, None)
+
+ def aoi3D_scene_edit_selector(marker_id):
+ return aoi3D_scene_edits.get(marker_id, None)
+
+ # Display first frame
+ video_ts, video_frame = tobii_segment_video.get_frame(0)
+ cv.imshow(f'Segment {tobii_segment.get_id()} ArUco marker editor', video_frame.matrix)
+
+ # Init mouse interaction variables
+ pointer = (0, 0)
+ left_click = (0, 0)
+ right_click = (0, 0)
+ right_button = False
+ edit_trans = False # translate
+ edit_coord = 0 # x
+
+ # On mouse left click: update pointer position
+ def on_mouse_event(event, x, y, flags, param):
+
+ nonlocal pointer
+ nonlocal left_click
+ nonlocal right_click
+ nonlocal right_button
+
+ # Update pointer
+ pointer = (x, y)
+
+ # Update left_click
+ if event == cv.EVENT_LBUTTONUP:
+
+ left_click = pointer
+
+ # Update right_button
+ elif event == cv.EVENT_RBUTTONDOWN:
+
+ right_button = True
+
+ elif event == cv.EVENT_RBUTTONUP:
+
+ right_button = False
+
+ # Update right_click
+ if right_button:
+
+ right_click = pointer
+
+ cv.setMouseCallback(f'Segment {tobii_segment.get_id()} ArUco marker editor', on_mouse_event)
+
+ # Frame selector loop
+ frame_index = 0
+ last_frame_index = -1
+ last_frame = video_frame.copy()
+ force_update = False
+
+ selected_marker_id = -1
+
+ try:
+
+ while True:
+
+ # Select a frame on change
+ if frame_index != last_frame_index or force_update:
+
+ video_ts, video_frame = tobii_segment_video.get_frame(frame_index)
+ video_ts_ms = video_ts / 1000
+
+ last_frame_index = frame_index
+ last_frame = video_frame.copy()
+
+ # Hide frame left and right borders before tracking to ignore markers outside focus area
+ cv.rectangle(video_frame.matrix, (0, 0), (int(video_frame.width/6), int(video_frame.height)), (0, 0, 0), -1)
+ cv.rectangle(video_frame.matrix, (int(video_frame.width*(1 - 1/6)), 0), (int(video_frame.width), int(video_frame.height)), (0, 0, 0), -1)
+
+ # Track markers with pose estimation
+ aruco_tracker.track(video_frame.matrix)
+
+ else:
+
+ video_frame = last_frame.copy()
+
+ # Edit fake gaze position from pointer
+ gaze_position = GazeFeatures.GazePosition(pointer, accuracy=2)
+
+ # Copy video frame to edit visualisation on it without disrupting aruco tracking
+ visu_frame = video_frame.copy()
+
+ # Draw markers and pose estimation
+ aruco_tracker.draw(visu_frame.matrix)
+
+ # Project 3D scene on each video frame and the visualisation frame
+ if aruco_tracker.get_markers_number():
+
+ # Write detected marker ids
+ cv.putText(visu_frame.matrix, f'Detected markers: {aruco_tracker.get_markers_ids()}', (20, visu_frame.height - 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
+
+ # Update selected marker id by left-clicking on a marker
+ for (i, marker_id) in enumerate(aruco_tracker.get_markers_ids()):
+
+ marker_aoi = numpy.array(aruco_tracker.get_marker_corners(i)).view(AOIFeatures.AreaOfInterest)
+
+ if marker_aoi.looked(left_click):
+
+ selected_marker_id = marker_id
+
+ # Select 3D scene related to selected marker
+ aoi3D_scene = aoi3D_scene_selector(selected_marker_id)
+
+ # If a marker is selected
+ try:
+
+ # Retrieve marker index
+ selected_marker_index = aruco_tracker.get_marker_index(selected_marker_id)
+
+ if aoi3D_scene == None:
+ raise UserWarning('No AOI 3D scene')
+
+ # Select scene edit
+ aoi3D_scene_edit = aoi3D_scene_edit_selector(selected_marker_id)
+
+ # Edit scene
+ if aoi3D_scene_edit != None:
+
+ marker_x, marker_y = aruco_tracker.get_marker_center(selected_marker_index)
+
+ if right_button:
+
+ pointer_delta_x, pointer_delta_y = (right_click[0] - marker_x) / (visu_frame.width/3), (marker_y - right_click[1]) / (visu_frame.width/3)
+
+ if edit_trans:
+
+ # Edit scene rotation
+ if edit_coord == 0:
+ aoi3D_scene_edit['rotation'] = numpy.array([pointer_delta_y, aoi3D_scene_edit['rotation'][1], aoi3D_scene_edit['rotation'][2]])
+
+ elif edit_coord == 1:
+ aoi3D_scene_edit['rotation'] = numpy.array([aoi3D_scene_edit['rotation'][0], pointer_delta_x, aoi3D_scene_edit['rotation'][2]])
+
+ elif edit_coord == 2:
+ aoi3D_scene_edit['rotation'] = numpy.array([aoi3D_scene_edit['rotation'][0], aoi3D_scene_edit['rotation'][1], -1*pointer_delta_y])
+
+ else:
+
+ # Edit scene translation
+ if edit_coord == 0:
+ aoi3D_scene_edit['translation'] = numpy.array([pointer_delta_x, aoi3D_scene_edit['translation'][1], aoi3D_scene_edit['translation'][2]])
+
+ elif edit_coord == 1:
+ aoi3D_scene_edit['translation'] = numpy.array([aoi3D_scene_edit['translation'][0], pointer_delta_y, aoi3D_scene_edit['translation'][2]])
pointer_delta_y, aoi3D_scene_edit['translation'][2]])
+
+ elif edit_coord == 2:
+ aoi3D_scene_edit['translation'] = numpy.array([aoi3D_scene_edit['translation'][0], aoi3D_scene_edit['translation'][1], 2*pointer_delta_y])
+
+ # Apply transformation
+ aoi3D_scene_edited = aoi3D_scene.transform(aoi3D_scene_edit['translation'], aoi3D_scene_edit['rotation'])
+
+ cv.rectangle(visu_frame.matrix, (0, 130), (460, 450), (127, 127, 127), -1)
+
+ # Write rotation matrix
+ R, _ = cv.Rodrigues(aoi3D_scene_edit['rotation'])
+ cv.putText(visu_frame.matrix, f'Rotation matrix:', (20, 160), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
+ cv.putText(visu_frame.matrix, f'{R[0][0]:.3f} {R[0][1]:.3f} {R[0][2]:.3f}', (40, 200), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv.LINE_AA)
+ cv.putText(visu_frame.matrix, f'{R[1][0]:.3f} {R[1][1]:.3f} {R[1][2]:.3f}', (40, 240), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv.LINE_AA)
+ cv.putText(visu_frame.matrix, f'{R[2][0]:.3f} {R[2][1]:.3f} {R[2][2]:.3f}', (40, 280), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv.LINE_AA)
+
+ # Write translation vector
+ T = aoi3D_scene_edit['translation']
+ cv.putText(visu_frame.matrix, f'Translation vector:', (20, 320), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
+ cv.putText(visu_frame.matrix, f'{T[0]:.3f}', (40, 360), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv.LINE_AA)
+ cv.putText(visu_frame.matrix, f'{T[1]:.3f}', (40, 400), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv.LINE_AA)
+ cv.putText(visu_frame.matrix, f'{T[2]:.3f}', (40, 440), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv.LINE_AA)
+
+ # DON'T APPLY CAMERA DISTORTION: it projects points which are far from the frame into it
+ # This hack isn't realistic but as the gaze will mainly focus on centered AOI, where the distortion is low, it is acceptable.
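+
+ # NB: cv.Rodrigues (used above) converts between the compact 3-component
+ # rotation vector edited by this tool and the displayed 3x3 rotation matrix.
+ # Minimal illustrative sketch, not executed by this script:
+ #
+ #   rvec = numpy.array([numpy.pi/2, 0.0, 0.0])  # 90 degrees around the x axis
+ #   R, _ = cv.Rodrigues(rvec)                   # rotation vector -> 3x3 matrix
+ #   rvec_back, _ = cv.Rodrigues(R)              # 3x3 matrix -> rotation vector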
+ aoi2D_video_scene = aoi3D_scene_edited.project(aruco_tracker.get_marker_translation(selected_marker_index), aruco_tracker.get_marker_rotation(selected_marker_index), aruco_camera.get_K()) + + # Draw scene + aoi2D_video_scene.draw(visu_frame.matrix, gaze_position, exclude=['Visualisation_Plan']) + + # Write warning related to marker pose processing + except UserWarning as e: + + cv.putText(visu_frame.matrix, f'Marker {selected_marker_id}: {e}', (20, 120), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv.LINE_AA) + + except ValueError: + + # Write error + if selected_marker_id >= 0: + cv.putText(visu_frame.matrix, f'Marker {selected_marker_id} not found', (20, 120), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv.LINE_AA) + + # Draw focus area + cv.rectangle(visu_frame.matrix, (int(visu_frame.width/6), 0), (int(visu_frame.width*(1-1/6)), int(visu_frame.height)), (255, 150, 150), 1) + + # Draw center + cv.line(visu_frame.matrix, (int(visu_frame.width/2) - 50, int(visu_frame.height/2)), (int(visu_frame.width/2) + 50, int(visu_frame.height/2)), (255, 150, 150), 1) + cv.line(visu_frame.matrix, (int(visu_frame.width/2), int(visu_frame.height/2) - 50), (int(visu_frame.width/2), int(visu_frame.height/2) + 50), (255, 150, 150), 1) + + # Draw pointer + cv.circle(visu_frame.matrix, gaze_position, gaze_position.accuracy, (0, 255, 255), -1) + + # Write segment timing + cv.rectangle(visu_frame.matrix, (0, 0), (550, 50), (63, 63, 63), -1) + cv.putText(visu_frame.matrix, f'Segment time: {int(video_ts_ms)} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA) + + # Write selected marker id + if selected_marker_id >= 0: + + cv.rectangle(visu_frame.matrix, (0, 50), (550, 90), (127, 127, 127), -1) + + # Select color + if edit_coord == 0: + color_axis = (0, 0, 255) + + elif edit_coord == 1: + color_axis = (0, 255, 0) + + elif edit_coord == 2: + color_axis = (255, 0, 0) + + if edit_trans: + cv.putText(visu_frame.matrix, f'Rotate marker {selected_marker_id} around axis {edit_coord + 1}', (20, 80), cv.FONT_HERSHEY_SIMPLEX, 1, color_axis, 1, cv.LINE_AA) + else: + cv.putText(visu_frame.matrix, f'Translate marker {selected_marker_id} along axis {edit_coord + 1}', (20, 80), cv.FONT_HERSHEY_SIMPLEX, 1, color_axis, 1, cv.LINE_AA) + + # Write documentation + else: + cv.rectangle(visu_frame.matrix, (0, 50), (650, 250), (127, 127, 127), -1) + cv.putText(visu_frame.matrix, f'> Left click on marker: select scene', (20, 80), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA) + cv.putText(visu_frame.matrix, f'> T: translate, R: rotate', (20, 120), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA) + cv.putText(visu_frame.matrix, f'> Shift + 0/1/2: select axis', (20, 160), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA) + cv.putText(visu_frame.matrix, f'> Right click and drag: edit axis', (20, 200), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA) + cv.putText(visu_frame.matrix, f'> Ctrl + S: save scene', (20, 240), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA) + + # Reset left_click + left_click = (0, 0) + + if args.window: + + key_pressed = cv.waitKey(1) + + #if key_pressed != -1: + # print(key_pressed) + + # Select previous frame with left arrow + if key_pressed == 2: + frame_index -= 1 + + # Select next frame with right arrow + if key_pressed == 3: + frame_index += 1 + + # Clip frame index + if frame_index < 0: + frame_index = 0 + + # Edit rotation with r key + if key_pressed == 114: + edit_trans = True + + # Edit translation with t key + if key_pressed == 
116: + edit_trans = False + + # Select coordinate to edit with Shift + 0, 1 or 2 + if key_pressed == 49 or key_pressed == 50 or key_pressed == 51: + edit_coord = key_pressed - 49 + + # Save selected marker edition using 'Ctrl + s' + if key_pressed == 19: + + if selected_marker_id > 0 and aoi3D_scene_edit != None: + + aoi_scene_filepath = args.marker_id_scene[f'{selected_marker_id}'] + aoi3D_scene_edited.save(aoi_scene_filepath) + + print(f'Saving scene related to marker #{selected_marker_id} into {aoi_scene_filepath}') + + # Close window using 'Esc' key + if key_pressed == 27: + break + + # Reload tracker configuration on 'c' key + if key_pressed == 99: + load_configuration_file() + force_update = True + + # Display video + cv.imshow(f'Segment {tobii_segment.get_id()} ArUco marker editor', visu_frame.matrix) + + # Wait 1 second + time.sleep(1) + + # Exit on 'ctrl+C' interruption + except KeyboardInterrupt: + pass + + # Stop frame display + cv.destroyAllWindows() + +if __name__ == '__main__': + + main() \ No newline at end of file diff --git a/src/argaze/utils/tobii_segment_aruco_aoi_export.py b/src/argaze/utils/tobii_segment_aruco_aoi_export.py new file mode 100644 index 0000000..a3a31d0 --- /dev/null +++ b/src/argaze/utils/tobii_segment_aruco_aoi_export.py @@ -0,0 +1,435 @@ +#!/usr/bin/env python + +import argparse +import os +import json + +from argaze import DataStructures +from argaze import GazeFeatures +from argaze.TobiiGlassesPro2 import TobiiEntities, TobiiVideo +from argaze.ArUcoMarkers import * +from argaze.AreaOfInterest import * +from argaze.utils import MiscFeatures + +import numpy +import cv2 as cv + +def main(): + """ + Track ArUco markers into Tobii Glasses Pro 2 segment video file. + For each loaded AOI scene .obj file, position the scene virtually relatively to each detected ArUco markers and project the scene into camera frame. + Then, detect if Tobii gaze point is inside any AOI. + Export AOIs video and data. 
+ """
+
+ # Manage arguments
+ parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0])
+ parser.add_argument('-s', '--segment_path', metavar='SEGMENT_PATH', type=str, default=None, help='segment path')
+ parser.add_argument('-r', '--time_range', metavar=('START_TIME', 'END_TIME'), nargs=2, type=float, default=(0., None), help='start and end time (in seconds)')
+ parser.add_argument('-c', '--camera_calibration', metavar='CAM_CALIB', type=str, default=None, help='json camera calibration filepath')
+ parser.add_argument('-p', '--aruco_tracker_configuration', metavar='TRACK_CONFIG', type=str, default=None, help='json aruco tracker configuration filepath')
+ parser.add_argument('-md', '--marker_dictionary', metavar='MARKER_DICT', type=str, default='DICT_ARUCO_ORIGINAL', help='aruco marker dictionary (DICT_4X4_50, DICT_4X4_100, DICT_4X4_250, DICT_4X4_1000, DICT_5X5_50, DICT_5X5_100, DICT_5X5_250, DICT_5X5_1000, DICT_6X6_50, DICT_6X6_100, DICT_6X6_250, DICT_6X6_1000, DICT_7X7_50, DICT_7X7_100, DICT_7X7_250, DICT_7X7_1000, DICT_ARUCO_ORIGINAL, DICT_APRILTAG_16h5, DICT_APRILTAG_25h9, DICT_APRILTAG_36h10, DICT_APRILTAG_36h11)')
+ parser.add_argument('-ms', '--marker_size', metavar='MARKER_SIZE', type=float, default=6, help='aruco marker size (cm)')
+ parser.add_argument('-mi', '--marker_id_scene', metavar='MARKER_ID_SCENE', type=json.loads, help='{"marker": "aoi scene filepath"} dictionary')
+ parser.add_argument('-o', '--output', metavar='OUT', type=str, default=None, help='destination folder path (segment folder by default)')
+ parser.add_argument('-w', '--window', metavar='DISPLAY', type=bool, default=True, help='enable window display', action=argparse.BooleanOptionalAction)
+ args = parser.parse_args()
+
+ if args.segment_path != None:
+
+ # Manage marker ids to track
+ if args.marker_id_scene == None:
+ print(f'Track any Aruco markers from the {args.marker_dictionary} dictionary')
+ else:
+ print(f'Track Aruco markers {list(args.marker_id_scene.keys())} from the {args.marker_dictionary} dictionary')
+
+ # Manage destination path
+ destination_path = '.'
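+
+ # Example invocation, with hypothetical paths for illustration only:
+ #
+ #   python ./src/argaze/utils/tobii_segment_aruco_aoi_export.py -s SEGMENT_PATH \
+ #     -c export/tobii_camera.json -r IN OUT -mi '{"0": "scene.obj"}'
+ #
+ # where the -mi value maps each marker id to the .obj AOI scene attached to it.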
+ if args.output != None: + + if not os.path.exists(os.path.dirname(args.output)): + + os.makedirs(os.path.dirname(args.output)) + print(f'{os.path.dirname(args.output)} folder created') + + destination_path = args.output + + else: + + destination_path = args.segment_path + + # Export into a dedicated time range folder + if args.time_range[1] != None: + timerange_path = f'[{int(args.time_range[0])}s - {int(args.time_range[1])}s]' + else: + timerange_path = f'[all]' + + destination_path = f'{destination_path}/{timerange_path}' + + if not os.path.exists(destination_path): + + os.makedirs(destination_path) + print(f'{destination_path} folder created') + + vs_data_filepath = f'{destination_path}/visual_scan.csv' + vs_visu_filepath = f'{destination_path}/visual_scan_marker_%d.jpg' + vs_video_filepath = f'{destination_path}/visual_scan.mp4' + + # Load a tobii segment + tobii_segment = TobiiEntities.TobiiSegment(args.segment_path, int(args.time_range[0] * 1e6), int(args.time_range[1] * 1e6) if args.time_range[1] != None else None) + + # Load a tobii segment video + tobii_segment_video = tobii_segment.load_video() + print(f'Video properties:\n\tduration: {tobii_segment_video.get_duration()/1e6} s\n\twidth: {tobii_segment_video.get_width()} px\n\theight: {tobii_segment_video.get_height()} px') + + # Load a tobii segment data + tobii_segment_data = tobii_segment.load_data() + + print(f'Loaded data count:') + for name in tobii_segment_data.keys(): + print(f'\t{name}: {len(tobii_segment_data[name])} data') + + # Access to timestamped gaze positions data buffer + tobii_ts_gaze_positions = tobii_segment_data['GazePosition'] + + # Access to timestamped gaze 3D positions data buffer + tobii_ts_gaze_positions_3d = tobii_segment_data['GazePosition3D'] + + # Access to timestamped head rotations data buffer + tobii_ts_head_rotations = tobii_segment_data['Gyroscope'] + + # Prepare video exportation at the same format than segment video + output_video = TobiiVideo.TobiiVideoOutput(vs_video_filepath, tobii_segment_video.get_stream()) + + # Create aruco camera + aruco_camera = ArUcoCamera.ArUcoCamera() + + # Load calibration file + if args.camera_calibration != None: + + aruco_camera.load_calibration_file(args.camera_calibration) + + else: + + raise ValueError('.json camera calibration filepath required. Use -c option.') + + # Create aruco tracker + aruco_tracker = ArUcoTracker.ArUcoTracker(args.marker_dictionary, args.marker_size, aruco_camera) + + # Load specific configuration file + if args.aruco_tracker_configuration != None: + + aruco_tracker.load_configuration_file(args.aruco_tracker_configuration) + + print(f'ArUcoTracker configuration for {aruco_tracker.get_markers_dictionay().get_markers_format()} markers detection:') + aruco_tracker.print_configuration() + + # Load AOI 3D scene for each marker and create a AOI 2D scene and frame when a 'Visualisation_Plan' AOI exist + aoi3D_scenes = {} + aoi2D_visu_scenes = {} + aoi2D_visu_frames = {} + + for marker_id, aoi_scene_filepath in args.marker_id_scene.items(): + + marker_id = int(marker_id) + + aoi3D_scenes[marker_id] = AOI3DScene.AOI3DScene() + aoi3D_scenes[marker_id].load(aoi_scene_filepath) + + print(f'AOI in {os.path.basename(aoi_scene_filepath)} scene related to marker #{marker_id}:') + for aoi in aoi3D_scenes[marker_id].keys(): + + # If a 'Visualisation_Plan' AOI exist + # TODO: document this deep feature !!! 
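+ # In short: when the loaded .obj scene contains an AOI named 'Visualisation_Plan',
+ # the block below builds a dedicated 1920x1080 reference frame, projects the whole
+ # scene onto that plan with a synthetic camera matrix K0, and the main loop later
+ # maps each gaze position into this frame to draw a visual scan picture.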
+ if aoi == 'Visualisation_Plan': + + print(f'\tVisualisation_Plan detected: a visual scan picture will be output for this marker.') + + # Create a visual scan visualisation frame + visu_width, visu_height = 1920, 1080 + scene_width, scene_height, __ = aoi3D_scenes[marker_id].size() + + aoi2D_visu_frames[marker_id] = numpy.full((visu_height, visu_width, 3), 255, dtype=numpy.uint8) + + if args.time_range != (0., None): + cv.putText(aoi2D_visu_frames[marker_id], f'Segment time range: {int(args.time_range[0] * 1000)} - {int(args.time_range[1] * 1000)} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv.LINE_AA) + + # Project 3D scene onto the visualisation plan + aoi3D_scene_rotation = numpy.array([[-numpy.pi, 0.0, 0.0]]) + aoi3D_scene_translation = aoi3D_scenes[marker_id].center()*[-1, 1, 0] + [0, 0, scene_height] + + # Edit a projection matrix for the reference frame + K0 = numpy.array([[visu_height, 0.0, visu_width/2], [0.0, visu_height, visu_height/2], [0.0, 0.0, 1.0]]) + + aoi2D_visu_scenes[marker_id] = aoi3D_scenes[marker_id].project(aoi3D_scene_translation, aoi3D_scene_rotation, K0) + + for name, aoi in aoi2D_visu_scenes[marker_id].items(): + if name != 'Visualisation_Plan': + aoi.draw(aoi2D_visu_frames[marker_id], (0, 0, 0)) + + else: + + print(f'\t{aoi}') + + def aoi3D_scene_selector(marker_id): + return aoi3D_scenes.get(marker_id, None) + + def aoi2D_visu_scene_selector(marker_id): + return aoi2D_visu_scenes.get(marker_id, None) + + def aoi2D_visu_frame_selector(marker_id): + return aoi2D_visu_frames.get(marker_id, None) + + # Create timestamped buffer to store AOIs scene in time + ts_aois_scenes = AOIFeatures.TimeStampedAOIScenes() + + # Create timestamped buffer to store gaze positions in time + ts_gaze_positions = GazeFeatures.TimeStampedGazePositions() + + # !!! the parameters below are specific to the TobiiGlassesPro2 !!! + # Reference : https://www.biorxiv.org/content/10.1101/299925v1 + tobii_accuracy = 1.42 # degree + tobii_precision = 0.34 # degree + tobii_camera_hfov = 82 # degree + tobii_visual_hfov = 160 # degree + + # Video and data replay loop + try: + + # Initialise progress bar + MiscFeatures.printProgressBar(0, tobii_segment_video.get_duration()/1000, prefix = 'Progress:', suffix = 'Complete', length = 100) + + head_moving = False + head_movement_last = 0. 
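+
+ # Note on the accuracy conversion used in the loop below: the gaze distance
+ # cancels out, so the angular accuracy maps to pixels as
+ #
+ #   accuracy_px = frame_width * tan(tobii_accuracy) / tan(tobii_camera_hfov / 2)
+ #
+ # e.g. for a 1920 px wide frame: 1920 * tan(1.42 deg) / tan(41 deg) ~= 55 px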
+
+ # Iterate on video frames
+ for video_ts, video_frame in tobii_segment_video.frames():
+
+ video_ts_ms = video_ts / 1000
+
+ # Copy video frame to edit visualisation on it without disrupting aruco tracking
+ visu_frame = video_frame.copy()
+
+ # Process video and data frame
+ try:
+
+ # Get nearest head rotation before video timestamp and remove all head rotations before
+ _, nearest_head_rotation = tobii_ts_head_rotations.pop_first_until(video_ts)
+
+ # Calculate head movement considering only head yaw and pitch
+ head_movement = numpy.array(nearest_head_rotation.value)
+ head_movement_px = head_movement.astype(int)
+ head_movement_norm = numpy.linalg.norm(head_movement[0:2])
+
+ # Draw movement vector
+ cv.line(visu_frame.matrix, (int(visu_frame.width/2), int(visu_frame.height/2)), (int(visu_frame.width/2) + head_movement_px[1], int(visu_frame.height/2) - head_movement_px[0]), (150, 150, 150), 3)
+
+ # Head movement detection hysteresis
+ # TODO : pass the threshold value as argument
+ if not head_moving and head_movement_norm > 50:
+ head_moving = True
+
+ if head_moving and head_movement_norm < 10:
+ head_moving = False
+
+ # When head is moving, ArUco tracking could return bad pose estimation and so bad AOI scene projection
+ if head_moving:
+ raise AOIFeatures.AOISceneMissing('Head is moving')
+
+ # Get nearest gaze position before video timestamp and remove all gaze positions before
+ _, nearest_gaze_position = tobii_ts_gaze_positions.pop_first_until(video_ts)
+
+ # Ignore frame when gaze position is not valid
+ if nearest_gaze_position.validity == 1:
+ raise GazeFeatures.GazePositionMissing('Invalid gaze position')
+
+ gaze_position_pixel = GazeFeatures.GazePosition( (int(nearest_gaze_position.value[0] * visu_frame.width), int(nearest_gaze_position.value[1] * visu_frame.height)) )
+
+ # Draw gaze position
+ cv.circle(visu_frame.matrix, gaze_position_pixel, 2, (0, 255, 255), -1)
+
+ # Get nearest gaze position 3D before video timestamp and remove all gaze positions before
+ _, nearest_gaze_position_3d = tobii_ts_gaze_positions_3d.pop_first_until(video_ts)
+
+ # Ignore frame when gaze position 3D is not valid
+ if nearest_gaze_position_3d.validity == 1:
+ raise GazeFeatures.GazePositionMissing('Invalid gaze position 3D')
+
+ gaze_accuracy_mm = numpy.tan(numpy.deg2rad(tobii_accuracy)) * nearest_gaze_position_3d.value[2]
+ tobii_camera_hfov_mm = numpy.tan(numpy.deg2rad(tobii_camera_hfov / 2)) * nearest_gaze_position_3d.value[2]
+
+ gaze_position_pixel.accuracy = round(visu_frame.width * float(gaze_accuracy_mm) / float(tobii_camera_hfov_mm))
+
+ # Draw gaze accuracy
+ cv.circle(visu_frame.matrix, gaze_position_pixel, gaze_position_pixel.accuracy, (0, 255, 255), 1)
+
+ # Store gaze position in milliseconds for further visual scan processing
+ ts_gaze_positions[round(video_ts_ms)] = gaze_position_pixel
+
+ # Hide frame left and right borders before tracking to ignore markers outside focus area
+ cv.rectangle(video_frame.matrix, (0, 0), (int(video_frame.width/6), int(video_frame.height)), (0, 0, 0), -1)
+ cv.rectangle(video_frame.matrix, (int(video_frame.width*(1 - 1/6)), 0), (int(video_frame.width), int(video_frame.height)), (0, 0, 0), -1)
+
+ # Track markers with pose estimation and draw them
+ aruco_tracker.track(video_frame.matrix)
+ aruco_tracker.draw(visu_frame.matrix)
+
+ # When no marker is detected, no AOI scene projection can be done
+ if aruco_tracker.get_markers_number() == 0:
+ raise AOIFeatures.AOISceneMissing('No marker detected')
+
+ # Store aoi 2D video for further
scene merging + aoi2D_dict = {} + + # Project 3D scene on each video frame and the visualisation frame + for (i, marker_id) in enumerate(aruco_tracker.get_markers_ids()): + + # Copy 3D scene related to detected marker + aoi3D_scene = aoi3D_scene_selector(marker_id) + + if aoi3D_scene == None: + continue + + # Transform scene into camera referential + aoi3D_camera = aoi3D_scene.transform(aruco_tracker.get_marker_translation(i), aruco_tracker.get_marker_rotation(i)) + + # Get aoi inside vision cone field + cone_vision_height_cm = nearest_gaze_position_3d.value[2]/10 # cm + cone_vision_radius_cm = numpy.tan(numpy.deg2rad(tobii_visual_hfov / 2)) * cone_vision_height_cm + + aoi3D_inside, aoi3D_outside = aoi3D_camera.vision_cone(cone_vision_radius_cm, cone_vision_height_cm) + + # Keep only aoi inside vision cone field + aoi3D_scene = aoi3D_scene.copy(exclude=aoi3D_outside.keys()) + + # DON'T APPLY CAMERA DISTORSION : it projects points which are far from the frame into it + # This hack isn't realistic but as the gaze will mainly focus on centered AOI, where the distorsion is low, it is acceptable. + aoi2D_video_scene = aoi3D_scene.project(aruco_tracker.get_marker_translation(i), aruco_tracker.get_marker_rotation(i), aruco_camera.get_K()) + + # Store each 2D aoi for further scene merging + for name, aoi in aoi2D_video_scene.items(): + + if name not in aoi2D_dict.keys(): + aoi2D_dict[name] = [] + + aoi2D_dict[name].append(aoi.clockwise()) + + # Select 2D visu scene if there is one for the detected marker + aoi2D_visu_scene = aoi2D_visu_scene_selector(marker_id) + aoi2D_visu_frame = aoi2D_visu_frame_selector(marker_id) + + if aoi2D_visu_scene == None: + continue + + look_at = aoi2D_video_scene['Visualisation_Plan'].look_at(gaze_position_pixel) + + visu_gaze_pixel = aoi2D_visu_scene['Visualisation_Plan'].looked_pixel(look_at) + cv.circle(aoi2D_visu_frame, visu_gaze_pixel, 4, (0, 0, 255), -1) + + # Merge all 2D aoi into a single 2D scene + aoi2D_merged_scene = AOI2DScene.AOI2DScene() + for name, aoi_array in aoi2D_dict.items(): + aoi2D_merged_scene[name] = numpy.sum(aoi_array, axis=0) / len(aoi_array) + + aoi2D_merged_scene.draw(visu_frame.matrix, gaze_position_pixel, exclude=['Visualisation_Plan']) + + # When the merged scene is empty + if len(aoi2D_merged_scene.keys()) == 0: + raise AOIFeatures.AOISceneMissing('Scene is empty') + + # Store 2D merged scene at this time in millisecond + ts_aois_scenes[round(video_ts_ms)] = aoi2D_merged_scene + + # Raised when gaze data is missing + except GazeFeatures.GazePositionMissing as e: + + # Store missing gaze data exception + ts_gaze_positions[round(video_ts_ms)] = e + + cv.rectangle(visu_frame.matrix, (0, 50), (550, 100), (127, 127, 127), -1) + cv.putText(visu_frame.matrix, str(e), (20, 80), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA) + + # Raised when aoi scene is missing + except AOIFeatures.AOISceneMissing as e: + + # Store missing scene exception + ts_aois_scenes[round(video_ts_ms)] = e + + cv.rectangle(visu_frame.matrix, (0, 50), (550, 100), (127, 127, 127), -1) + cv.putText(visu_frame.matrix, str(e), (20, 80), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA) + + # Raised when buffer is empty + except ValueError: + pass + + # Draw focus area + cv.rectangle(visu_frame.matrix, (int(video_frame.width/6), 0), (int(visu_frame.width*(1-1/6)), int(visu_frame.height)), (255, 150, 150), 1) + + # Draw center + cv.line(visu_frame.matrix, (int(visu_frame.width/2) - 50, int(visu_frame.height/2)), (int(visu_frame.width/2) + 50, 
int(visu_frame.height/2)), (255, 150, 150), 1)
+ cv.line(visu_frame.matrix, (int(visu_frame.width/2), int(visu_frame.height/2) - 50), (int(visu_frame.width/2), int(visu_frame.height/2) + 50), (255, 150, 150), 1)
+
+ # Write segment timing
+ cv.rectangle(visu_frame.matrix, (0, 0), (550, 50), (63, 63, 63), -1)
+ cv.putText(visu_frame.matrix, f'Segment time: {int(video_ts_ms)} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
+
+ if args.window:
+
+ # Close window using 'Esc' key
+ if cv.waitKey(1) == 27:
+ break
+
+ # Display visualisation
+ cv.imshow(f'Segment {tobii_segment.get_id()} ArUco AOI', visu_frame.matrix)
+
+ # Display each visual scan frame
+ for marker_id, aoi2D_visu_frame in aoi2D_visu_frames.items():
+ cv.imshow(f'Segment {tobii_segment.get_id()} visual scan for marker {marker_id}', aoi2D_visu_frame)
+
+ # Write video
+ output_video.write(visu_frame.matrix)
+
+ # Update Progress Bar
+ progress = video_ts_ms - int(args.time_range[0] * 1000)
+ MiscFeatures.printProgressBar(progress, tobii_segment_video.get_duration()/1000, prefix = 'Progress:', suffix = 'Complete', length = 100)
+
+ # Exit on 'ctrl+C' interruption
+ except KeyboardInterrupt:
+ pass
+
+ # Stop frame display
+ cv.destroyAllWindows()
+
+ # End output video file
+ output_video.close()
+
+ # Print aruco tracking metrics
+ print('\nAruco marker tracking metrics')
+ try_count, tracked_counts, rejected_counts = aruco_tracker.get_track_metrics()
+
+ for marker_id, tracked_count in tracked_counts.items():
+ print(f'Marker {marker_id} has been detected in {tracked_count} / {try_count} frames ({round(100 * tracked_count / try_count, 2)} %)')
+
+ for marker_id, rejected_count in rejected_counts.items():
+ print(f'Marker {marker_id} has been rejected in {rejected_count} / {try_count} frames ({round(100 * rejected_count / try_count, 2)} %)')
+
+ # Build visual scan based on a pointer position
+ visual_scan = GazeFeatures.PointerBasedVisualScan(ts_aois_scenes, ts_gaze_positions)
+ print(f'{len(visual_scan.steps())} visual scan steps found')
+
+ # Export visual scan data
+ visual_scan.export_as_csv(vs_data_filepath)
+ print(f'Visual scan data saved into {vs_data_filepath}')
+
+ # Export each visual scan picture
+ for marker_id, aoi2D_visu_frame in aoi2D_visu_frames.items():
+ cv.imwrite(vs_visu_filepath % marker_id, aoi2D_visu_frame)
+ print(f'Visual scan picture for marker {marker_id} saved into {vs_visu_filepath % marker_id}')
+
+ # Notify when the visual scan video has been exported
+ print(f'Visual scan video saved into {vs_video_filepath}')
+
+
+if __name__ == '__main__':
+
+ main()
\ No newline at end of file
diff --git a/src/argaze/utils/tobii_segment_display.py b/src/argaze/utils/tobii_segment_display.py
new file mode 100644
index 0000000..86d0057
--- /dev/null
+++ b/src/argaze/utils/tobii_segment_display.py
@@ -0,0 +1,116 @@
+#!/usr/bin/env python
+
+import argparse
+
+from argaze import GazeFeatures
+from argaze.TobiiGlassesPro2 import TobiiEntities, TobiiVideo, TobiiData
+from argaze.utils import MiscFeatures
+
+import numpy
+
+import cv2 as cv
+
+def main():
+ """
+ Replay Tobii segment video
+ """
+
+ # Manage arguments
+ parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0])
+ parser.add_argument('-s', '--segment_path', metavar='SEGMENT_PATH', type=str, default=None, help='segment path')
+ parser.add_argument('-r', '--time_range', metavar=('START_TIME', 'END_TIME'), nargs=2, type=float, default=(0., None), help='start and end time (in seconds)')
+
parser.add_argument('-w', '--window', metavar='DISPLAY', type=bool, default=True, help='enable window display', action=argparse.BooleanOptionalAction) + args = parser.parse_args() + + if args.segment_path != None: + + # Load a tobii segment + tobii_segment = TobiiEntities.TobiiSegment(args.segment_path, int(args.time_range[0] * 1e6), int(args.time_range[1] * 1e6) if args.time_range[1] != None else None) + + # Load a tobii segment video + tobii_segment_video = tobii_segment.load_video() + print(f'Video properties:\n\tduration: {tobii_segment_video.get_duration() / 1e6} s\n\twidth: {tobii_segment_video.get_width()} px\n\theight: {tobii_segment_video.get_height()} px') + + # Load a tobii segment data + tobii_segment_data = tobii_segment.load_data() + + print(f'Loaded data count:') + for name in tobii_segment_data.keys(): + print(f'\t{name}: {len(tobii_segment_data[name])} data') + + # Access to timestamped gaze position data buffer + tobii_ts_gaze_positions = tobii_segment_data['GazePosition'] + + # Access to timestamped pupil diameter data buffer + tobii_ts_pupil_diameter = tobii_segment_data['PupilDiameter'] + + # Access to timestamped events data buffer + tobii_ts_events = tobii_segment_data['Event'] + + # Video and data replay loop + try: + + # Initialise progress bar + MiscFeatures.printProgressBar(0, tobii_segment_video.get_duration() / 1e3, prefix = 'Video progression:', suffix = 'Complete', length = 100) + + # Iterate on video frames + for video_ts, video_frame in tobii_segment_video.frames(): + + video_ts_ms = video_ts / 1e3 + + # Write segment timing + cv.putText(video_frame.matrix, f'Segment time: {int(video_ts_ms)} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA) + + try: + + # Get closest gaze position before video timestamp and remove all gaze positions before + closest_gaze_ts, closest_gaze_position = tobii_ts_gaze_positions.pop_first_until(video_ts) + + # Get closest pupil diameter before video timestamp and remove all pupil diameters before + closest_pupil_ts, closest_pupil_diameter = tobii_ts_pupil_diameter.pop_first_until(video_ts) + + # Draw gaze position + gaze_position = (int(closest_gaze_position.value[0] * video_frame.width), int(closest_gaze_position.value[1] * video_frame.height)) + pupil_diameter = int((10 - closest_pupil_diameter.value) / 2) + + cv.circle(video_frame.matrix, gaze_position, 10, (0, 255, 255), pupil_diameter) + + # Wait for gaze position + except ValueError: + continue + + try: + + # Get closest event before video timestamp and remove all gaze positions before + closest_event_ts, closest_event = tobii_ts_events.pop_first_until(video_ts) + + print(closest_event_ts / 1e3, closest_event) + + # Write events + cv.putText(video_frame.matrix, str(closest_event), (20, 140), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA) + + except ValueError: + pass + + if args.window: + + # Close window using 'Esc' key + if cv.waitKey(1) == 27: + break + + cv.imshow(f'Segment {tobii_segment.get_id()} video', video_frame.matrix) + + # Update Progress Bar + progress = video_ts_ms - int(args.time_range[0] * 1e3) + MiscFeatures.printProgressBar(progress, tobii_segment_video.get_duration() / 1e3, prefix = 'Video progression:', suffix = 'Complete', length = 100) + + # Exit on 'ctrl+C' interruption + except KeyboardInterrupt: + pass + + # Stop frame display + cv.destroyAllWindows() + +if __name__ == '__main__': + + main() \ No newline at end of file diff --git a/src/argaze/utils/tobii_segment_gaze_movements_export.py 
b/src/argaze/utils/tobii_segment_gaze_movements_export.py new file mode 100644 index 0000000..b0c273a --- /dev/null +++ b/src/argaze/utils/tobii_segment_gaze_movements_export.py @@ -0,0 +1,255 @@ +#!/usr/bin/env python + +import argparse +import os + +from argaze import GazeFeatures +from argaze.TobiiGlassesPro2 import TobiiEntities, TobiiVideo +from argaze.utils import MiscFeatures + +import cv2 as cv +import numpy + +def main(): + """ + Analyse Tobii segment fixations + """ + + # Manage arguments + parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0]) + parser.add_argument('-s', '--segment_path', metavar='SEGMENT_PATH', type=str, default=None, help='path to a tobii segment folder') + parser.add_argument('-r', '--time_range', metavar=('START_TIME', 'END_TIME'), nargs=2, type=float, default=(0., None), help='start and end time (in second)') + parser.add_argument('-d', '--dispersion_threshold', metavar='DISPERSION_THRESHOLD', type=int, default=10, help='dispersion threshold in pixel') + parser.add_argument('-t', '--duration_threshold', metavar='DURATION_THRESHOLD', type=int, default=100, help='duration threshold in millisecond') + parser.add_argument('-o', '--output', metavar='OUT', type=str, default=None, help='destination folder path (segment folder by default)') + parser.add_argument('-w', '--window', metavar='DISPLAY', type=bool, default=True, help='enable window display', action=argparse.BooleanOptionalAction) + args = parser.parse_args() + + if args.segment_path != None: + + # Manage destination path + destination_path = '.' + if args.output != None: + + if not os.path.exists(os.path.dirname(args.output)): + + os.makedirs(os.path.dirname(args.output)) + print(f'{os.path.dirname(args.output)} folder created') + + destination_path = args.output + + else: + + destination_path = args.segment_path + + # Export into a dedicated time range folder + if args.time_range[1] != None: + timerange_path = f'[{int(args.time_range[0])}s - {int(args.time_range[1])}s]' + else: + timerange_path = f'[all]' + + destination_path = f'{destination_path}/{timerange_path}' + + if not os.path.exists(destination_path): + + os.makedirs(destination_path) + print(f'{destination_path} folder created') + + fixations_filepath = f'{destination_path}/movements_fixations.csv' + saccades_filepath = f'{destination_path}/movements_saccades.csv' + + gaze_status_filepath = f'{destination_path}/gaze_status.csv' + gaze_status_video_filepath = f'{destination_path}/gaze_status.mp4' + + # Load a tobii segment + tobii_segment = TobiiEntities.TobiiSegment(args.segment_path, int(args.time_range[0] * 1e6), int(args.time_range[1] * 1e6) if args.time_range[1] != None else None) + + # Load a tobii segment video + tobii_segment_video = tobii_segment.load_video() + print(f'Video properties:\n\tduration: {tobii_segment_video.get_duration()/1e6} s\n\twidth: {tobii_segment_video.get_width()} px\n\theight: {tobii_segment_video.get_height()} px') + + # Load a tobii segment data + tobii_segment_data = tobii_segment.load_data() + + print(f'Loaded data count:') + for name in tobii_segment_data.keys(): + print(f'\t{name}: {len(tobii_segment_data[name])} data') + + # Access to timestamped gaze position data buffer + tobii_ts_gaze_positions = tobii_segment_data['GazePosition'] + + # Access to timestamped gaze 3D positions data buffer + tobii_ts_gaze_positions_3d = tobii_segment_data['GazePosition3D'] + + # Format tobii gaze position in pixel and store them using millisecond unit timestamp + ts_gaze_positions = 
GazeFeatures.TimeStampedGazePositions() + + # !!! the parameters below are specific to the TobiiGlassesPro2 !!! + # Reference : https://www.biorxiv.org/content/10.1101/299925v1 + tobii_accuracy = 1.42 # degree + tobii_precision = 0.34 # degree + tobii_camera_hfov = 82 # degree + + for ts, tobii_gaze_position in tobii_ts_gaze_positions.items(): + + if tobii_gaze_position.validity == 0: + + gaze_position_pixel = GazeFeatures.GazePosition( (int(tobii_gaze_position.value[0] * tobii_segment_video.get_width()), int(tobii_gaze_position.value[1] * tobii_segment_video.get_height())) ) + + ts_gaze_positions[ts/1000] = gaze_position_pixel + + for ts, tobii_ts_gaze_position_3d in tobii_ts_gaze_positions_3d.items(): + + if tobii_ts_gaze_position_3d.validity == 0: + + gaze_accuracy_mm = numpy.sin(numpy.deg2rad(tobii_accuracy)) * tobii_ts_gaze_position_3d.value[2] + tobii_camera_hfov_mm = numpy.sin(numpy.deg2rad(tobii_camera_hfov)) * tobii_ts_gaze_position_3d.value[2] + + ts_gaze_positions[ts/1000].accuracy = round(tobii_segment_video.get_width() * float(gaze_accuracy_mm) / float(tobii_camera_hfov_mm)) + + print(f'Dispersion threshold: {args.dispersion_threshold}') + print(f'Duration threshold: {args.duration_threshold}') + + # Start movement identification + movement_identifier = GazeFeatures.DispersionBasedMovementIdentifier(ts_gaze_positions, args.dispersion_threshold, args.duration_threshold) + fixations = GazeFeatures.TimeStampedMovements() + saccades = GazeFeatures.TimeStampedMovements() + gaze_status = GazeFeatures.TimeStampedGazeStatus() + + # Initialise progress bar + MiscFeatures.printProgressBar(0, int(tobii_segment_video.get_duration()/1000), prefix = 'Movements identification:', suffix = 'Complete', length = 100) + + for item in movement_identifier: + + if isinstance(item, GazeFeatures.DispersionBasedMovementIdentifier.DispersionBasedFixation): + + start_ts, start_position = item.positions.get_first() + + fixations[start_ts] = item + + for ts, position in item.positions.items(): + + gaze_status[ts] = GazeFeatures.GazeStatus(position, 'Fixation', len(fixations)) + + elif isinstance(item, GazeFeatures.DispersionBasedMovementIdentifier.DispersionBasedSaccade): + + start_ts, start_position = item.positions.get_first() + end_ts, end_position = item.positions.get_last() + + saccades[start_ts] = item + + gaze_status[start_ts] = GazeFeatures.GazeStatus(start_position, 'Saccade', len(saccades)) + gaze_status[end_ts] = GazeFeatures.GazeStatus(end_position, 'Saccade', len(saccades)) + + else: + continue + + # Update Progress Bar + progress = ts - int(args.time_range[0] * 1000) + MiscFeatures.printProgressBar(progress, int(tobii_segment_video.get_duration()/1000), prefix = 'Movements identification:', suffix = 'Complete', length = 100) + + print(f'\n{len(fixations)} fixations and {len(saccades)} saccades found') + + # Export fixations analysis + fixations.export_as_csv(fixations_filepath) + print(f'Fixations saved into {fixations_filepath}') + + # Export saccades analysis + saccades.export_as_csv(saccades_filepath) + print(f'Saccades saved into {saccades_filepath}') + + # Export gaze status analysis + gaze_status.export_as_csv(gaze_status_filepath) + print(f'Gaze status saved into {gaze_status_filepath}') + + # Prepare video exportation at the same format than segment video + output_video = TobiiVideo.TobiiVideoOutput(gaze_status_video_filepath, tobii_segment_video.get_stream()) + + # Video and data loop + try: + + # Initialise progress bar + MiscFeatures.printProgressBar(0, 
tobii_segment_video.get_duration()/1000, prefix = 'Video with movements processing:', suffix = 'Complete', length = 100) + + current_fixation_ts, current_fixation = fixations.pop_first() + current_fixation_time_counter = 0 + + current_saccade_ts, current_saccade = saccades.pop_first() + + # Iterate on video frames + for video_ts, video_frame in tobii_segment_video.frames(): + + video_ts_ms = video_ts / 1000 + + # write segment timing + cv.putText(video_frame.matrix, f'Segment time: {int(video_ts_ms)} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA) + + # write movement identification parameters + cv.putText(video_frame.matrix, f'Dispersion threshold: {args.dispersion_threshold} px', (20, 100), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA) + cv.putText(video_frame.matrix, f'Duration threshold: {args.duration_threshold} ms', (20, 140), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA) + + # Draw current fixation + if len(fixations) > 0: + + if video_ts_ms > current_fixation_ts + current_fixation.duration: + + current_fixation_ts, current_fixation = fixations.pop_first() + current_fixation_time_counter = 0 + + # Draw saccade + if len(saccades) > 0: + + if video_ts_ms > current_saccade_ts + current_saccade.duration: + + current_saccade_ts, current_saccade = saccades.pop_first() + start_ts, start_position = current_saccade.positions.pop_first() + end_ts, end_position = current_saccade.positions.pop_first() + + cv.line(video_frame.matrix, start_position, end_position, (0, 0, 255), 2) + + else: + + current_fixation_time_counter += 1 + + cv.circle(video_frame.matrix, current_fixation.centroid, current_fixation.dispersion + current_fixation_time_counter, (0, 255, 0), 1) + + try: + + # Get closest gaze position before video timestamp and remove all gaze positions before + _, nearest_gaze_position = ts_gaze_positions.pop_first_until(video_ts_ms) + + # Draw gaze position and precision + cv.circle(video_frame.matrix, nearest_gaze_position, 2, (0, 255, 255), -1) + cv.circle(video_frame.matrix, nearest_gaze_position, nearest_gaze_position.accuracy, (0, 255, 255), 1) + + # Wait for gaze position + except ValueError: + pass + + if args.window: + + # Close window using 'Esc' key + if cv.waitKey(1) == 27: + break + + # Display video + cv.imshow(f'Segment {tobii_segment.get_id()} movements', video_frame.matrix) + + # Write video + output_video.write(video_frame.matrix) + + # Update Progress Bar + progress = video_ts_ms - int(args.time_range[0] * 1000) + MiscFeatures.printProgressBar(progress, tobii_segment_video.get_duration()/1000, prefix = 'Video with movements processing:', suffix = 'Complete', length = 100) + + # Exit on 'ctrl+C' interruption + except KeyboardInterrupt: + pass + + # End output video file + output_video.close() + print(f'\nVideo with movements saved into {gaze_status_video_filepath}') + +if __name__ == '__main__': + + main() \ No newline at end of file diff --git a/src/argaze/utils/tobii_segment_gaze_plot_export.py b/src/argaze/utils/tobii_segment_gaze_plot_export.py new file mode 100644 index 0000000..d28bafb --- /dev/null +++ b/src/argaze/utils/tobii_segment_gaze_plot_export.py @@ -0,0 +1,128 @@ +#!/usr/bin/env python + +import argparse +import os +import json + +from argaze import DataStructures +from argaze import GazeFeatures +from argaze.TobiiGlassesPro2 import TobiiEntities, TobiiVideo +from argaze.utils import MiscFeatures + +import pandas +import matplotlib.pyplot as mpyplot +import matplotlib.patches as mpatches + +def main(): + 
""" + """ + + # Manage arguments + parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0]) + parser.add_argument('-s', '--segment_path', metavar='SEGMENT_PATH', type=str, default=None, help='segment path') + parser.add_argument('-r', '--time_range', metavar=('START_TIME', 'END_TIME'), nargs=2, type=float, default=(0., None), help='start and end time (in second)') + parser.add_argument('-o', '--output', metavar='OUT', type=str, default=None, help='destination folder path (segment folder by default)') + args = parser.parse_args() + + if args.segment_path != None: + + # Manage destination path + destination_path = '.' + if args.output != None: + + if not os.path.exists(os.path.dirname(args.output)): + + os.makedirs(os.path.dirname(args.output)) + print(f'{os.path.dirname(args.output)} folder created') + + destination_path = args.output + + else: + + destination_path = args.segment_path + + data_plots_filepath = f'{destination_path}/plots.svg' + + # Load a tobii segment + tobii_segment = TobiiEntities.TobiiSegment(args.segment_path, int(args.time_range[0] * 1e6), int(args.time_range[1] * 1e6) if args.time_range[1] != None else None) + + # Load a tobii segment video + tobii_segment_video = tobii_segment.load_video() + print(f'Video properties:\n\tduration: {tobii_segment_video.get_duration() / 1e6} s\n\twidth: {tobii_segment_video.get_width()} px\n\theight: {tobii_segment_video.get_height()} px') + + # Load a tobii segment data + tobii_segment_data = tobii_segment.load_data() + + print(f'Loaded data count:') + for name in tobii_segment_data.keys(): + print(f'\t{name}: {len(tobii_segment_data[name])} data') + + # Edit figure + figure_width = min( 4 * tobii_segment_video.get_duration() / 1e6, 56) # maximal width to display: 56 inches at 144 dpi < 2^16 pixels + data_sample = 8064 # 56 inches * 144 dpi = 8064 data can be displayed at max + figure = mpyplot.figure(figsize=(figure_width, 35), dpi=144) + + # Plot pupil diameter data + subplot = figure.add_subplot(711) + subplot.set_title('Pupil diameter', loc='left') + subplot.set_ylim(0, 10) + patches = tobii_segment_data['PupilDiameter'].plot(names=['value'], colors=['#FFD800'], samples=data_sample) + subplot.legend(handles=patches, loc='upper left') + + # Annotate events + df_ts_events = tobii_segment_data['Event'].as_dataframe() + + if len(df_ts_events) > 0: + + for ts, event_type, event_tag in zip(df_ts_events.index, df_ts_events.type, df_ts_events.tag): + subplot.annotate(f'{event_type}\n{event_tag}', xy=(ts, 7), horizontalalignment="left", verticalalignment="top") + subplot.vlines(ts, 0, 6, color="tab:red", linewidth=1) + + # Plot pupil center data + subplot = figure.add_subplot(712) + subplot.set_title('Pupil center', loc='left') + subplot.set_ylim(-40, -20) + patches = tobii_segment_data['PupilCenter'].plot(names=['x','y','z'], colors=['#276FB6','#9427B6','#888888'], split={'value':['x','y','z']}, samples=data_sample) + subplot.legend(handles=patches, loc='upper left') + + # Plot gaze position data + subplot = figure.add_subplot(713) + subplot.set_title('Gaze position', loc='left') + subplot.set_ylim(0., 1.) 
+ patches = tobii_segment_data['GazePosition'].plot(names=['x','y'], colors=['#276FB6','#9427B6'], split={'value':['x','y']}, samples=data_sample) + subplot.legend(handles=patches, loc='upper left') + + # Plot gaze direction data + subplot = figure.add_subplot(714) + subplot.set_title('Gaze direction', loc='left') + patches = tobii_segment_data['GazeDirection'].plot(names=['x','y','z'], colors=['#276FB6','#9427B6','#888888'], split={'value':['x','y','z']}, samples=data_sample) + subplot.legend(handles=patches, loc='upper left') + + # Plot gaze direction data + subplot = figure.add_subplot(715) + subplot.set_title('Gaze position 3D', loc='left') + patches = tobii_segment_data['GazePosition3D'].plot(names=['x','y','z'], colors=['#276FB6','#9427B6','#888888'], split={'value':['x','y','z']}, samples=data_sample) + subplot.legend(handles=patches, loc='upper left') + + # Plot accelerometer data + subplot = figure.add_subplot(716) + subplot.set_title('Accelerometer', loc='left') + patches = tobii_segment_data['Accelerometer'].plot(names=['x','y','z'], colors=['#276FB6','#9427B6','#888888'], split={'value':['x','y','z']}, samples=data_sample) + subplot.legend(handles=patches, loc='upper left') + + # Plot accelerometer data + subplot = figure.add_subplot(717) + subplot.set_title('Gyroscope', loc='left') + patches = tobii_segment_data['Gyroscope'].plot(names=['x','y','z'], colors=['#276FB6','#9427B6','#888888'], split={'value':['x','y','z']}, samples=data_sample) + subplot.legend(handles=patches, loc='upper left') + + # Export figure + mpyplot.tight_layout() + mpyplot.savefig(data_plots_filepath) + mpyplot.close('all') + + print(f'\nData plots saved into {data_plots_filepath}') + +if __name__ == '__main__': + + main() diff --git a/src/argaze/utils/tobii_segment_record.py b/src/argaze/utils/tobii_segment_record.py new file mode 100644 index 0000000..a45727b --- /dev/null +++ b/src/argaze/utils/tobii_segment_record.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python + +import argparse +import threading +import time +import random + +from argaze.TobiiGlassesPro2 import TobiiController +from argaze.utils import MiscFeatures + +def main(): + """ + Record a Tobii Glasses Pro 2 session on Tobii interface's SD Card + """ + + # Manage arguments + parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0]) + parser.add_argument('-t', '--tobii_ip', metavar='TOBII_IP', type=str, default='192.168.1.10', help='tobii glasses ip') + parser.add_argument('-p', '--project_name', metavar='PROJECT_NAME', type=str, default=TobiiController.DEFAULT_PROJECT_NAME, help='project name') + parser.add_argument('-u', '--participant_name', metavar='PARTICIPANT_NAME', type=str, default=TobiiController.DEFAULT_PARTICIPANT_NAME, help='participant name') + args = parser.parse_args() + + # Create tobii controller + tobii_controller = TobiiController.TobiiController(args.tobii_ip, args.project_name, args.participant_name) + + # Calibrate tobii glasses + tobii_controller.calibrate() + + # Create recording + recording_id = tobii_controller.create_recording(args.participant_name) + + # Start recording + tobii_controller.start_recording(recording_id) + print('Recording started') + + # Define loop + last_battery_level = 0 + time_count = 0 + + exit = MiscFeatures.ExitSignalHandler() + print('Waiting for Ctrl+C to quit...\n') + + while not exit.status(): + + # Print storage info each minutes + if time_count % 60 == 0: + + print(tobii_controller.get_storage_info()) + + # print battery level each time it changes + # send it as experimental 
variable + battery_level = tobii_controller.get_battery_level() + if battery_level != last_battery_level: + + print(tobii_controller.get_battery_info()) + + tobii_controller.send_variable('battery', battery_level) + + last_battery_level = battery_level + + # send random event each 3 - 10 seconds + if time_count % random.randint(3, 10) == 0: + + print('Send random event') + + tobii_controller.send_event('random') + + # Sleep 1 second + time.sleep(1) + time_count += 1 + + # Stop recording + tobii_controller.stop_recording(recording_id) + print('Recording stopped') + +if __name__ == '__main__': + + main() \ No newline at end of file diff --git a/src/argaze/utils/tobii_stream_aruco_aoi_display.py b/src/argaze/utils/tobii_stream_aruco_aoi_display.py new file mode 100644 index 0000000..6391a0d --- /dev/null +++ b/src/argaze/utils/tobii_stream_aruco_aoi_display.py @@ -0,0 +1,104 @@ + #!/usr/bin/env python + +import argparse +import os + +from argaze import DataStructures, GazeFeatures +from argaze.TobiiGlassesPro2 import * +from argaze.ArUcoMarkers import ArUcoTracker, ArUcoCamera +from argaze.AreaOfInterest import * +from argaze.TobiiGlassesPro2 import * + +import cv2 as cv +import numpy + +from ivy.std_api import * + +def main(): + """ + Track any ArUco marker into Tobii Glasses Pro 2 camera video stream. + """ + + # Manage arguments + parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0]) + parser.add_argument('-t', '--tobii_ip', metavar='TOBII_IP', type=str, default='192.168.1.10', help='tobii glasses ip') + parser.add_argument('-c', '--camera_calibration', metavar='CAM_CALIB', type=str, default='tobii_camera.json', help='json camera calibration filepath') + parser.add_argument('-d', '--dictionary', metavar='DICT', type=str, default='DICT_ARUCO_ORIGINAL', help='aruco marker dictionnary (DICT_4X4_50, DICT_4X4_100, DICT_4X4_250, DICT_4X4_1000, DICT_5X5_50, DICT_5X5_100, DICT_5X5_250, DICT_5X5_1000, DICT_6X6_50, DICT_6X6_100, DICT_6X6_250, DICT_6X6_1000, DICT_7X7_50, DICT_7X7_100, DICT_7X7_250, DICT_7X7_1000, DICT_ARUCO_ORIGINAL,DICT_APRILTAG_16h5, DICT_APRILTAG_25h9, DICT_APRILTAG_36h10, DICT_APRILTAG_36h11)') + parser.add_argument('-m', '--marker_size', metavar='MKR', type=float, default=6, help='aruco marker size (cm)') + args = parser.parse_args() + + print(f'Track Aruco markers from the {args.dictionary} dictionary') + + # Create tobii controller + tobii_controller = TobiiController.TobiiController(args.tobii_ip, 'myProject', 'mySelf') + + # Calibrate tobii glasses + tobii_controller.calibrate() + + # Enable tobii data stream + tobii_data_stream = tobii_controller.enable_data_stream() + + # Enable tobii video stream + tobii_video_stream = tobii_controller.enable_video_stream() + + # create aruco camera + aruco_camera = ArUcoCamera.ArUcoCamera() + aruco_camera.load_calibration_file(args.camera_calibration) + + # Create aruco tracker + aruco_tracker = ArUcoTracker.ArUcoTracker(args.dictionary, args.marker_size, aruco_camera) + + # Start streaming + tobii_controller.start_streaming() + + # Live video stream capture loop + try: + + past_gaze_positions = DataStructures.TimeStampedBuffer() + + while tobii_video_stream.is_alive(): + + video_ts, video_frame = tobii_video_stream.read() + + try: + + # Read data stream + data_stream = tobii_data_stream.read() + + # Store received gaze positions + past_gaze_positions.append(data_stream['GazePosition']) + + # Get last gaze position before video timestamp and remove all former gaze positions + earliest_ts, earliest_gaze_position = 
past_gaze_positions.pop_first_until(video_ts) + + # Draw gaze position + video_gaze_pixel = (int(earliest_gaze_position.value[0] * video_frame.width), int(earliest_gaze_position.value[1] * video_frame.height)) + cv.circle(video_frame.matrix, video_gaze_pixel, 4, (0, 255, 255), -1) + + # Wait for gaze position + except (AttributeError, ValueError): + continue + + # Track markers with pose estimation and draw them + aruco_tracker.track(video_frame.matrix) + aruco_tracker.draw(video_frame.matrix) + + # Close window using 'Esc' key + if cv.waitKey(1) == 27: + break + + cv.imshow('Live Scene', video_frame.matrix) + + # Exit on 'ctrl+C' interruption + except KeyboardInterrupt: + pass + + # Stop frame display + cv.destroyAllWindows() + + # Stop streaming + tobii_controller.stop_streaming() + +if __name__ == '__main__': + + main() \ No newline at end of file diff --git a/src/argaze/utils/tobii_stream_aruco_aoi_ivy_application.py b/src/argaze/utils/tobii_stream_aruco_aoi_ivy_application.py new file mode 100644 index 0000000..70190e2 --- /dev/null +++ b/src/argaze/utils/tobii_stream_aruco_aoi_ivy_application.py @@ -0,0 +1,122 @@ + #!/usr/bin/env python + +import argparse +import os + +from argaze import DataStructures, GazeFeatures +from argaze.ArUcoMarkers import ArUcoMarkersDictionary +from argaze.AreaOfInterest import * + +import cv2 as cv +import numpy + +from ivy.std_api import * + +def main(): + """ + Define AOI scene from a ArUco marker and bind to Ivy default bus to receive live look at pointer data. + """ + + # Manage arguments + parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0]) + parser.add_argument('-y', '--ivy_bus', metavar='IVY_BUS', type=str, default='0.0.0.0:2010', help='Ivy bus ip and port') + parser.add_argument('-a', '--aoi_scene', metavar='AOI_SCENE', type=str, default='aoi3D_scene.obj', help='obj aoi scene filepath') + parser.add_argument('-d', '--dictionary', metavar='DICT', type=str, default='DICT_ARUCO_ORIGINAL', help='aruco marker dictionnary (DICT_4X4_50, DICT_4X4_100, DICT_4X4_250, DICT_4X4_1000, DICT_5X5_50, DICT_5X5_100, DICT_5X5_250, DICT_5X5_1000, DICT_6X6_50, DICT_6X6_100, DICT_6X6_250, DICT_6X6_1000, DICT_7X7_50, DICT_7X7_100, DICT_7X7_250, DICT_7X7_1000, DICT_ARUCO_ORIGINAL,DICT_APRILTAG_16h5, DICT_APRILTAG_25h9, DICT_APRILTAG_36h10, DICT_APRILTAG_36h11)') + parser.add_argument('-m', '--marker_size', metavar='MKR', type=float, default=6, help='aruco marker size (cm)') + parser.add_argument('-i', '--marker_id', metavar='MARKER_ID', type=int, default=0, help='marker id to display') + args = parser.parse_args() + + # Enable Ivy bus + IvyInit(os.path.basename(__file__)) + IvyStart(args.ivy_bus) + + def on_looking_message(*args): + + look_at = numpy.fromstring(args[2].replace('[','').replace(']',''), dtype=float, count=2, sep=', ') + + visu_gaze_pixel = aoi2D_visu_scene[args[1]].looked_pixel(look_at) + + cv.circle(visu_frame, visu_gaze_pixel, 4, (0, 0, 255), -1) + + IvyBindMsg(on_looking_message, 'looking (.*) at (.*)') + + # Create AOIs 3D scene + aoi3D_scene = AOI3DScene.AOI3DScene() + aoi3D_scene.load(args.aoi_scene) + print(f'AOIs names: {aoi3D_scene.keys()}') + + # Create a visual scan visualisation frame + visu_width = 1920 + visu_height = 1080 + visu_ratio = visu_height + visu_frame = numpy.full((visu_height, visu_width, 3), 255, dtype=numpy.uint8) + + cv.imshow('Scene', visu_frame) + + # Project 3D scene on the reference frame + # TODO : center projection on a reference AOI + ref_aoi = 'Scene_Plan' + + # TODO: pass the reference AOI in 
argument + aoi3D_scene_rotation = numpy.array([[-numpy.pi, 0.0, 0.0]]) + aoi3D_scene_translation = numpy.array([[19.0, 8.0, 25.0]]) + + # Edit a projection matrix for the reference frame + K0 = numpy.array([[visu_ratio, 0.0, visu_width/2], [0.0, visu_ratio, visu_height/2], [0.0, 0.0, 1.0]]) + + aoi2D_visu_scene = aoi3D_scene.project(aoi3D_scene_translation, aoi3D_scene_rotation, K0) + + # Create aruco markers dictionary + aruco_markers_dict = ArUcoMarkersDictionary.ArUcoMarkersDictionary(args.dictionary) + + # Create aruco marker + marker_box = aoi2D_visu_scene['Marker_Plan'].bounding_box().astype(int) + marker_size = marker_box[2] - marker_box[0] + marker = aruco_markers_dict.create_marker(args.marker_id, int(marker_size[0])) + print(f'Creating Aruco marker {args.marker_id} from the {args.dictionary} dictionary') + + def draw_scene(): + + # Clear frame + visu_frame[:] = 255 + + # Display AOI 2D scene + for name, aoi in aoi2D_visu_scene.items(): + aoi.draw(visu_frame, (0, 0, 0)) + + # Display aruco marker + visu_frame[marker_box[0][1]:marker_box[2][1], marker_box[0][0]:marker_box[2][0], :] = marker + + # On mouse over : redraw scene and draw target + def on_mouse_event(event, x, y, flags, param): + + draw_scene() + + # Draw target + cv.circle(visu_frame, (x, y), 40, (0, 255, 255), -1) + + cv.setMouseCallback('Scene', on_mouse_event) + + # Screen display loop + try: + + draw_scene() + + while True: + + # Close window using 'Esc' key + if cv.waitKey(1) == 27: + break + + cv.imshow('Scene', visu_frame) + + # Exit on 'ctrl+C' interruption + except KeyboardInterrupt: + pass + + # Stop frame display + cv.destroyAllWindows() + +if __name__ == '__main__': + + main() \ No newline at end of file diff --git a/src/argaze/utils/tobii_stream_aruco_aoi_ivy_controller.py b/src/argaze/utils/tobii_stream_aruco_aoi_ivy_controller.py new file mode 100644 index 0000000..070e3ee --- /dev/null +++ b/src/argaze/utils/tobii_stream_aruco_aoi_ivy_controller.py @@ -0,0 +1,286 @@ + #!/usr/bin/env python + +import argparse +import os +import json + +from argaze import DataStructures, GazeFeatures +from argaze.TobiiGlassesPro2 import * +from argaze.ArUcoMarkers import ArUcoTracker, ArUcoCamera +from argaze.AreaOfInterest import * + +import cv2 as cv +import numpy + +from ivy.std_api import * + +def main(): + """ + Track any ArUco marker into Tobii Glasses Pro 2 camera video stream. + For each loaded AOI scene .obj file, position the scene virtually relatively to each detected ArUco markers and project the scene into camera frame. + Then, detect if Tobii gaze point is inside any AOI and send the look at pointer over Ivy default bus. 
+ """ + + # Manage arguments + parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0]) + parser.add_argument('-t', '--tobii_ip', metavar='TOBII_IP', type=str, default='192.168.1.10', help='tobii glasses ip') + parser.add_argument('-p', '--project_name', metavar='PROJECT_NAME', type=str, default=TobiiController.DEFAULT_PROJECT_NAME, help='project name') + parser.add_argument('-u', '--participant_name', metavar='PARTICIPANT_NAME', type=str, default=TobiiController.DEFAULT_PARTICIPANT_NAME, help='participant name') + parser.add_argument('-c', '--camera_calibration', metavar='CAM_CALIB', type=str, default='tobii_camera.json', help='json camera calibration filepath') + parser.add_argument('-y', '--ivy_bus', metavar='IVY_BUS', type=str, default='0.0.0.0:2010', help='Ivy bus ip and port') + parser.add_argument('-md', '--marker_dictionary', metavar='DICT', type=str, default='DICT_ARUCO_ORIGINAL', help='aruco marker dictionnary (DICT_4X4_50, DICT_4X4_100, DICT_4X4_250, DICT_4X4_1000, DICT_5X5_50, DICT_5X5_100, DICT_5X5_250, DICT_5X5_1000, DICT_6X6_50, DICT_6X6_100, DICT_6X6_250, DICT_6X6_1000, DICT_7X7_50, DICT_7X7_100, DICT_7X7_250, DICT_7X7_1000, DICT_ARUCO_ORIGINAL,DICT_APRILTAG_16h5, DICT_APRILTAG_25h9, DICT_APRILTAG_36h10, DICT_APRILTAG_36h11)') + parser.add_argument('-ms', '--marker_size', metavar='MKR', type=float, default=6, help='aruco marker size (cm)') + parser.add_argument('-mi', '--marker_id_scene', metavar='MARKER_ID_SCENE', type=json.loads, help='{"marker": "aoi scene filepath"} dictionary') + args = parser.parse_args() + + # Manage markers id to track + if args.marker_id_scene == None: + print(f'Track any Aruco markers from the {args.marker_dictionary} dictionary') + else: + print(f'Track Aruco markers {args.marker_id_scene.keys()} from the {args.marker_dictionary} dictionary') + + # Enable Ivy bus + IvyInit(os.path.basename(__file__)) + IvyStart(args.ivy_bus) + + # Create tobii controller + tobii_controller = TobiiController.TobiiController(args.tobii_ip, args.project_name, args.participant_name) + + # Calibrate tobii glasses + tobii_controller.calibrate() + + # Enable tobii data stream + tobii_data_stream = tobii_controller.enable_data_stream() + + # Enable tobii video stream + tobii_video_stream = tobii_controller.enable_video_stream() + + # create aruco camera + aruco_camera = ArUcoCamera.ArUcoCamera() + aruco_camera.load_calibration_file(args.camera_calibration) + + # Create aruco tracker + aruco_tracker = ArUcoTracker.ArUcoTracker(args.marker_dictionary, args.marker_size, aruco_camera) + + # Load AOI 3D scene for each marker + aoi3D_scenes = {} + + for marker_id, aoi_scene_filepath in args.marker_id_scene.items(): + + marker_id = int(marker_id) + + aoi3D_scenes[marker_id] = AOI3DScene.AOI3DScene() + aoi3D_scenes[marker_id].load(aoi_scene_filepath) + + print(f'AOI in {os.path.basename(aoi_scene_filepath)} scene related to marker #{marker_id}') + for aoi in aoi3D_scenes[marker_id].keys(): + print(f'\t{aoi}') + + def aoi3D_scene_selector(marker_id): + return aoi3D_scenes.get(marker_id, None) + + # !!! the parameters below are specific to the TobiiGlassesPro2 !!! 
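+ # (tobii_visual_hfov is used further below to bound the vision cone filter;
+ # the accuracy and precision values come from the reference paper cited next.)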
+
+    # !!! the parameters below are specific to the TobiiGlassesPro2 !!!
+    # Reference : https://www.biorxiv.org/content/10.1101/299925v1
+    tobii_accuracy = 1.42 # degree
+    tobii_precision = 0.34 # degree
+    tobii_camera_hfov = 82 # degree
+    tobii_visual_hfov = 160 # degree
+
+    # Start streaming
+    tobii_controller.start_streaming()
+
+    # Live video stream capture loop
+    try:
+
+        past_gaze_positions = DataStructures.TimeStampedBuffer()
+        past_gaze_positions_3d = DataStructures.TimeStampedBuffer()
+        past_head_rotations = DataStructures.TimeStampedBuffer()
+
+        head_moving = False
+
+        while tobii_video_stream.is_alive():
+
+            video_ts, video_frame = tobii_video_stream.read()
+
+            # Copy video frame to edit visualisation on it without disrupting aruco tracking
+            visu_frame = video_frame.copy()
+
+            # Process video and data frame
+            try:
+
+                # Read data stream
+                data_stream = tobii_data_stream.read()
+
+                # Store last received data
+                past_head_rotations.append(data_stream['Gyroscope'])
+                past_gaze_positions.append(data_stream['GazePosition'])
+                past_gaze_positions_3d.append(data_stream['GazePosition3D']) # assumes the data stream exposes a 'GazePosition3D' entry
+
+                # Get nearest head rotation before video timestamp and remove all head rotations before
+                _, nearest_head_rotation = past_head_rotations.pop_first_until(video_ts)
+
+                # Calculate head movement considering only head yaw and pitch
+                head_movement = numpy.array(nearest_head_rotation.value)
+                head_movement_px = head_movement.astype(int)
+                head_movement_norm = numpy.linalg.norm(head_movement[0:2])
+
+                # Draw movement vector
+                cv.line(visu_frame.matrix, (int(visu_frame.width/2), int(visu_frame.height/2)), (int(visu_frame.width/2) + head_movement_px[1], int(visu_frame.height/2) - head_movement_px[0]), (150, 150, 150), 3)
+
+                # Head movement detection hysteresis
+                # TODO : pass the threshold value as argument
+                if not head_moving and head_movement_norm > 50:
+                    head_moving = True
+
+                if head_moving and head_movement_norm < 10:
+                    head_moving = False
+
+                # When head is moving, ArUco tracking could return bad pose estimation and so bad AOI scene projection
+                if head_moving:
+                    raise AOIFeatures.AOISceneMissing('Head is moving')
+
+                # Get nearest gaze position before video timestamp and remove all gaze positions before
+                _, nearest_gaze_position = past_gaze_positions.pop_first_until(video_ts)
+
+                # Ignore frame when gaze position is not valid
+                if nearest_gaze_position.validity == 1:
+                    raise GazeFeatures.GazePositionMissing('Invalid gaze position')
+
+                gaze_position_pixel = GazeFeatures.GazePosition( (int(nearest_gaze_position.value[0] * visu_frame.width), int(nearest_gaze_position.value[1] * visu_frame.height)) )
+
+                # Draw gaze position
+                cv.circle(visu_frame.matrix, gaze_position_pixel, 2, (0, 255, 255), -1)
+
+                # Get nearest gaze position 3D before video timestamp and remove all gaze positions before
+                _, nearest_gaze_position_3d = past_gaze_positions_3d.pop_first_until(video_ts)
+
+                # Ignore frame when gaze position 3D is not valid
+                if nearest_gaze_position_3d.validity == 1:
+                    raise GazeFeatures.GazePositionMissing('Invalid gaze position 3D')
+
+                gaze_accuracy_mm = numpy.tan(numpy.deg2rad(tobii_accuracy)) * nearest_gaze_position_3d.value[2]
+                tobii_camera_hfov_mm = numpy.tan(numpy.deg2rad(tobii_camera_hfov / 2)) * nearest_gaze_position_3d.value[2]
+
+                gaze_position_pixel.accuracy = round(visu_frame.width * float(gaze_accuracy_mm) / float(tobii_camera_hfov_mm))
+
+                # Draw gaze accuracy circle
+                cv.circle(visu_frame.matrix, gaze_position_pixel, gaze_position_pixel.accuracy, (0, 255, 255), 1)
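+
+                # For instance (assumed values, for illustration only): with a gaze depth
+                # of 600 mm and a 1920 px wide camera frame,
+                # gaze_accuracy_mm = tan(1.42 deg) * 600 ~ 14.9 mm
+                # tobii_camera_hfov_mm = tan(41 deg) * 600 ~ 521.6 mm
+                # accuracy ~ round(1920 * 14.9 / 521.6) = 55 px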
+
+                # Hide frame left and right borders before tracking to ignore markers outside focus area
+                cv.rectangle(video_frame.matrix, (0, 0), (int(video_frame.width/6), int(video_frame.height)), (0, 0, 0), -1)
+                cv.rectangle(video_frame.matrix, (int(video_frame.width*(1 - 1/6)), 0), (int(video_frame.width), int(video_frame.height)), (0, 0, 0), -1)
+
+                # Track markers with pose estimation and draw them
+                aruco_tracker.track(video_frame.matrix)
+                aruco_tracker.draw(visu_frame.matrix)
+
+                # When no marker is detected, no AOI scene projection can be done
+                if aruco_tracker.get_markers_number() == 0:
+                    raise AOIFeatures.AOISceneMissing('No marker detected')
+
+                # Store aoi 2D video for further scene merging
+                aoi2D_dict = {}
+
+                # Project 3D scenes related to each aruco marker
+                for (i, marker_id) in enumerate(aruco_tracker.get_markers_ids()):
+
+                    # Select 3D scene related to detected marker
+                    aoi3D_scene = aoi3D_scene_selector(marker_id)
+
+                    if aoi3D_scene is None:
+                        continue
+
+                    # Transform scene into camera referential
+                    aoi3D_camera = aoi3D_scene.transform(aruco_tracker.get_marker_translation(i), aruco_tracker.get_marker_rotation(i))
+
+                    # Get aoi inside vision cone field
+                    cone_vision_height_cm = nearest_gaze_position_3d.value[2]/10 # cm
+                    cone_vision_radius_cm = numpy.tan(numpy.deg2rad(tobii_visual_hfov / 2)) * cone_vision_height_cm
+
+                    aoi3D_inside, aoi3D_outside = aoi3D_camera.vision_cone(cone_vision_radius_cm, cone_vision_height_cm)
+
+                    # Keep only aoi inside vision cone field
+                    aoi3D_scene = aoi3D_scene.copy(exclude=aoi3D_outside.keys())
+
+                    # DON'T APPLY CAMERA DISTORTION : it projects points which are far from the frame into it
+                    # This hack isn't realistic but as the gaze will mainly focus on centered AOI, where the distortion is low, it is acceptable.
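+                    # For reference only: a projection that models lens distortion would rely on
+                    # cv.projectPoints(object_points, rvec, tvec, camera_matrix, dist_coeffs) instead.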
+                    aoi2D_video_scene = aoi3D_scene.project(aruco_tracker.get_marker_translation(i), aruco_tracker.get_marker_rotation(i), aruco_camera.get_K())
+
+                    # Store each 2D aoi for further scene merging
+                    for name, aoi in aoi2D_video_scene.items():
+
+                        if name not in aoi2D_dict.keys():
+                            aoi2D_dict[name] = []
+
+                        aoi2D_dict[name].append(aoi.clockwise())
+
+                # Merge all 2D aoi into a single 2D scene
+                aoi2D_merged_scene = AOI2DScene.AOI2DScene()
+                for name, aoi_array in aoi2D_dict.items():
+                    aoi2D_merged_scene[name] = numpy.sum(aoi_array, axis=0) / len(aoi_array)
+
+                aoi2D_merged_scene.draw(visu_frame.matrix, gaze_position_pixel, gaze_position_pixel.accuracy, exclude=['Visualisation_Plan'])
+
+                # When the merged scene is empty
+                if len(aoi2D_merged_scene.keys()) == 0:
+                    raise AOIFeatures.AOISceneMissing('Scene is empty')
+
+                # Send look at aoi pointer
+                for name, aoi in aoi2D_merged_scene.items():
+
+                    if aoi.looked(gaze_position_pixel):
+
+                        # 4 corners aoi
+                        if len(aoi) == 4:
+                            IvySendMsg(f'looking {name} at {aoi.look_at(gaze_position_pixel)}')
+                        else:
+                            IvySendMsg(f'looking {name}')
+
+            # Raised when gaze data can't be processed
+            except GazeFeatures.GazePositionMissing as e:
+
+                cv.rectangle(visu_frame.matrix, (0, 50), (550, 100), (127, 127, 127), -1)
+                cv.putText(visu_frame.matrix, str(e), (20, 80), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
+
+            # Raised when aoi scene is missing
+            except AOIFeatures.AOISceneMissing as e:
+
+                cv.rectangle(visu_frame.matrix, (0, 50), (550, 100), (127, 127, 127), -1)
+                cv.putText(visu_frame.matrix, str(e), (20, 80), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
+
+            # Raised when buffer is empty
+            except ValueError:
+                pass
+
+            # Draw focus area
+            cv.rectangle(visu_frame.matrix, (int(video_frame.width/6), 0), (int(visu_frame.width*(1-1/6)), int(visu_frame.height)), (255, 150, 150), 1)
+
+            # Draw center
+            cv.line(visu_frame.matrix, (int(visu_frame.width/2) - 50, int(visu_frame.height/2)), (int(visu_frame.width/2) + 50, int(visu_frame.height/2)), (255, 150, 150), 1)
+            cv.line(visu_frame.matrix, (int(visu_frame.width/2), int(visu_frame.height/2) - 50), (int(visu_frame.width/2), int(visu_frame.height/2) + 50), (255, 150, 150), 1)
+
+            # Write stream timing (video timestamps are assumed to be in microseconds)
+            cv.rectangle(visu_frame.matrix, (0, 0), (550, 50), (63, 63, 63), -1)
+            cv.putText(visu_frame.matrix, f'Stream time: {int(video_ts / 1e3)} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
+
+            # Close window using 'Esc' key
+            if cv.waitKey(1) == 27:
+                break
+
+            cv.imshow('Live Scene', visu_frame.matrix)
+
+    # Exit on 'ctrl+C' interruption
+    except KeyboardInterrupt:
+        pass
+
+    # Stop frame display
+    cv.destroyAllWindows()
+
+    # Stop streaming
+    tobii_controller.stop_streaming()
+
+if __name__ == '__main__':
+
+    main()
\ No newline at end of file
diff --git a/src/argaze/utils/tobii_stream_display.py b/src/argaze/utils/tobii_stream_display.py
new file mode 100644
index 0000000..b849357
--- /dev/null
+++ b/src/argaze/utils/tobii_stream_display.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python
+
+import argparse
+import os, time
+
+from argaze import DataStructures, GazeFeatures
+from argaze.TobiiGlassesPro2 import *
+
+import cv2 as cv
+import numpy
+
+def main():
+    """
+    Capture the Tobii camera video stream and the gaze data stream, and synchronise them.
+ """ + + # Manage arguments + parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0]) + parser.add_argument('-t', '--tobii_ip', metavar='TOBII_IP', type=str, default='192.168.1.12', help='tobii glasses ip') + + args = parser.parse_args() + + # Create tobii controller + tobii_controller = TobiiController.TobiiController(args.tobii_ip, 'myProject', 'mySelf') + + # Calibrate tobii glasses + tobii_controller.calibrate() + + # Enable tobii data stream + tobii_data_stream = tobii_controller.enable_data_stream() + + # Enable tobii video stream + tobii_video_stream = tobii_controller.enable_video_stream() + + # Start streaming + tobii_controller.start_streaming() + + # Live video stream capture loop + try: + + past_gaze_positions = DataStructures.TimeStampedBuffer() + + while tobii_video_stream.is_alive(): + + video_ts, video_frame = tobii_video_stream.read() + + try: + + # Read data stream + data_stream = tobii_data_stream.read() + + # Store received gaze positions + past_gaze_positions.append(data_stream['GazePosition']) + + # Get last gaze position before video timestamp and remove all former gaze positions + earliest_ts, earliest_gaze_position = past_gaze_positions.pop_first_until(video_ts) + + # Draw gaze position + video_gaze_pixel = (int(earliest_gaze_position.value[0] * video_frame.width), int(earliest_gaze_position.value[1] * video_frame.height)) + cv.circle(video_frame.matrix, video_gaze_pixel, 4, (0, 255, 255), -1) + + # Wait for gaze position + except (AttributeError, ValueError): + continue + + # Close window using 'Esc' key + if cv.waitKey(1) == 27: + break + + cv.imshow(f'Live Tobii Camera', video_frame.matrix) + + # Exit on 'ctrl+C' interruption + except KeyboardInterrupt: + pass + + # Stop frame display + cv.destroyAllWindows() + + # Stop streaming + tobii_controller.stop_streaming() + +if __name__ == '__main__': + + main() \ No newline at end of file -- cgit v1.1