From 859a4b4230c1fc6fc0f61b5ae48f3e3f70bd5d2a Mon Sep 17 00:00:00 2001
From: Théo de la Hogue
Date: Wed, 20 Apr 2022 16:04:27 +0200
Subject: Processing pointer based visual scan.

---
 src/argaze/GazeFeatures.py                         |  86 ++++++---
 src/argaze/utils/README.md                         |   4 +-
 .../utils/export_tobii_segment_aruco_aois.py       | 193 --------------------
 .../export_tobii_segment_aruco_visual_scan.py      | 198 +++++++++++++++++++++
 src/argaze/utils/export_tobii_segment_fixations.py |   8 +-
 5 files changed, 271 insertions(+), 218 deletions(-)
 delete mode 100644 src/argaze/utils/export_tobii_segment_aruco_aois.py
 create mode 100644 src/argaze/utils/export_tobii_segment_aruco_visual_scan.py

(limited to 'src')

diff --git a/src/argaze/GazeFeatures.py b/src/argaze/GazeFeatures.py
index 13bd9f5..e132849 100644
--- a/src/argaze/GazeFeatures.py
+++ b/src/argaze/GazeFeatures.py
@@ -193,7 +193,24 @@ class DispersionBasedFixationIdentifier(FixationIdentifier):
 
         return -1, None
 
-class VisualScan():
+class VisualScanStep(DataStructures.DictObject):
+    """Define a visual scan step as a duration and an area of interest."""
+
+    def __init__(self, duration, aoi):
+
+        super().__init__(type(self).__name__, **{'duration': duration, 'aoi': aoi})
+
+class TimeStampedVisualScanSteps(DataStructures.TimeStampedBuffer):
+    """Define a timestamped buffer to store visual scan steps."""
+
+    def __setitem__(self, key, value: VisualScanStep):
+        """Force value to be a VisualScanStep."""
+        if type(value) != VisualScanStep:
+            raise ValueError('value must be a VisualScanStep')
+
+        super().__setitem__(key, value)
+
+class VisualScanGenerator():
     """Abstract class to define when an AOI starts being looked at and when it stops."""
 
     def __init__(self, ts_aoi_scenes: AOIFeatures.TimeStampedAOIScenes):
@@ -204,34 +221,68 @@ class VisualScan():
     def __iter__(self):
         raise NotImplementedError('__iter__() method not implemented')
 
-    def __next__(self):
-        raise NotImplementedError('__next__() method not implemented')
+    def build(self):
+
+        visual_scan_steps = TimeStampedVisualScanSteps()
+
+        for ts, step in self:
+
+            if step == None:
+                continue
 
-class PointerBasedVisualScan(VisualScan):
+            if step.get_type() == 'VisualScanStep':
+
+                visual_scan_steps[ts] = step
+
+        return visual_scan_steps
+
+class PointerBasedVisualScan(VisualScanGenerator):
     """Build visual scan on the basis of AOI's pointer information."""
 
-    def __init__(self, ts_aoi_scenes: AOIFeatures.TimeStampedAOIScenes, tolerance_to_lacking: int):
+    def __init__(self, ts_aoi_scenes: AOIFeatures.TimeStampedAOIScenes): # TODO : add tolerance_to_lacking ?
 
         super().__init__(ts_aoi_scenes)
 
         # process identification on a copy
         self.__ts_aoi_scenes = ts_aoi_scenes.copy()
 
-    def __iter__(self):
-        """Start to build visual scan."""
-        return self
+        # a dictionary to store when an aoi starts being looked at
+        self.__start_dict = {}
 
-    def __next__(self):
+    def __iter__(self):
+        """Visual scan generator function."""
 
         # while there is aoi scene to process
-        if len(self.__ts_aoi_scenes) > 0:
+        while len(self.__ts_aoi_scenes) > 0:
+
+            (ts_current, aoi_scene_current) = self.__ts_aoi_scenes.pop_first()
 
-            #if not ts_aoi.looked:
+            #if not aoi_scene_current.looked:
             #   raise ValueError('TimeStampedAOIScenes must be looked using look_at method.')
 
-            return # start timestamp, AOI name, duration
+            for name in aoi_scene_current.areas():
+
+                aoi_looked = aoi_scene_current[name].pointer != None
+
+                if aoi_looked:
+
+                    if not name in self.__start_dict.keys():
+
+                        # aoi starts being looked at
+                        self.__start_dict[name] = ts_current
 
-class FixationBasedVisualScan(VisualScan):
+                elif name in self.__start_dict.keys():
+
+                    # aoi stops being looked at
+                    ts_start = self.__start_dict[name]
+                    duration = ts_current - ts_start
+
+                    # forget the aoi
+                    del self.__start_dict[name]
+
+                    yield ts_start, VisualScanStep(duration, name)
+
+class FixationBasedVisualScan(VisualScanGenerator):
     """Build visual scan on the basis of timestamped fixations."""
 
     def __init__(self, ts_aoi_scenes: AOIFeatures.TimeStampedAOIScenes, ts_fixations: TimeStampedFixations):
@@ -246,11 +297,6 @@ class FixationBasedVisualScan(VisualScan):
 
         self.__ts_fixations = ts_fixations.copy()
 
     def __iter__(self):
-        """Start to build visual scan."""
-        return self
-
-    def __next__(self):
+        """Visual scan generator function."""
 
-        # while there is aoi scene to process
-        if len(self.__ts_aoi_scenes) > 0:
-            return
\ No newline at end of file
+        yield -1, None
diff --git a/src/argaze/utils/README.md b/src/argaze/utils/README.md
index 03dd7ec..3243b26 100644
--- a/src/argaze/utils/README.md
+++ b/src/argaze/utils/README.md
@@ -72,10 +72,10 @@ python ./src/argaze/utils/replay_tobii_session.py -s SEGMENT_PATH -r IN OUT
 python ./src/argaze/utils/export_tobii_segment_fixations.py -s SEGMENT_PATH -r IN OUT
 ```
 
-- Track ArUco markerinto a Tobii camera video segment (replace SEGMENT_PATH) into a time range selection (replace IN OUT). Load an aoi scene (replace AOI_SCENE) .obj file, position it virtually relatively to any detected ArUco markers and project the scene into camera frame. Then, detect if Tobii gaze point is inside any AOI. Export AOIs video and data.
+- Track ArUco markers in a Tobii camera video segment (replace SEGMENT_PATH) within a time range selection (replace IN OUT). Load an aoi scene (replace AOI_SCENE) .obj file, position it virtually relative to any detected ArUco markers and project the scene into the camera frame. Then, detect if Tobii gaze point is inside any AOI. Export AOIs video and visual scan data.
 
 ```
-python ./src/argaze/utils/export_tobii_segment_aruco_aois.py -s SEGMENT_PATH -c export/tobii_camera.json -m 7.5 -a AOI_SCENE -r IN OUT
+python ./src/argaze/utils/export_tobii_segment_aruco_visual_scan.py -s SEGMENT_PATH -c export/tobii_camera.json -m 7.5 -a AOI_SCENE -r IN OUT
 ```
 
 - Track ArUco markers (replace MARKER_ID) into Tobii camera video stream (replace IP_ADDRESS). Load an aoi scene (replace AOI_SCENE) .obj file, position it virtually relatively to any detected ArUco markers and project the scene into camera frame. Then, detect if Tobii gaze point is inside any AOI.
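For reference, a minimal sketch of how the new visual scan API chains together. It assumes a `ts_aois_scenes` buffer already filled during video replay (as in the export script below), dict-style `items()` iteration inherited from `DataStructures.TimeStampedBuffer`, and attribute access on steps following the `DictObject` convention:

```
from argaze import GazeFeatures

# ts_aois_scenes: an AOIFeatures.TimeStampedAOIScenes buffer filled frame by frame

# PointerBasedVisualScan.__iter__ yields (start timestamp, VisualScanStep) pairs;
# build() collects them into a TimeStampedVisualScanSteps buffer
visual_scan = GazeFeatures.PointerBasedVisualScan(ts_aois_scenes).build()

# each step records which AOI was looked at and for how long
for ts_start, step in visual_scan.items():
    print(f'{step.aoi} looked at from {ts_start} during {step.duration}')
```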
diff --git a/src/argaze/utils/export_tobii_segment_aruco_aois.py b/src/argaze/utils/export_tobii_segment_aruco_aois.py deleted file mode 100644 index 8e8c3fd..0000000 --- a/src/argaze/utils/export_tobii_segment_aruco_aois.py +++ /dev/null @@ -1,193 +0,0 @@ -#!/usr/bin/env python - -import argparse -import bisect -import os - -from argaze import DataStructures -from argaze import GazeFeatures -from argaze.TobiiGlassesPro2 import TobiiEntities, TobiiVideo -from argaze.ArUcoMarkers import * -from argaze.AreaOfInterest import * -from argaze.utils import MiscFeatures - -import numpy - -import cv2 as cv - -def main(): - """ - Track any ArUco marker into Tobii Glasses Pro 2 segment video file. - From a loaded AOI scene .obj file, position the scene virtually relatively to any detected ArUco markers and project the scene into camera frame. - Then, detect if Tobii gaze point is inside any AOI. - Export AOIs video and data. - """ - - # Manage arguments - parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0]) - parser.add_argument('-s', '--segment_path', metavar='SEGMENT_PATH', type=str, default=None, help='segment path') - parser.add_argument('-r', '--time_range', metavar=('START_TIME', 'END_TIME'), nargs=2, type=float, default=(0., None), help='start and end time (in second)') - parser.add_argument('-c', '--camera_calibration', metavar='CAM_CALIB', type=str, default='tobii_camera.json', help='json camera calibration filepath') - parser.add_argument('-a', '--aoi_scene', metavar='AOI_SCENE', type=str, default='aoi3D_scene.obj', help='obj aoi scene filepath') - parser.add_argument('-d', '--dictionary', metavar='DICT', type=str, default='DICT_ARUCO_ORIGINAL', help='aruco marker dictionnary (DICT_4X4_50, DICT_4X4_100, DICT_4X4_250, DICT_4X4_1000, DICT_5X5_50, DICT_5X5_100, DICT_5X5_250, DICT_5X5_1000, DICT_6X6_50, DICT_6X6_100, DICT_6X6_250, DICT_6X6_1000, DICT_7X7_50, DICT_7X7_100, DICT_7X7_250, DICT_7X7_1000, DICT_ARUCO_ORIGINAL,DICT_APRILTAG_16h5, DICT_APRILTAG_25h9, DICT_APRILTAG_36h10, DICT_APRILTAG_36h11)') - parser.add_argument('-m', '--marker_size', metavar='MARKER_SIZE', type=float, default=6, help='aruco marker size (cm)') - parser.add_argument('-i', '--markers_id', metavar='MARKERS_ID', nargs='*', type=int, default=[], help='markers id to track') - parser.add_argument('-o', '--output', metavar='OUT', type=str, default=None, help='destination folder path (segment folder by default)') - args = parser.parse_args() - - if args.segment_path != None: - - empty_marker_set = len(args.markers_id) == 0 - if empty_marker_set: - print(f'Track any Aruco markers from the {args.dictionary} dictionary') - else: - print(f'Track Aruco markers {args.markers_id} from the {args.dictionary} dictionary') - - # Manage destination path - if args.output != None: - - if not os.path.exists(os.path.dirname(args.output)): - - os.makedirs(os.path.dirname(args.output)) - print(f'{os.path.dirname(args.output)} folder created') - - aois_filepath = f'{args.output}/aois.json' - video_filepath = f'{args.output}/fullstream+visu.mp4' - - else: - - aois_filepath = f'{args.segment_path}/aois.json' - video_filepath = f'{args.segment_path}/fullstream+visu.mp4' - - # Load a tobii segment - tobii_segment = TobiiEntities.TobiiSegment(args.segment_path, int(args.time_range[0] * 1000000), int(args.time_range[1] * 1000000) if args.time_range[1] != None else None) - - # Load a tobii segment video - tobii_segment_video = tobii_segment.load_video() - print(f'Video duration: {tobii_segment_video.get_duration()/1000000}, width: 
{tobii_segment_video.get_width()}, height: {tobii_segment_video.get_height()}') - - # Load a tobii segment data - tobii_segment_data = tobii_segment.load_data() - print(f'Data keys: {tobii_segment_data.keys()}') - - # Access to timestamped gaze position data buffer - tobii_ts_gaze_positions = tobii_segment_data.gidx_l_gp - print(f'{len(tobii_ts_gaze_positions)} gaze positions loaded') - - # Prepare video exportation at the same format than segment video - output_video = TobiiVideo.TobiiVideoOutput(video_filepath, tobii_segment_video.get_stream()) - - # Create aruco camera - aruco_camera = ArUcoCamera.ArUcoCamera() - aruco_camera.load_calibration_file(args.camera_calibration) - - # Create aruco tracker - aruco_tracker = ArUcoTracker.ArUcoTracker(args.dictionary, args.marker_size, aruco_camera) - - # Create AOIs 3D scene - aoi3D_scene = AOI3DScene.AOI3DScene() - aoi3D_scene.load(args.aoi_scene) - print(f'AOIs names: {aoi3D_scene.areas()}') - - # Create timestamped buffer to store AOIs scene in time - ts_aois_scenes = AOIFeatures.TimeStampedAOIScenes() - - # Create timestamped buffer to store gaze positions in time - ts_gaze_positions = GazeFeatures.TimeStampedGazePositions() - - # Video and data replay loop - try: - - # Count frame to display a progress bar - MiscFeatures.printProgressBar(0, tobii_segment_video.get_duration(), prefix = 'Progress:', suffix = 'Complete', length = 100) - - # Iterate on video frames activating video / data synchronisation through vts data buffer - for video_ts, video_frame in tobii_segment_video.frames(tobii_segment_data.vts): - - try: - - # Get closest gaze position before video timestamp and remove all gaze positions before - closest_gaze_ts, closest_gaze_position = tobii_ts_gaze_positions.pop_first_until(video_ts) - - # Draw video synchronized gaze position - gaze_position = GazeFeatures.GazePosition(int(closest_gaze_position.gp[0] * video_frame.width), int(closest_gaze_position.gp[1] * video_frame.height)) - cv.circle(video_frame.matrix, tuple(gaze_position), 4, (0, 255, 255), -1) - - # Store gaze position at this time - ts_gaze_positions[video_ts] = gaze_position - - # When expected values can't be found - except (KeyError, AttributeError, ValueError): - - pass # keep last gaze position - - # Track markers with pose estimation and draw them - aruco_tracker.track(video_frame.matrix) - aruco_tracker.draw(video_frame.matrix) - - # Project 3D scene related to each aruco marker - if aruco_tracker.get_markers_number(): - - for (i, marker_id) in enumerate(aruco_tracker.get_markers_ids()): - - # TODO : Select different 3D scene depending on aruco id - - in_marker_set = marker_id in list(args.markers_id) - - if not empty_marker_set and not in_marker_set: - continue - - aoi3D_scene.rotation = aruco_tracker.get_marker_rotation(i) - aoi3D_scene.translation = aruco_tracker.get_marker_translation(i) - - # Edit Zero distorsion matrix - D0 = numpy.asarray([0.0, 0.0, 0.0, 0.0, 0.0]) - - # DON'T APPLY CAMERA DISTORSION : it projects points which are far from the frame into it - # This hack isn't realistic but as the gaze will mainly focus on centered AOI, where the distorsion is low, it is acceptable. 
-                        aoi2D_scene = aoi3D_scene.project(aruco_camera.get_K(), D0)
-
-                        # Check which 2D aois is looked
-                        if gaze_position != None:
-                            aoi2D_scene.look_at(gaze_position)
-
-                        # Draw 2D aois
-                        aoi2D_scene.draw(video_frame.matrix)
-
-                        # Store 2D aois scene at this time
-                        ts_aois_scenes[video_ts] = aoi2D_scene
-
-                # Close window using 'Esc' key
-                if cv.waitKey(1) == 27:
-                    break
-
-                # Display video
-                cv.imshow(f'Segment {tobii_segment.get_id()} video', video_frame.matrix)
-
-                # Write video
-                output_video.write(video_frame.matrix)
-
-                # Update Progress Bar
-                progress = video_ts - int(args.time_range[0] * 1000000) # - tobii_segment_video.get_vts_offset() ?
-                MiscFeatures.printProgressBar(progress, tobii_segment_video.get_duration(), prefix = 'Progress:', suffix = 'Complete', length = 100)
-
-    # Exit on 'ctrl+C' interruption
-    except KeyboardInterrupt:
-        pass
-
-    # Stop frame display
-    cv.destroyAllWindows()
-
-    # End output video file
-    output_video.close()
-
-    print(f'\nAOIs video saved into {video_filepath}')
-
-    # Export 2D aois
-    ts_aois_scenes.export_as_json(aois_filepath)
-
-    print(f'Timestamped AOIs positions saved into {aois_filepath}')
-
-if __name__ == '__main__':
-
-    main()
\ No newline at end of file
diff --git a/src/argaze/utils/export_tobii_segment_aruco_visual_scan.py b/src/argaze/utils/export_tobii_segment_aruco_visual_scan.py
new file mode 100644
index 0000000..290bdb7
--- /dev/null
+++ b/src/argaze/utils/export_tobii_segment_aruco_visual_scan.py
@@ -0,0 +1,198 @@
+#!/usr/bin/env python
+
+import argparse
+import bisect
+import os
+
+from argaze import DataStructures
+from argaze import GazeFeatures
+from argaze.TobiiGlassesPro2 import TobiiEntities, TobiiVideo
+from argaze.ArUcoMarkers import *
+from argaze.AreaOfInterest import *
+from argaze.utils import MiscFeatures
+
+import numpy
+
+import cv2 as cv
+
+def main():
+    """
+    Track any ArUco marker in a Tobii Glasses Pro 2 segment video file.
+    From a loaded AOI scene .obj file, position the scene virtually relative to any detected ArUco markers and project the scene into the camera frame.
+    Then, detect if Tobii gaze point is inside any AOI.
+    Export AOIs video and visual scan data.
+ """ + + # Manage arguments + parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0]) + parser.add_argument('-s', '--segment_path', metavar='SEGMENT_PATH', type=str, default=None, help='segment path') + parser.add_argument('-r', '--time_range', metavar=('START_TIME', 'END_TIME'), nargs=2, type=float, default=(0., None), help='start and end time (in second)') + parser.add_argument('-c', '--camera_calibration', metavar='CAM_CALIB', type=str, default='tobii_camera.json', help='json camera calibration filepath') + parser.add_argument('-a', '--aoi_scene', metavar='AOI_SCENE', type=str, default='aoi3D_scene.obj', help='obj aoi scene filepath') + parser.add_argument('-d', '--dictionary', metavar='DICT', type=str, default='DICT_ARUCO_ORIGINAL', help='aruco marker dictionnary (DICT_4X4_50, DICT_4X4_100, DICT_4X4_250, DICT_4X4_1000, DICT_5X5_50, DICT_5X5_100, DICT_5X5_250, DICT_5X5_1000, DICT_6X6_50, DICT_6X6_100, DICT_6X6_250, DICT_6X6_1000, DICT_7X7_50, DICT_7X7_100, DICT_7X7_250, DICT_7X7_1000, DICT_ARUCO_ORIGINAL,DICT_APRILTAG_16h5, DICT_APRILTAG_25h9, DICT_APRILTAG_36h10, DICT_APRILTAG_36h11)') + parser.add_argument('-m', '--marker_size', metavar='MARKER_SIZE', type=float, default=6, help='aruco marker size (cm)') + parser.add_argument('-i', '--markers_id', metavar='MARKERS_ID', nargs='*', type=int, default=[], help='markers id to track') + parser.add_argument('-o', '--output', metavar='OUT', type=str, default=None, help='destination folder path (segment folder by default)') + args = parser.parse_args() + + if args.segment_path != None: + + empty_marker_set = len(args.markers_id) == 0 + if empty_marker_set: + print(f'Track any Aruco markers from the {args.dictionary} dictionary') + else: + print(f'Track Aruco markers {args.markers_id} from the {args.dictionary} dictionary') + + # Manage destination path + if args.output != None: + + if not os.path.exists(os.path.dirname(args.output)): + + os.makedirs(os.path.dirname(args.output)) + print(f'{os.path.dirname(args.output)} folder created') + + visual_scan_filepath = f'{args.output}/visual_scan.json' + video_filepath = f'{args.output}/fullstream+visu.mp4' + + else: + + visual_scan_filepath = f'{args.segment_path}/visual_scan.json' + video_filepath = f'{args.segment_path}/fullstream+visu.mp4' + + # Load a tobii segment + tobii_segment = TobiiEntities.TobiiSegment(args.segment_path, int(args.time_range[0] * 1000000), int(args.time_range[1] * 1000000) if args.time_range[1] != None else None) + + # Load a tobii segment video + tobii_segment_video = tobii_segment.load_video() + print(f'Video duration: {tobii_segment_video.get_duration()/1000000}, width: {tobii_segment_video.get_width()}, height: {tobii_segment_video.get_height()}') + + # Load a tobii segment data + tobii_segment_data = tobii_segment.load_data() + print(f'Data keys: {tobii_segment_data.keys()}') + + # Access to timestamped gaze position data buffer + tobii_ts_gaze_positions = tobii_segment_data.gidx_l_gp + print(f'{len(tobii_ts_gaze_positions)} gaze positions loaded') + + # Prepare video exportation at the same format than segment video + output_video = TobiiVideo.TobiiVideoOutput(video_filepath, tobii_segment_video.get_stream()) + + # Create aruco camera + aruco_camera = ArUcoCamera.ArUcoCamera() + aruco_camera.load_calibration_file(args.camera_calibration) + + # Create aruco tracker + aruco_tracker = ArUcoTracker.ArUcoTracker(args.dictionary, args.marker_size, aruco_camera) + + # Create AOIs 3D scene + aoi3D_scene = AOI3DScene.AOI3DScene() + 
+        aoi3D_scene.load(args.aoi_scene)
+        print(f'AOIs names: {aoi3D_scene.areas()}')
+
+        # Create timestamped buffer to store AOI scenes in time
+        ts_aois_scenes = AOIFeatures.TimeStampedAOIScenes()
+
+        # Create timestamped buffer to store gaze positions in time
+        ts_gaze_positions = GazeFeatures.TimeStampedGazePositions()
+
+        # Video and data replay loop
+        try:
+
+            # Initialise progress bar
+            MiscFeatures.printProgressBar(0, tobii_segment_video.get_duration(), prefix = 'Progress:', suffix = 'Complete', length = 100)
+
+            # Iterate over video frames, activating video / data synchronisation through the vts data buffer
+            for video_ts, video_frame in tobii_segment_video.frames(tobii_segment_data.vts):
+
+                try:
+
+                    # Get closest gaze position before video timestamp and remove all gaze positions before
+                    closest_gaze_ts, closest_gaze_position = tobii_ts_gaze_positions.pop_first_until(video_ts)
+
+                    # Draw video synchronized gaze position
+                    gaze_position = GazeFeatures.GazePosition(int(closest_gaze_position.gp[0] * video_frame.width), int(closest_gaze_position.gp[1] * video_frame.height))
+                    cv.circle(video_frame.matrix, tuple(gaze_position), 4, (0, 255, 255), -1)
+
+                    # Store gaze position at this timestamp (in microseconds)
+                    ts_gaze_positions[video_ts] = gaze_position
+
+                # When expected values can't be found
+                except (KeyError, AttributeError, ValueError):
+
+                    pass # keep last gaze position
+
+                # Track markers with pose estimation and draw them
+                aruco_tracker.track(video_frame.matrix)
+                aruco_tracker.draw(video_frame.matrix)
+
+                # Project 3D scene related to each aruco marker
+                if aruco_tracker.get_markers_number():
+
+                    for (i, marker_id) in enumerate(aruco_tracker.get_markers_ids()):
+
+                        # TODO : Select different 3D scene depending on aruco id
+
+                        in_marker_set = marker_id in list(args.markers_id)
+
+                        if not empty_marker_set and not in_marker_set:
+                            continue
+
+                        aoi3D_scene.rotation = aruco_tracker.get_marker_rotation(i)
+                        aoi3D_scene.translation = aruco_tracker.get_marker_translation(i)
+
+                        # Edit zero distortion matrix
+                        D0 = numpy.asarray([0.0, 0.0, 0.0, 0.0, 0.0])
+
+                        # DON'T APPLY CAMERA DISTORTION: it projects points which are far from the frame into it
+                        # This hack isn't realistic but, as the gaze mainly focuses on centered AOIs where distortion is low, it is acceptable.
+                        aoi2D_scene = aoi3D_scene.project(aruco_camera.get_K(), D0)
+
+                        # Check which 2D AOIs are looked at
+                        if gaze_position != None:
+                            aoi2D_scene.look_at(gaze_position)
+
+                        # Draw 2D AOIs
+                        aoi2D_scene.draw(video_frame.matrix)
+
+                        # Store 2D AOI scene at this timestamp (in microseconds)
+                        ts_aois_scenes[video_ts] = aoi2D_scene
+
+                # Close window using 'Esc' key
+                if cv.waitKey(1) == 27:
+                    break
+
+                # Display video
+                cv.imshow(f'Segment {tobii_segment.get_id()} video', video_frame.matrix)
+
+                # Write video
+                output_video.write(video_frame.matrix)
+
+                # Update Progress Bar
+                progress = video_ts - int(args.time_range[0] * 1000000) # - tobii_segment_video.get_vts_offset() ?
+ MiscFeatures.printProgressBar(progress, tobii_segment_video.get_duration(), prefix = 'Progress:', suffix = 'Complete', length = 100) + + # Exit on 'ctrl+C' interruption + except KeyboardInterrupt: + pass + + # Stop frame display + cv.destroyAllWindows() + + # End output video file + output_video.close() + + print(f'\nAOIs video saved into {video_filepath}') + + # Build visual scan based on aoi's pointer + visual_scan = GazeFeatures.PointerBasedVisualScan(ts_aois_scenes).build() + + print(f'{len(visual_scan)} visual scan steps found') + + # Export visual scan + visual_scan.export_as_json(visual_scan_filepath) + + print(f'Visual scan saved into {visual_scan_filepath}') + +if __name__ == '__main__': + + main() \ No newline at end of file diff --git a/src/argaze/utils/export_tobii_segment_fixations.py b/src/argaze/utils/export_tobii_segment_fixations.py index f232495..f0a8a4d 100644 --- a/src/argaze/utils/export_tobii_segment_fixations.py +++ b/src/argaze/utils/export_tobii_segment_fixations.py @@ -64,11 +64,11 @@ def main(): print(f'Dispersion threshold: {args.dispersion_threshold}') print(f'Duration threshold: {args.duration_threshold}') - fixation_analyser = GazeFeatures.DispersionBasedFixationIdentifier(generic_ts_gaze_positions, args.dispersion_threshold, args.duration_threshold) - # Start fixation identification + fixation_analyser = GazeFeatures.DispersionBasedFixationIdentifier(generic_ts_gaze_positions, args.dispersion_threshold, args.duration_threshold) ts_fixations = GazeFeatures.TimeStampedFixations() + # Initialise progress bar MiscFeatures.printProgressBar(0, int(tobii_segment_video.get_duration()/1000), prefix = 'Progress:', suffix = 'Complete', length = 100) for ts, item in fixation_analyser: @@ -80,7 +80,9 @@ def main(): ts_fixations[ts] = item - MiscFeatures.printProgressBar(ts-int(args.time_range[0]*1000), int(tobii_segment_video.get_duration()/1000), prefix = 'Progress:', suffix = 'Complete', length = 100) + # Update Progress Bar + progress = ts - int(args.time_range[0] * 1000) + MiscFeatures.printProgressBar(progress, int(tobii_segment_video.get_duration()/1000), prefix = 'Progress:', suffix = 'Complete', length = 100) print(f'\n{len(ts_fixations)} fixations found') -- cgit v1.1
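As a usage note on the exported steps, per-AOI dwell times can be aggregated in a few lines once `build()` has run. This is a sketch: dict-style `items()` iteration is assumed from the `DataStructures.TimeStampedBuffer` base class, and step durations are taken to be in microseconds like the segment timestamps:

```
from collections import defaultdict

# visual_scan: a TimeStampedVisualScanSteps buffer returned by build()
dwell = defaultdict(int)

# sum step durations per AOI name
for ts_start, step in visual_scan.items():
    dwell[step.aoi] += step.duration

# report total time spent on each AOI, converted to seconds
for aoi, duration in dwell.items():
    print(f'{aoi}: {duration / 1000000:.2f} s')
```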