Diffstat (limited to 'src')
-rw-r--r--   src/argaze/TobiiGlassesPro2/TobiiEntities.py                  2
-rw-r--r--   src/argaze/TobiiGlassesPro2/TobiiVideo.py                     6
-rw-r--r--   src/argaze/utils/export_tobii_segment_aruco_visual_scan.py   13
-rw-r--r--   src/argaze/utils/export_tobii_segment_movements.py           15
-rw-r--r--   src/argaze/utils/live_tobii_aruco_aoi_ivy_controller.py       4
-rw-r--r--   src/argaze/utils/live_tobii_session.py                        6
-rw-r--r--   src/argaze/utils/replay_tobii_session.py                     31
7 files changed, 36 insertions(+), 41 deletions(-)
diff --git a/src/argaze/TobiiGlassesPro2/TobiiEntities.py b/src/argaze/TobiiGlassesPro2/TobiiEntities.py
index ea4994d..926b239 100644
--- a/src/argaze/TobiiGlassesPro2/TobiiEntities.py
+++ b/src/argaze/TobiiGlassesPro2/TobiiEntities.py
@@ -43,7 +43,7 @@ class TobiiSegment:
             raise RuntimeError(f'JSON fails to load {self.__segment_path}/{TOBII_SEGMENT_INFO_FILENAME}')
 
         self.__start_timestamp = start_timestamp
-        self.__end_timestamp = min(end_timestamp, int(item["seg_length"] * 1000000)) if end_timestamp != None else int(item["seg_length"] * 1000000)
+        self.__end_timestamp = min(end_timestamp, int(item["seg_length"] * 1e6)) if end_timestamp != None else int(item["seg_length"] * 1e6)
 
         if self.__start_timestamp >= self.__end_timestamp:
             raise ValueError('start time is equal or greater than end time.')
diff --git a/src/argaze/TobiiGlassesPro2/TobiiVideo.py b/src/argaze/TobiiGlassesPro2/TobiiVideo.py
index f9a67d3..fb84c04 100644
--- a/src/argaze/TobiiGlassesPro2/TobiiVideo.py
+++ b/src/argaze/TobiiGlassesPro2/TobiiVideo.py
@@ -47,7 +47,7 @@ class TobiiVideoSegment():
     def get_duration(self):
         """Duration in microsecond"""
         if self.__end_timestamp == None:
-            return int((self.__stream.duration * self.__stream.time_base) * 1000000) - self.__start_timestamp
+            return int((self.__stream.duration * self.__stream.time_base) * 1e6) - self.__start_timestamp
         else:
             return self.__end_timestamp - self.__start_timestamp
 
@@ -79,7 +79,7 @@ class TobiiVideoSegment():
 
             frame = self.__container.decode(self.__stream).__next__()
 
-            video_ts = int(frame.time * 1000000)
+            video_ts = int(frame.time * 1e6)
 
             # Ignore before start timestamp
             if video_ts < self.__start_timestamp:
@@ -182,7 +182,7 @@ class TobiiVideoStream(threading.Thread):
         # unlock frame access
         self.__read_lock.release()
 
-        return int(frame_tuple[0] * 1000000), TobiiVideoFrame(frame_tuple[1], frame_tuple[2], frame_tuple[3])
+        return int(frame_tuple[0] * 1e6), TobiiVideoFrame(frame_tuple[1], frame_tuple[2], frame_tuple[3])
 
 class TobiiVideoOutput():
     """Export a video file at the same format than a given referent stream."""
diff --git a/src/argaze/utils/export_tobii_segment_aruco_visual_scan.py b/src/argaze/utils/export_tobii_segment_aruco_visual_scan.py
index 6c34ba9..e830ed6 100644
--- a/src/argaze/utils/export_tobii_segment_aruco_visual_scan.py
+++ b/src/argaze/utils/export_tobii_segment_aruco_visual_scan.py
@@ -64,22 +64,21 @@ def main():
     vs_video_filepath = f'{destination_path}/visual_scan.mp4'
 
     # Load a tobii segment
-    tobii_segment = TobiiEntities.TobiiSegment(args.segment_path, int(args.time_range[0] * 1000000), int(args.time_range[1] * 1000000) if args.time_range[1] != None else None)
+    tobii_segment = TobiiEntities.TobiiSegment(args.segment_path, int(args.time_range[0] * 1e6), int(args.time_range[1] * 1e6) if args.time_range[1] != None else None)
 
     # Load a tobii segment video
     tobii_segment_video = tobii_segment.load_video()
-    print(f'Video properties:\n\tduration: {tobii_segment_video.get_duration()/1000000} s\n\twidth: {tobii_segment_video.get_width()} px\n\theight: {tobii_segment_video.get_height()} px')
+    print(f'Video properties:\n\tduration: {tobii_segment_video.get_duration()/1e6} s\n\twidth: {tobii_segment_video.get_width()} px\n\theight: {tobii_segment_video.get_height()} px')
 
     # Load a tobii segment data
     tobii_segment_data = tobii_segment.load_data()
-    print(f'Data keys:')
+    print(f'Loaded data count:')
     for name in tobii_segment_data.keys():
-        print(f'\t{name}')
+        print(f'\t{name}: {len(tobii_segment_data[name])} data')
 
     # Access to timestamped gaze positions data buffer
-    tobii_ts_gaze_positions = tobii_segment_data.gidx_l_gp
-    print(f'{len(tobii_ts_gaze_positions)} gaze positions loaded')
+    tobii_ts_gaze_positions = tobii_segment_data['GazePosition']
 
     # Access to timestamped gaze 3D positions data buffer
     #tobii_ts_gaze_3d_positions = tobii_segment_data.gidx_gp3
@@ -177,7 +176,7 @@ def main():
             closest_gaze_ts, closest_gaze_position = tobii_ts_gaze_positions.pop_first_until(video_ts)
 
             # Draw gaze position
-            video_gaze_pixel = (int(closest_gaze_position.gp[0] * video_frame.width), int(closest_gaze_position.gp[1] * video_frame.height))
+            video_gaze_pixel = (int(closest_gaze_position.value[0] * video_frame.width), int(closest_gaze_position.value[1] * video_frame.height))
             cv.circle(video_frame.matrix, video_gaze_pixel, 4, (0, 255, 255), -1)
 
             # Store gaze position at this time in millisecond
diff --git a/src/argaze/utils/export_tobii_segment_movements.py b/src/argaze/utils/export_tobii_segment_movements.py
index 0b0d867..7222414 100644
--- a/src/argaze/utils/export_tobii_segment_movements.py
+++ b/src/argaze/utils/export_tobii_segment_movements.py
@@ -47,28 +47,27 @@ def main():
     movements_filepath = f'{destination_path}/movements.csv'
 
     # Load a tobii segment
-    tobii_segment = TobiiEntities.TobiiSegment(args.segment_path, int(args.time_range[0] * 1000000), int(args.time_range[1] * 1000000) if args.time_range[1] != None else None)
+    tobii_segment = TobiiEntities.TobiiSegment(args.segment_path, int(args.time_range[0] * 1e6), int(args.time_range[1] * 1e6) if args.time_range[1] != None else None)
 
     # Load a tobii segment video
     tobii_segment_video = tobii_segment.load_video()
-    print(f'Video properties:\n\tduration: {tobii_segment_video.get_duration()/1000000} s\n\twidth: {tobii_segment_video.get_width()} px\n\theight: {tobii_segment_video.get_height()} px')
+    print(f'Video properties:\n\tduration: {tobii_segment_video.get_duration()/1e6} s\n\twidth: {tobii_segment_video.get_width()} px\n\theight: {tobii_segment_video.get_height()} px')
 
     # Load a tobii segment data
     tobii_segment_data = tobii_segment.load_data()
-    print(f'Data keys:')
+    print(f'Loaded data count:')
     for name in tobii_segment_data.keys():
-        print(f'\t{name}')
+        print(f'\t{name}: {len(tobii_segment_data[name])} data')
 
     # Access to timestamped gaze position data buffer
-    tobii_ts_gaze_positions = tobii_segment_data.gidx_l_gp
-    print(f'{len(tobii_ts_gaze_positions)} gaze positions loaded')
+    tobii_ts_gaze_positions = tobii_segment_data['GazePosition']
 
     # Format tobii gaze position in pixel and store them using millisecond unit timestamp
     ts_gaze_positions = GazeFeatures.TimeStampedGazePositions()
-    for ts, tobii_data in tobii_ts_gaze_positions.items():
-        video_gaze_pixel = (int(tobii_data.gp[0] * tobii_segment_video.get_width()), int(tobii_data.gp[1] * tobii_segment_video.get_height()))
+    for ts, tobii_gaze_position in tobii_ts_gaze_positions.items():
+        video_gaze_pixel = (int(tobii_gaze_position.value[0] * tobii_segment_video.get_width()), int(tobii_gaze_position.value[1] * tobii_segment_video.get_height()))
         ts_gaze_positions[ts/1000] = video_gaze_pixel
 
     print(f'Dispersion threshold: {args.dispersion_threshold}')
diff --git a/src/argaze/utils/live_tobii_aruco_aoi_ivy_controller.py b/src/argaze/utils/live_tobii_aruco_aoi_ivy_controller.py
index aa4365d..13efe2d 100644
--- a/src/argaze/utils/live_tobii_aruco_aoi_ivy_controller.py
+++ b/src/argaze/utils/live_tobii_aruco_aoi_ivy_controller.py
@@ -80,13 +80,13 @@ def main():
                 data_stream = tobii_data_stream.read()
 
                 # Store received gaze positions
-                past_gaze_positions.append(data_stream.gidx_l_gp)
+                past_gaze_positions.append(data_stream['GazePosition'])
 
                 # Get last gaze position before video timestamp and remove all former gaze positions
                 earliest_ts, earliest_gaze_position = past_gaze_positions.pop_first_until(video_ts)
 
                 # Draw gaze position
-                video_gaze_pixel = (int(earliest_gaze_position.gp[0] * video_frame.width), int(earliest_gaze_position.gp[1] * video_frame.height))
+                video_gaze_pixel = (int(earliest_gaze_position.value[0] * video_frame.width), int(earliest_gaze_position.value[1] * video_frame.height))
                 cv.circle(video_frame.matrix, video_gaze_pixel, 4, (0, 255, 255), -1)
 
             # Wait for gaze position
diff --git a/src/argaze/utils/live_tobii_session.py b/src/argaze/utils/live_tobii_session.py
index f71b18f..b849357 100644
--- a/src/argaze/utils/live_tobii_session.py
+++ b/src/argaze/utils/live_tobii_session.py
@@ -50,14 +50,14 @@ def main():
                 data_stream = tobii_data_stream.read()
 
                 # Store received gaze positions
-                past_gaze_positions.append(data_stream.gidx_l_gp)
+                past_gaze_positions.append(data_stream['GazePosition'])
 
                 # Get last gaze position before video timestamp and remove all former gaze positions
                 earliest_ts, earliest_gaze_position = past_gaze_positions.pop_first_until(video_ts)
 
                 # Draw gaze position
-                gaze_position = (int(earliest_gaze_position.gp[0] * video_frame.width), int(earliest_gaze_position.gp[1] * video_frame.height))
-                cv.circle(video_frame.matrix, gaze_position, 4, (0, 255, 255), -1)
+                video_gaze_pixel = (int(earliest_gaze_position.value[0] * video_frame.width), int(earliest_gaze_position.value[1] * video_frame.height))
+                cv.circle(video_frame.matrix, video_gaze_pixel, 4, (0, 255, 255), -1)
 
             # Wait for gaze position
             except (AttributeError, ValueError):
diff --git a/src/argaze/utils/replay_tobii_session.py b/src/argaze/utils/replay_tobii_session.py
index 4fb9f2b..86d0057 100644
--- a/src/argaze/utils/replay_tobii_session.py
+++ b/src/argaze/utils/replay_tobii_session.py
@@ -25,41 +25,38 @@ def main():
     if args.segment_path != None:
 
         # Load a tobii segment
-        tobii_segment = TobiiEntities.TobiiSegment(args.segment_path, int(args.time_range[0] * 1000000), int(args.time_range[1] * 1000000) if args.time_range[1] != None else None)
+        tobii_segment = TobiiEntities.TobiiSegment(args.segment_path, int(args.time_range[0] * 1e6), int(args.time_range[1] * 1e6) if args.time_range[1] != None else None)
 
         # Load a tobii segment video
         tobii_segment_video = tobii_segment.load_video()
-        print(f'Video properties:\n\tduration: {tobii_segment_video.get_duration()/1000000} s\n\twidth: {tobii_segment_video.get_width()} px\n\theight: {tobii_segment_video.get_height()} px')
+        print(f'Video properties:\n\tduration: {tobii_segment_video.get_duration() / 1e6} s\n\twidth: {tobii_segment_video.get_width()} px\n\theight: {tobii_segment_video.get_height()} px')
 
         # Load a tobii segment data
         tobii_segment_data = tobii_segment.load_data()
-        print(f'Data keys:')
+        print(f'Loaded data count:')
         for name in tobii_segment_data.keys():
-            print(f'\t{name}')
+            print(f'\t{name}: {len(tobii_segment_data[name])} data')
 
         # Access to timestamped gaze position data buffer
-        tobii_ts_gaze_positions = tobii_segment_data.gidx_l_gp
-        print(f'{len(tobii_ts_gaze_positions)} gaze positions loaded')
+        tobii_ts_gaze_positions = tobii_segment_data['GazePosition']
 
         # Access to timestamped pupil diameter data buffer
-        tobii_ts_pupil_diameter = tobii_segment_data.gidx_pd_eye
-        print(f'{len(tobii_ts_pupil_diameter)} pupil diameters loaded')
+        tobii_ts_pupil_diameter = tobii_segment_data['PupilDiameter']
 
         # Access to timestamped events data buffer
-        tobii_ts_events = tobii_segment_data.ets_type_tag
-        print(f'{len(tobii_ts_events)} events loaded')
+        tobii_ts_events = tobii_segment_data['Event']
 
         # Video and data replay loop
         try:
 
             # Initialise progress bar
-            MiscFeatures.printProgressBar(0, tobii_segment_video.get_duration()/1000, prefix = 'Video progression:', suffix = 'Complete', length = 100)
+            MiscFeatures.printProgressBar(0, tobii_segment_video.get_duration() / 1e3, prefix = 'Video progression:', suffix = 'Complete', length = 100)
 
             # Iterate on video frames
             for video_ts, video_frame in tobii_segment_video.frames():
 
-                video_ts_ms = video_ts / 1000
+                video_ts_ms = video_ts / 1e3
 
                 # Write segment timing
                 cv.putText(video_frame.matrix, f'Segment time: {int(video_ts_ms)} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
@@ -73,8 +70,8 @@ def main():
                     closest_pupil_ts, closest_pupil_diameter = tobii_ts_pupil_diameter.pop_first_until(video_ts)
 
                     # Draw gaze position
-                    gaze_position = (int(closest_gaze_position.gp[0] * video_frame.width), int(closest_gaze_position.gp[1] * video_frame.height))
-                    pupil_diameter = int((10 - closest_pupil_diameter.pd) / 2)
+                    gaze_position = (int(closest_gaze_position.value[0] * video_frame.width), int(closest_gaze_position.value[1] * video_frame.height))
+                    pupil_diameter = int((10 - closest_pupil_diameter.value) / 2)
 
                     cv.circle(video_frame.matrix, gaze_position, 10, (0, 255, 255), pupil_diameter)
 
@@ -87,7 +84,7 @@ def main():
                     # Get closest event before video timestamp and remove all gaze positions before
                     closest_event_ts, closest_event = tobii_ts_events.pop_first_until(video_ts)
 
-                    print(closest_event_ts/1000, closest_event)
+                    print(closest_event_ts / 1e3, closest_event)
 
                     # Write events
                     cv.putText(video_frame.matrix, str(closest_event), (20, 140), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
@@ -104,8 +101,8 @@ def main():
                 cv.imshow(f'Segment {tobii_segment.get_id()} video', video_frame.matrix)
 
                 # Update Progress Bar
-                progress = video_ts_ms - int(args.time_range[0] * 1000)
-                MiscFeatures.printProgressBar(progress, tobii_segment_video.get_duration()/1000, prefix = 'Video progression:', suffix = 'Complete', length = 100)
+                progress = video_ts_ms - int(args.time_range[0] * 1e3)
+                MiscFeatures.printProgressBar(progress, tobii_segment_video.get_duration() / 1e3, prefix = 'Video progression:', suffix = 'Complete', length = 100)
 
         # Exit on 'ctrl+C' interruption
         except KeyboardInterrupt:
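
Note: taken together, these changes replace raw Tobii field access (gidx_l_gp, gidx_pd_eye, ets_type_tag, .gp, .pd) with named data buffers ('GazePosition', 'PupilDiameter', 'Event') and a generic .value accessor. The following is a minimal usage sketch of the new keyed access as it appears in this diff; the segment path is a placeholder, and the explicit 0/None time range arguments are an assumption since default values are not visible here.

from argaze.TobiiGlassesPro2 import TobiiEntities

# Placeholder path to a recorded Tobii Glasses Pro 2 segment (illustrative only)
segment_path = '/path/to/recording/segments/1'

# Load the segment over its full time range; whether these arguments have
# defaults is not shown in this diff, so they are passed explicitly
tobii_segment = TobiiEntities.TobiiSegment(segment_path, 0, None)

# Data buffers are now keyed by name instead of raw Tobii field names
tobii_segment_data = tobii_segment.load_data()
tobii_ts_gaze_positions = tobii_segment_data['GazePosition']

# Each timestamped gaze position exposes normalised (x, y) coordinates through
# .value; timestamps are in microseconds, hence the division by 1e3 for milliseconds
for ts, gaze_position in tobii_ts_gaze_positions.items():
    x, y = gaze_position.value[0], gaze_position.value[1]
    print(f'{ts / 1e3} ms: ({x}, {y})')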