Diffstat (limited to 'src')
-rw-r--r--  src/argaze/utils/tobii_segment_gaze_movements_export.py  459
1 file changed, 296 insertions, 163 deletions
diff --git a/src/argaze/utils/tobii_segment_gaze_movements_export.py b/src/argaze/utils/tobii_segment_gaze_movements_export.py
index 1ffc836..9b6ee5c 100644
--- a/src/argaze/utils/tobii_segment_gaze_movements_export.py
+++ b/src/argaze/utils/tobii_segment_gaze_movements_export.py
@@ -2,14 +2,17 @@
import argparse
import os
+import math
-from argaze import GazeFeatures
+from argaze import DataStructures, GazeFeatures
+from argaze.AreaOfInterest import AOIFeatures
from argaze.GazeAnalysis import DispersionBasedGazeMovementIdentifier
from argaze.TobiiGlassesPro2 import TobiiEntities, TobiiVideo, TobiiSpecifications
from argaze.utils import MiscFeatures
import cv2 as cv
import numpy
+import pandas
def main():
"""
@@ -18,245 +21,375 @@ def main():
# Manage arguments
parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0])
- parser.add_argument('-s', '--segment_path', metavar='SEGMENT_PATH', type=str, default=None, help='path to a tobii segment folder')
- parser.add_argument('-r', '--time_range', metavar=('START_TIME', 'END_TIME'), nargs=2, type=float, default=(0., None), help='start and end time (in second)')
- parser.add_argument('-d', '--dispersion_threshold', metavar='DISPERSION_THRESHOLD', type=int, default=50, help='dispersion threshold in pixel')
- parser.add_argument('-t', '--duration_threshold', metavar='DURATION_THRESHOLD', type=int, default=100, help='duration threshold in millisecond')
+ parser.add_argument('-s', '--segment_path', metavar='SEGMENT_PATH', type=str, default=None, help='path to a tobii segment folder', required=True)
+ parser.add_argument('-a', '--aoi', metavar='AOI_NAME', type=str, default=None, help='aoi name where to project gaze', required=True)
+ parser.add_argument('-t', '--time_range', metavar=('START_TIME', 'END_TIME'), nargs=2, type=float, default=(0., None), help='start and end time (in second)')
+ parser.add_argument('-di', '--dispersion_threshold', metavar='DISPERSION_THRESHOLD', type=int, default=50, help='dispersion threshold in pixel')
+ parser.add_argument('-du', '--duration_threshold', metavar='DURATION_THRESHOLD', type=int, default=100, help='duration threshold in millisecond')
parser.add_argument('-o', '--output', metavar='OUT', type=str, default=None, help='destination folder path (segment folder by default)')
parser.add_argument('-w', '--window', metavar='DISPLAY', type=bool, default=True, help='enable window display', action=argparse.BooleanOptionalAction)
args = parser.parse_args()
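With -s and -a now required, a typical invocation might look something like this (segment path and AOI name are hypothetical):

    python tobii_segment_gaze_movements_export.py -s ./segments/segment_1 -a Screen_Plan -t 0 60 --no-window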
- if args.segment_path != None:
+ # Manage destination path
+ destination_path = '.'
+ if args.output != None:
- # Manage destination path
- destination_path = '.'
- if args.output != None:
+ if not os.path.exists(os.path.dirname(args.output)):
- if not os.path.exists(os.path.dirname(args.output)):
+ os.makedirs(os.path.dirname(args.output))
+ print(f'{os.path.dirname(args.output)} folder created')
- os.makedirs(os.path.dirname(args.output))
- print(f'{os.path.dirname(args.output)} folder created')
+ destination_path = args.output
- destination_path = args.output
+ else:
- else:
+ destination_path = args.segment_path
+
+ # Export into a dedicated time range folder
+ if args.time_range[1] != None:
+ timerange_path = f'[{int(args.time_range[0])}s - {int(args.time_range[1])}s]'
+ else:
+ timerange_path = f'[all]'
+
+ destination_path = f'{destination_path}/{timerange_path}/{args.aoi}'
+
+ if not os.path.exists(destination_path):
+
+ os.makedirs(destination_path)
+ print(f'{destination_path} folder created')
+
+ aoi_filepath = f'{destination_path}/../aoi.json'
- destination_path = args.segment_path
+ fixations_filepath = f'{destination_path}/gaze_fixations.csv'
+ saccades_filepath = f'{destination_path}/gaze_saccades.csv'
- # Export into a dedicated time range folder
- if args.time_range[1] != None:
- timerange_path = f'[{int(args.time_range[0])}s - {int(args.time_range[1])}s]'
- else:
- timerange_path = f'[all]'
+ gaze_status_filepath = f'{destination_path}/gaze_status.csv'
+ gaze_status_video_filepath = f'{destination_path}/gaze_status.mp4'
+ gaze_status_image_filepath = f'{destination_path}/gaze_status.png'
- destination_path = f'{destination_path}/{timerange_path}'
+ gaze_metrics_filepath = f'{destination_path}/gaze_metrics.csv'
- if not os.path.exists(destination_path):
+ # Load aoi scene projection
+ ts_aois_projections = DataStructures.TimeStampedBuffer.from_json(aoi_filepath)
- os.makedirs(destination_path)
- print(f'{destination_path} folder created')
+ print(f'\nAOI frames: ', len(ts_aois_projections))
+ aoi_names = ts_aois_projections.as_dataframe().drop(['offset','comment'], axis=1).columns
+ for aoi_name in aoi_names:
+ print(f'\t{aoi_name}')
- fixations_filepath = f'{destination_path}/gaze_fixations.csv'
- saccades_filepath = f'{destination_path}/gaze_saccades.csv'
+ # Load tobii segment
+ tobii_segment = TobiiEntities.TobiiSegment(args.segment_path, int(args.time_range[0] * 1e6), int(args.time_range[1] * 1e6) if args.time_range[1] != None else None)
- gaze_status_filepath = f'{destination_path}/gaze_status.csv'
- gaze_status_video_filepath = f'{destination_path}/gaze_status.mp4'
+ # Get participant name
+ participant_name = TobiiEntities.TobiiParticipant(f'{args.segment_path}/../../').name
- # Load a tobii segment
- tobii_segment = TobiiEntities.TobiiSegment(args.segment_path, int(args.time_range[0] * 1e6), int(args.time_range[1] * 1e6) if args.time_range[1] != None else None)
+ print(f'\nParticipant: {participant_name}')
- # Load a tobii segment video
- tobii_segment_video = tobii_segment.load_video()
- print(f'Video properties:\n\tduration: {tobii_segment_video.duration/1e6} s\n\twidth: {tobii_segment_video.width} px\n\theight: {tobii_segment_video.height} px')
+ # Load a tobii segment video
+ tobii_segment_video = tobii_segment.load_video()
+ print(f'\nVideo properties:\n\tduration: {tobii_segment_video.duration/1e6} s\n\twidth: {tobii_segment_video.width} px\n\theight: {tobii_segment_video.height} px')
- # Load a tobii segment data
- tobii_segment_data = tobii_segment.load_data()
-
- print(f'Loaded data count:')
- for name in tobii_segment_data.keys():
- print(f'\t{name}: {len(tobii_segment_data[name])} data')
+ # Load a tobii segment data
+ tobii_segment_data = tobii_segment.load_data()
+
+ print(f'\nLoaded data count:')
+ for name in tobii_segment_data.keys():
+ print(f'\t{name}: {len(tobii_segment_data[name])} data')
- # Access to timestamped gaze position data buffer
- tobii_ts_gaze_positions = tobii_segment_data['GazePosition']
+ # Access to timestamped gaze position data buffer
+ tobii_ts_gaze_positions = tobii_segment_data['GazePosition']
- # Access to timestamped gaze 3D positions data buffer
- tobii_ts_gaze_positions_3d = tobii_segment_data['GazePosition3D']
+ # Access to timestamped gaze 3D positions data buffer
+ tobii_ts_gaze_positions_3d = tobii_segment_data['GazePosition3D']
+
+	# Format tobii gaze position and accuracy in pixels and project them into the aoi scene
+ ts_gaze_positions = GazeFeatures.TimeStampedGazePositions()
+
+ # Gaze projection metrics
+ ts_projection_metrics = DataStructures.TimeStampedBuffer()
+
+ # Initialise progress bar
+ MiscFeatures.printProgressBar(0, tobii_segment_video.duration, prefix = '\nGazePositions projection:', suffix = 'Complete', length = 100)
+
+ for ts, tobii_gaze_position in tobii_ts_gaze_positions.items():
+
+ # Update Progress Bar
+ progress = ts - int(args.time_range[0] * 1e6)
+ MiscFeatures.printProgressBar(progress, tobii_segment_video.duration, prefix = 'GazePositions projection:', suffix = 'Complete', length = 100)
+
+ try:
- # Format tobii gaze position and accuracy in pixel
- ts_gaze_positions = GazeFeatures.TimeStampedGazePositions()
+	# Get the last aoi projection at or before the current gaze position timestamp
+ ts_current_aois, current_aois = ts_aois_projections.pop_last_until(ts)
- for ts, tobii_gaze_position in tobii_ts_gaze_positions.items():
+ assert(ts_current_aois <= ts)
- # Test gaze position validity
- if tobii_gaze_position.validity == 0:
+	# QUESTION: What to do if the current aoi projection is too old?
+	# If the aoi didn't move, it is not a problem...
+	# For the moment, we just provide a metric to assess that its age is not too big
+ ts_projection_metrics[ts] = {'frame': ts_current_aois, 'age': ts - ts_current_aois}
- gaze_position_px = (int(tobii_gaze_position.value[0] * tobii_segment_video.width), int(tobii_gaze_position.value[1] * tobii_segment_video.height))
+ current_aoi_offset = current_aois.pop('offset')
+ current_aoi_comment = current_aois.pop('comment')
+
+ selected_aoi = AOIFeatures.AreaOfInterest(current_aois.pop(args.aoi))
+
+ # Wait for aois projection
+ except KeyError:
+
+ continue
+
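Gaze samples arrive much faster than aoi projections, so each gaze timestamp is matched with the most recent projection at or before it. A rough sketch of what pop_last_until is assumed to do, written against a plain dict rather than the actual DataStructures.TimeStampedBuffer:

    def pop_last_until(buffer, ts):
        # Return the last (timestamp, value) entry at or before ts and
        # remove it and every earlier entry from the buffer
        earlier = [t for t in sorted(buffer) if t <= ts]
        if not earlier:
            raise KeyError(ts)  # no projection available yet: caller keeps waiting
        last_ts = earlier[-1]
        value = buffer[last_ts]
        for t in earlier:
            del buffer[t]
        return last_ts, value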
+ # Test gaze position validity
+ if tobii_gaze_position.validity == 0:
+
+ gaze_position_px = (int(tobii_gaze_position.value[0] * tobii_segment_video.width), int(tobii_gaze_position.value[1] * tobii_segment_video.height))
+
+ # Get gaze position 3D at same gaze position timestamp
+ tobii_gaze_position_3d = tobii_ts_gaze_positions_3d.pop(ts)
+
+ # Test gaze position 3d validity
+ if tobii_gaze_position_3d.validity == 0:
+
+ gaze_accuracy_mm = numpy.sin(numpy.deg2rad(TobiiSpecifications.ACCURACY)) * tobii_gaze_position_3d.value[2]
+ tobii_camera_hfov_mm = numpy.sin(numpy.deg2rad(TobiiSpecifications.CAMERA_HFOV)) * tobii_gaze_position_3d.value[2]
- # Get gaze position 3D at same gaze position timestamp
- tobii_gaze_position_3d = tobii_ts_gaze_positions_3d.pop(ts)
+ gaze_accuracy_px = round(tobii_segment_video.width * float(gaze_accuracy_mm) / float(tobii_camera_hfov_mm))
- # Test gaze position 3d validity
- if tobii_gaze_position_3d.validity == 0:
-
- gaze_accuracy_mm = numpy.sin(numpy.deg2rad(TobiiSpecifications.ACCURACY)) * tobii_gaze_position_3d.value[2]
- tobii_camera_hfov_mm = numpy.sin(numpy.deg2rad(TobiiSpecifications.CAMERA_HFOV)) * tobii_gaze_position_3d.value[2]
-
- gaze_accuracy_px = round(tobii_segment_video.width * float(gaze_accuracy_mm) / float(tobii_camera_hfov_mm))
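The conversion above projects the tracker's angular accuracy onto the scene camera image: both the accuracy and the camera field of view are expressed in millimetres at the gaze depth, so the depth cancels out in the final ratio. A self-contained sketch with hypothetical constants (TobiiSpecifications provides the real ones):

    import numpy

    ACCURACY = 1.42       # angular gaze accuracy in degrees (hypothetical)
    CAMERA_HFOV = 82.     # scene camera horizontal field of view in degrees (hypothetical)
    video_width = 1920    # scene video width in pixels
    gaze_depth_mm = 650.  # gaze position 3D depth in millimetres (hypothetical)

    gaze_accuracy_mm = numpy.sin(numpy.deg2rad(ACCURACY)) * gaze_depth_mm
    camera_hfov_mm = numpy.sin(numpy.deg2rad(CAMERA_HFOV)) * gaze_depth_mm

    # the depth cancels out: only the ratio of the two sines remains
    gaze_accuracy_px = round(video_width * gaze_accuracy_mm / camera_hfov_mm)  # 48 px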
+ # Edit gaze position
+ gaze_position = GazeFeatures.GazePosition(gaze_position_px, accuracy=gaze_accuracy_px)
- # Store gaze position
- ts_gaze_positions[ts] = GazeFeatures.GazePosition(gaze_position_px, accuracy=gaze_accuracy_px)
+	# Project gaze position into the selected aoi
+ if selected_aoi.contains_point(gaze_position.value):
+
+ inner_x, inner_y = selected_aoi.inner_axis(gaze_position.value)
+
+ # Store inner gaze position for further movement processing
+ ts_gaze_positions[ts] = GazeFeatures.GazePosition((round(inner_x*1920), round(inner_y*1080))) # TEMP: This is Screen_Plan dimension
continue
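inner_axis is assumed to return the gaze position in the aoi's own normalised coordinate system, which the line above then scales to the 1920x1080 Screen_Plan dimension. The idea can be sketched with a perspective transform from a hypothetical aoi quadrilateral to the unit square:

    import cv2 as cv
    import numpy

    aoi_corners = numpy.float32([[300, 200], [1500, 220], [1480, 900], [280, 880]])  # hypothetical
    unit_square = numpy.float32([[0, 0], [1, 0], [1, 1], [0, 1]])

    to_inner = cv.getPerspectiveTransform(aoi_corners, unit_square)

    gaze_px = numpy.float32([[[800., 500.]]])  # hypothetical gaze position in video pixels
    inner_x, inner_y = cv.perspectiveTransform(gaze_px, to_inner)[0][0]

    inner_position = (round(inner_x * 1920), round(inner_y * 1080))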
- # Store unvalid gaze position for further movement processing
- ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition()
+	# Store invalid gaze position for further movement processing
+ ts_gaze_positions[ts] = GazeFeatures.UnvalidGazePosition()
- print(f'GazeMovement identifier parameters:')
- print(f'\tDispersion threshold = {args.dispersion_threshold}')
- print(f'\tDuration threshold = {args.duration_threshold}')
+ print(f'\nGazePositions projection metrics:')
+ projection_metrics_dataframe = ts_projection_metrics.as_dataframe()
+ print(f'\t AOI age mean (ms) = {projection_metrics_dataframe.age.mean() * 1e-3}')
+ print(f'\t AOI age max (ms) = {projection_metrics_dataframe.age.max() * 1e-3}')
- # Start movement identification
- movement_identifier = DispersionBasedGazeMovementIdentifier.GazeMovementIdentifier(args.dispersion_threshold, args.duration_threshold*1e3)
- ts_fixations = GazeFeatures.TimeStampedGazeMovements()
- ts_saccades = GazeFeatures.TimeStampedGazeMovements()
- ts_status = GazeFeatures.TimeStampedGazeStatus()
+ print(f'\nGazeMovement identifier parameters:')
+ print(f'\tDispersion threshold = {args.dispersion_threshold}')
+ print(f'\tDuration threshold = {args.duration_threshold}')
- # Initialise progress bar
- MiscFeatures.printProgressBar(0, tobii_segment_video.duration, prefix = 'GazeMovements identification:', suffix = 'Complete', length = 100)
+ # Start movement identification
+ movement_identifier = DispersionBasedGazeMovementIdentifier.GazeMovementIdentifier(args.dispersion_threshold, args.duration_threshold*1e3)
+ ts_fixations = GazeFeatures.TimeStampedGazeMovements()
+ ts_saccades = GazeFeatures.TimeStampedGazeMovements()
+ ts_status = GazeFeatures.TimeStampedGazeStatus()
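DispersionBasedGazeMovementIdentifier follows the dispersion-threshold identification (I-DT) principle: positions that stay within the dispersion threshold for at least the duration threshold form a fixation, and the jumps between fixations are saccades. A minimal sketch of that principle, not the actual argaze implementation, using a simple range-based dispersion measure:

    def dispersion(points):
        # one common dispersion measure: horizontal range plus vertical range
        xs, ys = zip(*points)
        return (max(xs) - min(xs)) + (max(ys) - min(ys))

    def identify_fixations(samples, dispersion_threshold, duration_threshold):
        # samples: list of (timestamp, (x, y)) sorted by timestamp
        i = 0
        while i < len(samples):
            # initial window: the first samples spanning the duration threshold
            j = i
            while j < len(samples) and samples[j][0] - samples[i][0] < duration_threshold:
                j += 1
            if j == len(samples):
                break  # not enough remaining samples to span the duration threshold
            window = samples[i:j + 1]
            points = [p for _, p in window]
            if dispersion(points) <= dispersion_threshold:
                # extend the window while dispersion stays under the threshold
                k = j + 1
                while k < len(samples) and dispersion(points + [samples[k][1]]) <= dispersion_threshold:
                    window.append(samples[k])
                    points.append(samples[k][1])
                    k += 1
                yield window[0][0], window[-1][0], points
                i = k
            else:
                i += 1

Note that the identifier above receives args.duration_threshold*1e3 because timestamps are in microseconds while the command line argument is in milliseconds.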
- for gaze_movement in movement_identifier(ts_gaze_positions):
+ # Initialise progress bar
+ MiscFeatures.printProgressBar(0, tobii_segment_video.duration, prefix = '\nGazeMovements identification:', suffix = 'Complete', length = 100)
- if isinstance(gaze_movement, DispersionBasedGazeMovementIdentifier.Fixation):
+ for gaze_movement in movement_identifier(ts_gaze_positions):
- start_ts, start_position = gaze_movement.positions.first
+ if isinstance(gaze_movement, DispersionBasedGazeMovementIdentifier.Fixation):
- ts_fixations[start_ts] = gaze_movement
+ start_ts, start_position = gaze_movement.positions.first
- for ts, position in gaze_movement.positions.items():
+ ts_fixations[start_ts] = gaze_movement
- ts_status[ts] = GazeFeatures.GazeStatus.from_position(position, 'Fixation', len(ts_fixations))
+ for ts, position in gaze_movement.positions.items():
- elif isinstance(gaze_movement, DispersionBasedGazeMovementIdentifier.Saccade):
+ ts_status[ts] = GazeFeatures.GazeStatus.from_position(position, 'Fixation', len(ts_fixations))
- start_ts, start_position = gaze_movement.positions.first
- end_ts, end_position = gaze_movement.positions.last
-
- ts_saccades[start_ts] = gaze_movement
+ elif isinstance(gaze_movement, DispersionBasedGazeMovementIdentifier.Saccade):
- ts_status[start_ts] = GazeFeatures.GazeStatus.from_position(start_position, 'Saccade', len(ts_saccades))
- ts_status[end_ts] = GazeFeatures.GazeStatus.from_position(end_position, 'Saccade', len(ts_saccades))
+ start_ts, start_position = gaze_movement.positions.first
+ end_ts, end_position = gaze_movement.positions.last
+
+ ts_saccades[start_ts] = gaze_movement
- else:
- continue
+ ts_status[start_ts] = GazeFeatures.GazeStatus.from_position(start_position, 'Saccade', len(ts_saccades))
+ ts_status[end_ts] = GazeFeatures.GazeStatus.from_position(end_position, 'Saccade', len(ts_saccades))
- # Update Progress Bar
- progress = ts - int(args.time_range[0] * 1e6)
- MiscFeatures.printProgressBar(progress, tobii_segment_video.duration, prefix = 'GazeMovements identification:', suffix = 'Complete', length = 100)
+ else:
+ continue
- print(f'\n{len(ts_fixations)} fixations and {len(ts_saccades)} saccades found')
+ # Update Progress Bar
+ progress = start_ts - int(args.time_range[0] * 1e6)
+ MiscFeatures.printProgressBar(progress, tobii_segment_video.duration, prefix = 'GazeMovements identification:', suffix = 'Complete', length = 100)
- # Export fixations analysis
- ts_fixations.as_dataframe().to_csv(fixations_filepath, index=True)
- print(f'Fixations saved into {fixations_filepath}')
+ print(f'\nGazeMovements identification metrics:')
+ print(f'\t{len(ts_fixations)} fixations found')
+ print(f'\t{len(ts_saccades)} saccades found')
- # Export saccades analysis
- ts_saccades.as_dataframe().to_csv(saccades_filepath, index=True)
- print(f'Saccades saved into {saccades_filepath}')
+ # Export fixations analysis
+ fixations_dataframe = ts_fixations.as_dataframe()
+ fixations_dataframe.to_csv(fixations_filepath, index=True)
+ print(f'\nFixations saved into {fixations_filepath}')
- # Export gaze status analysis
- ts_status.as_dataframe().to_csv(gaze_status_filepath, index=True)
- print(f'Gaze status saved into {gaze_status_filepath}')
+ # Export saccades analysis
+ saccades_dataframe = ts_saccades.as_dataframe()
+ saccades_dataframe.to_csv(saccades_filepath, index=True)
+ print(f'Saccades saved into {saccades_filepath}')
- # Prepare video exportation at the same format than segment video
- output_video = TobiiVideo.TobiiVideoOutput(gaze_status_video_filepath, tobii_segment_video.stream)
+ # Export gaze status analysis
+ ts_status.as_dataframe().to_csv(gaze_status_filepath, index=True)
+ print(f'Gaze status saved into {gaze_status_filepath}')
- # Video and data loop
- try:
+ # Export gaze metrics
- # Initialise progress bar
- MiscFeatures.printProgressBar(0, tobii_segment_video.duration, prefix = 'Video with movements processing:', suffix = 'Complete', length = 100)
+	# Consider only fixations longer than the duration threshold and saccades shorter than it
+	# This is mostly useful to filter the first and last fixation/saccade, as the time range can start anywhere
+	# (dataframe durations are in µs, hence the *1e3 on the millisecond threshold)
+ filtered_fixations = fixations_dataframe[fixations_dataframe.duration > args.duration_threshold*1e3]
+ filtered_saccades = saccades_dataframe[saccades_dataframe.duration < args.duration_threshold*1e3]
- current_fixation_ts, current_fixation = ts_fixations.pop_first()
- current_fixation_time_counter = 0
+ segment_duration = tobii_segment_video.duration * 1e-3
+ exploitation_time = filtered_fixations.duration.sum() * 1e-3
+ exploration_time = filtered_saccades.duration.sum() * 1e-3
- current_saccade_ts, current_saccade = ts_saccades.pop_first()
+ metrics = {
+ 'segment_duration (ms)': segment_duration,
+ 'fixations_number': filtered_fixations.shape[0],
+ 'fixations_duration_mean (ms)': filtered_fixations.duration.mean() * 1e-3,
+ 'saccades_number': filtered_saccades.shape[0],
+ 'saccades_duration_mean (ms)': filtered_saccades.duration.mean() * 1e-3,
+ 'exploitation_ratio (%)': exploitation_time / segment_duration * 100,
+ 'exploration_ratio (%)': exploration_time / segment_duration * 100,
+		'exploit_explore_ratio': exploitation_time / exploration_time
+ }
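With hypothetical round numbers, the three ratio metrics work out as follows:

    segment_duration = 40000.   # ms (a 40 s segment)
    exploitation_time = 25000.  # ms spent in kept fixations
    exploration_time = 10000.   # ms spent in kept saccades

    exploitation_ratio = exploitation_time / segment_duration * 100  # 62.5 %
    exploration_ratio = exploration_time / segment_duration * 100    # 25.0 %
    exploit_explore_ratio = exploitation_time / exploration_time     # 2.5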
- # Iterate on video frames
- for video_ts, video_frame in tobii_segment_video.frames():
+ metrics_dataframe = pandas.DataFrame(metrics, index=[participant_name])
- # While current time belongs to the current fixation
- if video_ts >= current_fixation_ts and video_ts < current_fixation_ts + current_fixation.duration:
+ metrics_dataframe.to_csv(gaze_metrics_filepath, index=True)
+ print(f'Gaze metrics saved into {gaze_metrics_filepath}')
- current_fixation_time_counter += 1
+	# Prepare video export in the same format as the segment video
+ output_video = TobiiVideo.TobiiVideoOutput(gaze_status_video_filepath, tobii_segment_video.stream)
- # Draw current fixation
- cv.circle(video_frame.matrix, (int(current_fixation.centroid[0]), int(current_fixation.centroid[1])), int(current_fixation.dispersion), (0, 255, 0), current_fixation_time_counter)
+ # Reload aoi scene projection
+ ts_aois_projections = DataStructures.TimeStampedBuffer.from_json(aoi_filepath)
- # Check next fixation
- elif video_ts >= current_fixation_ts + current_fixation.duration and len(ts_fixations) > 0:
+	# Prepare a white image to accumulate fixations and saccades as a heatmap
+ heatmap_matrix = numpy.full((1080, 1920, 3), 255, numpy.uint8)
- current_fixation_ts, current_fixation = ts_fixations.pop_first()
- current_fixation_time_counter = 0
+ # Video and data loop
+ try:
- # While current time belongs to the current saccade
- if video_ts >= current_saccade_ts and current_fixation_time_counter == 0:
+ # Initialise progress bar
+ MiscFeatures.printProgressBar(0, tobii_segment_video.duration, prefix = '\nGazeMovements visualisation:', suffix = 'Complete', length = 100)
- start_ts, start_position = current_saccade.positions.first
- end_ts, end_position = current_saccade.positions.last
+ current_fixation_ts, current_fixation = ts_fixations.pop_first()
+ current_fixation_time_counter = 0
- # Draw saccade
- cv.line(video_frame.matrix, start_position, end_position, (0, 0, 255), 2)
+ current_saccade_ts, current_saccade = ts_saccades.pop_first()
- # Check next saccade
- elif video_ts >= current_saccade_ts + current_saccade.duration and len(ts_saccades) > 0:
+ # Iterate on video frames
+ for video_ts, video_frame in tobii_segment_video.frames():
- current_saccade_ts, current_saccade = ts_saccades.pop_first()
+		visu_matrix = numpy.zeros((1080, 1920, 3), numpy.uint8)
- # Check next gaze
- try:
+ try:
- # Get closest gaze position before video timestamp and remove all gaze positions before
- _, nearest_gaze_position = ts_gaze_positions.pop_first_until(video_ts)
+ # Get next aoi projection at video frame time
+ ts_current_aois, current_aois = ts_aois_projections.pop_first()
- # Draw gaze
- nearest_gaze_position.draw(video_frame.matrix)
+ assert(ts_current_aois == video_ts)
- # Wait for gaze position
- except KeyError:
- pass
+ selected_aoi = AOIFeatures.AreaOfInterest(current_aois.pop(args.aoi))
- # Write segment timing
- cv.rectangle(video_frame.matrix, (0, 0), (550, 50), (63, 63, 63), -1)
- cv.putText(video_frame.matrix, f'Segment time: {int(video_ts/1e3)} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
-
- # Write movement identification parameters
- cv.rectangle(video_frame.matrix, (0, 90), (550, 150), (63, 63, 63), -1)
- cv.putText(video_frame.matrix, f'Dispersion threshold: {args.dispersion_threshold} px', (20, 100), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
- cv.putText(video_frame.matrix, f'Duration threshold: {args.duration_threshold} ms', (20, 140), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
+ # Wait for aois projection
+ except KeyError:
+
+ continue
+
+ # Apply Perspective Transform Algorithm
+ destination = numpy.float32([[0, 0],[1920, 0],[1920, 1080],[0, 1080]])
+ aoi_matrix = cv.getPerspectiveTransform(selected_aoi.astype(numpy.float32), destination)
+ visu_matrix = cv.warpPerspective(video_frame.matrix, aoi_matrix, (1920, 1080))
+
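destination lists the corners as top-left, top-right, bottom-right, bottom-left, so the selected aoi corners are assumed to follow the same winding; otherwise the warped view comes out flipped or rotated. The same warp on a dummy frame, with hypothetical aoi corners:

    import cv2 as cv
    import numpy

    frame = numpy.zeros((1080, 1920, 3), numpy.uint8)  # stand-in for video_frame.matrix
    aoi_corners = numpy.float32([[300, 200], [1500, 220], [1480, 900], [280, 880]])
    destination = numpy.float32([[0, 0], [1920, 0], [1920, 1080], [0, 1080]])

    aoi_matrix = cv.getPerspectiveTransform(aoi_corners, destination)
    visu = cv.warpPerspective(frame, aoi_matrix, (1920, 1080))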
+ # While current time belongs to the current fixation
+ if video_ts >= current_fixation_ts and video_ts < current_fixation_ts + current_fixation.duration:
+
+ current_fixation_time_counter += 1
+
+ # Draw current fixation
+ cv.circle(visu_matrix, (int(current_fixation.centroid[0]), int(current_fixation.centroid[1])), int(current_fixation.dispersion), (0, 255, 0), current_fixation_time_counter)
+
+			# Draw current fixation into the heatmap image too
+ cv.circle(heatmap_matrix, (int(current_fixation.centroid[0]), int(current_fixation.centroid[1])), int(current_fixation.dispersion), (0, 255, 0))
+
+ # Check next fixation
+ elif video_ts >= current_fixation_ts + current_fixation.duration and len(ts_fixations) > 0:
+
+ current_fixation_ts, current_fixation = ts_fixations.pop_first()
+ current_fixation_time_counter = 0
- if args.window:
+ # While current time belongs to the current saccade
+ if video_ts >= current_saccade_ts and current_fixation_time_counter == 0:
- # Close window using 'Esc' key
- if cv.waitKey(1) == 27:
- break
+ start_ts, start_position = current_saccade.positions.first
+ end_ts, end_position = current_saccade.positions.last
- # Display video
- cv.imshow(f'Segment {tobii_segment.id} movements', video_frame.matrix)
+ # Draw saccade
+ int_start_position = (int(start_position[0]), int(start_position[1]))
+ int_end_position = (int(end_position[0]), int(end_position[1]))
- # Write video
- output_video.write(video_frame.matrix)
+ cv.line(visu_matrix, int_start_position, int_end_position, (0, 0, 255), 2)
+ cv.line(heatmap_matrix, int_start_position, int_end_position, (0, 0, 255), 2)
+
+ # Check next saccade
+ elif video_ts >= current_saccade_ts + current_saccade.duration and len(ts_saccades) > 0:
+
+ current_saccade_ts, current_saccade = ts_saccades.pop_first()
+
+ # Check next gaze
+ try:
+
+ # Get closest gaze position before video timestamp and remove all gaze positions before
+ _, nearest_gaze_position = ts_gaze_positions.pop_last_before(video_ts)
+
+ # Draw gaze
+ nearest_gaze_position.draw(visu_matrix)
+
+ # Wait for gaze position
+ except KeyError:
+ pass
+
+ # Write segment timing
+ cv.rectangle(visu_matrix, (0, 0), (550, 50), (63, 63, 63), -1)
+ cv.putText(visu_matrix, f'Segment time: {int(video_ts/1e3)} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
+
+ # Write movement identification parameters
+ cv.rectangle(visu_matrix, (0, 90), (550, 150), (63, 63, 63), -1)
+ cv.putText(visu_matrix, f'Dispersion threshold: {args.dispersion_threshold} px', (20, 100), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
+ cv.putText(visu_matrix, f'Duration threshold: {args.duration_threshold} ms', (20, 140), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
+
+ if args.window:
+
+ # Close window using 'Esc' key
+ if cv.waitKey(1) == 27:
+ break
+
+ # Display video
+ cv.imshow(f'Segment {tobii_segment.id} movements', visu_matrix)
+
+ # Write video
+ output_video.write(visu_matrix)
+
+ # Update Progress Bar
+ progress = video_ts - int(args.time_range[0] * 1e6)
+ MiscFeatures.printProgressBar(progress, tobii_segment_video.duration, prefix = 'Video with movements processing:', suffix = 'Complete', length = 100)
- # Update Progress Bar
- progress = video_ts - int(args.time_range[0] * 1e6)
- MiscFeatures.printProgressBar(progress, tobii_segment_video.duration, prefix = 'Video with movements processing:', suffix = 'Complete', length = 100)
+ # Exit on 'ctrl+C' interruption
+ except KeyboardInterrupt:
+ pass
- # Exit on 'ctrl+C' interruption
- except KeyboardInterrupt:
- pass
+	# Export the accumulated heatmap image
+ cv.imwrite(gaze_status_image_filepath, heatmap_matrix)
- # End output video file
- output_video.close()
- print(f'\nVideo with movements saved into {gaze_status_video_filepath}')
+ # End output video file
+ output_video.close()
+ print(f'\nVideo with movements saved into {gaze_status_video_filepath}\n')
if __name__ == '__main__':