author      Théo de la Hogue    2022-12-09 01:04:30 +0100
committer   Théo de la Hogue    2022-12-09 01:04:30 +0100
commit      ba1a1eb9d76083b43f2b3c4ffa18651f2e516e6f (patch)
tree        5e51f0fcdb897505840058a2a756cdd3aa5f7dd7 /src
parent      3d8d16d62d4cd738747b72d13efe837236be555c (diff)
Setting black background for scan path visualisation. Removing bad continue. Changing saccade display condition.
Diffstat (limited to 'src')
-rw-r--r--  src/argaze/utils/tobii_segment_gaze_movements_export.py  24
1 file changed, 11 insertions, 13 deletions
diff --git a/src/argaze/utils/tobii_segment_gaze_movements_export.py b/src/argaze/utils/tobii_segment_gaze_movements_export.py
index 426d4bf..0f7bd64 100644
--- a/src/argaze/utils/tobii_segment_gaze_movements_export.py
+++ b/src/argaze/utils/tobii_segment_gaze_movements_export.py
@@ -270,7 +270,7 @@ def main():
ts_aois_projections = DataStructures.TimeStampedBuffer.from_json(aoi_filepath)
#
- heatmap_matrix = numpy.full((1080, 1920, 3), 255, numpy.uint8)
+ heatmap_matrix = numpy.zeros((1080, 1920, 3), numpy.uint8)
# Video and data loop
try:
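The hunk above switches the scan path canvas from a white background (numpy.full with 255) to a black one. A minimal sketch of the two initialisations, assuming only the 1080x1920 BGR frame size shown in the diff:

import numpy

# Previous behaviour: white canvas, every pixel at 255 on all three BGR channels.
white_canvas = numpy.full((1080, 1920, 3), 255, numpy.uint8)

# New behaviour: zero-filled black canvas for the scan path visualisation.
black_canvas = numpy.zeros((1080, 1920, 3), numpy.uint8)

Drawing calls accumulate on either canvas the same way; the black background simply makes the green fixation circles and red saccade lines drawn below stand out.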
@@ -286,7 +286,7 @@ def main():
# Iterate on video frames
for video_ts, video_frame in tobii_segment_video.frames():
- visu_matrix= numpy.zeros((1080, 1920, 3), numpy.uint8)
+ visu_matrix = numpy.zeros((1080, 1920, 3), numpy.uint8)
try:
@@ -297,15 +297,14 @@ def main():
selected_aoi = AOIFeatures.AreaOfInterest(current_aois.pop(args.aoi))
+ # Apply Perspective Transform Algorithm
+ destination = numpy.float32([[0, 0],[1920, 0],[1920, 1080],[0, 1080]])
+ aoi_matrix = cv.getPerspectiveTransform(selected_aoi.astype(numpy.float32), destination)
+ visu_matrix = cv.warpPerspective(video_frame.matrix, aoi_matrix, (1920, 1080))
+
# Wait for aois projection
except KeyError:
-
- continue
-
- # Apply Perspective Transform Algorithm
- destination = numpy.float32([[0, 0],[1920, 0],[1920, 1080],[0, 1080]])
- aoi_matrix = cv.getPerspectiveTransform(selected_aoi.astype(numpy.float32), destination)
- visu_matrix = cv.warpPerspective(video_frame.matrix, aoi_matrix, (1920, 1080))
+ pass
# While current time belongs to the current fixation
if video_ts >= current_fixation_ts and video_ts < current_fixation_ts + current_fixation.duration:
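This hunk moves the perspective warp inside the try block so it only runs once the AOI projection for the current frame is available, and replaces the continue that skipped such frames entirely with pass, so the plain black frame is still drawn. A minimal sketch of the warp step, assuming the selected AOI is a quad of four (x, y) pixel corners ordered like the destination points:

import numpy
import cv2 as cv

def warp_aoi_to_frame(frame, aoi_corners):
    # Map the AOI quad onto a full 1920x1080 visualisation frame.
    # aoi_corners is assumed to be a (4, 2) array ordered top-left,
    # top-right, bottom-right, bottom-left to match destination.
    destination = numpy.float32([[0, 0], [1920, 0], [1920, 1080], [0, 1080]])
    warp_matrix = cv.getPerspectiveTransform(numpy.float32(aoi_corners), destination)
    return cv.warpPerspective(frame, warp_matrix, (1920, 1080))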
@@ -314,8 +313,6 @@ def main():
# Draw current fixation
cv.circle(visu_matrix, (int(current_fixation.centroid[0]), int(current_fixation.centroid[1])), int(current_fixation.dispersion), (0, 255, 0), current_fixation_time_counter)
-
- #
cv.circle(heatmap_matrix, (int(current_fixation.centroid[0]), int(current_fixation.centroid[1])), int(current_fixation.dispersion), (0, 255, 0))
# Check next fixation
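Fixations are drawn as green circles on both the per-frame visualisation and the persistent heatmap; the empty comment between the two calls is dropped here. A minimal sketch of that drawing step, assuming a pixel centroid and a dispersion radius in pixels (OpenCV wants a positive thickness, or -1 for a filled circle):

import cv2 as cv

def draw_fixation(image, centroid, dispersion, thickness=1):
    # Green circle centred on the fixation centroid, radius equal to its dispersion.
    # In the script the thickness grows with the fixation time counter.
    center = (int(centroid[0]), int(centroid[1]))
    cv.circle(image, center, int(dispersion), (0, 255, 0), thickness)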
@@ -325,7 +322,7 @@ def main():
current_fixation_time_counter = 0
# While current time belongs to the current saccade
- if video_ts >= current_saccade_ts and current_fixation_time_counter == 0:
+ if video_ts >= current_saccade_ts and video_ts <= current_saccade_ts + current_saccade.duration:
start_ts, start_position = current_saccade.positions.first
end_ts, end_position = current_saccade.positions.last
@@ -338,7 +335,7 @@ def main():
cv.line(heatmap_matrix, int_start_position, int_end_position, (0, 0, 255), 2)
# Check next saccade
- elif video_ts >= current_saccade_ts + current_saccade.duration and len(ts_saccades) > 0:
+ elif video_ts > current_saccade_ts + current_saccade.duration and len(ts_saccades) > 0:
current_saccade_ts, current_saccade = ts_saccades.pop_first()
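These two hunks change when a saccade is displayed: it is now drawn for every frame whose timestamp falls inside the saccade's own time window, instead of only while no fixation is being counted, and the buffer advances to the next saccade strictly after that window ends. A sketch of the combined logic, assuming a saccade object exposing .duration and .positions.first/.last and a time-stamped buffer with pop_first(), as in the script:

import cv2 as cv

def update_saccade_display(image, video_ts, saccade_ts, saccade, ts_saccades):
    # Draw the current saccade as a red line while video_ts is inside its window.
    if saccade_ts <= video_ts <= saccade_ts + saccade.duration:
        _, start_position = saccade.positions.first
        _, end_position = saccade.positions.last
        cv.line(image,
                (int(start_position[0]), int(start_position[1])),
                (int(end_position[0]), int(end_position[1])),
                (0, 0, 255), 2)
    # Advance only once the window has fully elapsed and saccades remain.
    elif video_ts > saccade_ts + saccade.duration and len(ts_saccades) > 0:
        saccade_ts, saccade = ts_saccades.pop_first()
    return saccade_ts, saccade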
@@ -350,6 +347,7 @@ def main():
# Draw gaze
nearest_gaze_position.draw(visu_matrix)
+ nearest_gaze_position.draw(heatmap_matrix)
# Wait for gaze position
except KeyError: