author     Théo de la Hogue  2022-04-27 13:58:01 +0200
committer  Théo de la Hogue  2022-04-27 13:58:01 +0200
commit     82b864bada496c89325dcaec5fcf062b775d575a (patch)
tree       b7716782532b0ba2e0ac2d24fe90ebd3f7d5904b
parent     0cc4c65d28c236d8bba1c7eeb6be83c905630245 (diff)
Improving video presentation timestamp management: the vts offset is now applied while parsing segment data (TobiiData) rather than inside the video frame iterator (TobiiVideo).
-rw-r--r--  src/argaze/TobiiGlassesPro2/TobiiData.py                    | 25
-rw-r--r--  src/argaze/TobiiGlassesPro2/TobiiVideo.py                   | 37
-rw-r--r--  src/argaze/utils/export_tobii_segment_aruco_visual_scan.py  | 14
-rw-r--r--  src/argaze/utils/export_tobii_segment_fixations.py          |  1
-rw-r--r--  src/argaze/utils/replay_tobii_session.py                    | 16
5 files changed, 40 insertions(+), 53 deletions(-)
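
In short: the video/data synchronisation that was previously done inside the TobiiVideo frame iterator (by passing a vts data buffer) is now done once, while the segment data is parsed in TobiiData. Call sites therefore change roughly as in this sketch, condensed from the utility-script hunks below:

    # Before: the caller had to hand the vts buffer to the frame iterator
    for video_ts, video_frame in tobii_segment_video.frames(tobii_segment_data.vts):
        ...

    # After: timestamps are already expressed in the video clock when data is loaded,
    # so plain iteration is enough
    for video_ts, video_frame in tobii_segment_video.frames():
        ...
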
diff --git a/src/argaze/TobiiGlassesPro2/TobiiData.py b/src/argaze/TobiiGlassesPro2/TobiiData.py
index bcd2264..1b6dacf 100644
--- a/src/argaze/TobiiGlassesPro2/TobiiData.py
+++ b/src/argaze/TobiiGlassesPro2/TobiiData.py
@@ -17,7 +17,9 @@ class TobiiDataSegment(DataStructures.DictObject):
"""Load segment data from segment directory then parse and register each recorded dataflow as a TimeStampedBuffer member of the TobiiSegmentData instance."""
self.__segment_data_path = segment_data_path
- self.__first_ts = 0
+
+ self.__vts_offset = 0
+ self.__vts_ts = -1
ts_data_buffer_dict = {}
@@ -30,11 +32,22 @@ class TobiiDataSegment(DataStructures.DictObject):
# convert timestamp
ts = json_data.pop('ts')
- # keep first timestamp to offset all timestamps
- if self.__first_ts == 0:
- self.__first_ts = ts
+ # watch for vts data to offset timestamps
+ try:
+ self.__vts_offset = json_data['vts']
+ self.__vts_ts = ts
- ts -= self.__first_ts
+ return True # continue
+
+ except KeyError:
+ pass
+
+ # ignore data before first vts entry
+ if self.__vts_ts == -1:
+ return True # continue
+
+ ts -= self.__vts_ts
+ ts += self.__vts_offset
# ignore timestamps out of the given time range
if ts < start_timestamp:
@@ -67,7 +80,7 @@ class TobiiDataSegment(DataStructures.DictObject):
def keys(self):
"""Get all registered data keys"""
- return list(self.__dict__.keys())[2:-1]
+ return list(self.__dict__.keys())[3:-1]
def get_path(self):
return self.__segment_data_path
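
The new parsing keeps the latest 'vts' packet, which pairs a data-clock timestamp (ts) with a video presentation timestamp (vts), and uses it to express every following data sample in the video clock: video_time = ts - vts_ts + vts_offset. A minimal, self-contained sketch of that conversion (names are illustrative, not the actual TobiiDataSegment attributes):

    # Hedged sketch of the conversion introduced above; not the library API.
    def to_video_clock(ts, vts_ts, vts_offset):
        """Shift a data-clock timestamp into the video presentation clock
        using the most recent vts packet (all values in microseconds)."""
        return ts - vts_ts + vts_offset

    # Example: a vts packet recorded at data time 1_000_000 us says the video clock
    # is at 0 us, so a gaze sample stamped 1_250_000 us lands at 250_000 us of video.
    assert to_video_clock(1_250_000, vts_ts=1_000_000, vts_offset=0) == 250_000
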
diff --git a/src/argaze/TobiiGlassesPro2/TobiiVideo.py b/src/argaze/TobiiGlassesPro2/TobiiVideo.py
index f3c8c78..f9a67d3 100644
--- a/src/argaze/TobiiGlassesPro2/TobiiVideo.py
+++ b/src/argaze/TobiiGlassesPro2/TobiiVideo.py
@@ -35,9 +35,6 @@ class TobiiVideoSegment():
self.__width = int(cv.VideoCapture(self.__segment_video_path).get(cv.CAP_PROP_FRAME_WIDTH))
self.__height = int(cv.VideoCapture(self.__segment_video_path).get(cv.CAP_PROP_FRAME_HEIGHT))
- self.__vts_data_buffer = None
- self.__vts_offset = 0
-
self.__start_timestamp = start_timestamp
self.__end_timestamp = end_timestamp
@@ -66,22 +63,8 @@ class TobiiVideoSegment():
def get_vts_offset(self):
return self.__vts_offset
- def frames(self, vts_data_buffer = None):
- """Access to frame iterator and optionnaly setup video / data timestamp synchronisation through vts data buffer."""
-
- self.__vts_data_buffer = vts_data_buffer
-
- # Enable video / data timestamp synchronisation
- if self.__vts_data_buffer != None:
-
- self.__vts_ts, self.__vts = self.__vts_data_buffer.pop_first()
-
- # pop vts buffer until start timestamp
- while self.__start_timestamp > self.__vts.vts:
- if len(self.__vts_data_buffer) > 0:
- self.__vts_ts, self.__vts = self.__vts_data_buffer.pop_first()
-
- self.__vts_offset = (self.__vts_ts - self.__vts.vts)
+ def frames(self):
+ """Access to frame iterator."""
return self.__iter__()
@@ -98,24 +81,16 @@ class TobiiVideoSegment():
video_ts = int(frame.time * 1000000)
+ # Ignore frames before start timestamp
+ if video_ts < self.__start_timestamp:
+ return self.__next__()
+
# Ignore frames after end timestamp
if self.__end_timestamp != None:
if video_ts >= self.__end_timestamp:
raise StopIteration
- # If video / data synchronisation is active
- if self.__vts_data_buffer != None:
-
- if video_ts > self.__vts.vts:
-
- if len(self.__vts_data_buffer) > 0:
-
- self.__vts_ts, self.__vts = self.__vts_data_buffer.pop_first()
- self.__vts_offset = (self.__vts_ts - self.__vts.vts)
-
- video_ts += self.__vts_offset
-
# return micro second timestamp and frame data
return video_ts, TobiiVideoFrame(frame.to_ndarray(format='bgr24'), frame.width, frame.height)
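
With synchronisation moved out of TobiiVideo, the iterator keeps a small job: convert PyAV's fractional frame.time (seconds) into an integer microsecond timestamp, skip frames before the start timestamp and stop at the end timestamp. A tiny worked example of the unit conversion and clamping (values are illustrative):

    # frame.time is in seconds; the iterator returns microsecond timestamps
    frame_time = 12.5                      # hypothetical PyAV frame time
    video_ts = int(frame_time * 1000000)   # 12_500_000 us
    assert video_ts == 12_500_000

    # With start_timestamp = 10 s and end_timestamp = 20 s (in microseconds),
    # this frame is inside the requested range and gets yielded.
    start_timestamp, end_timestamp = 10 * 1000000, 20 * 1000000
    assert start_timestamp <= video_ts < end_timestamp
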
diff --git a/src/argaze/utils/export_tobii_segment_aruco_visual_scan.py b/src/argaze/utils/export_tobii_segment_aruco_visual_scan.py
index 4089d2f..02f5cb2 100644
--- a/src/argaze/utils/export_tobii_segment_aruco_visual_scan.py
+++ b/src/argaze/utils/export_tobii_segment_aruco_visual_scan.py
@@ -101,24 +101,24 @@ def main():
# Initialise progress bar
MiscFeatures.printProgressBar(0, tobii_segment_video.get_duration(), prefix = 'Progress:', suffix = 'Complete', length = 100)
- # Iterate on video frames activating video / data synchronisation through vts data buffer
- for video_ts, video_frame in tobii_segment_video.frames(tobii_segment_data.vts):
+ # Iterate on video frames
+ for video_ts, video_frame in tobii_segment_video.frames():
try:
# Get closest gaze position before video timestamp and remove all gaze positions before
closest_gaze_ts, closest_gaze_position = tobii_ts_gaze_positions.pop_first_until(video_ts)
- # Draw video synchronized gaze position
+ # Draw gaze position
gaze_position = GazeFeatures.GazePosition(int(closest_gaze_position.gp[0] * video_frame.width), int(closest_gaze_position.gp[1] * video_frame.height))
cv.circle(video_frame.matrix, gaze_position.as_tuple(), 4, (0, 255, 255), -1)
# Store gaze position at this time in millisecond
ts_gaze_positions[video_ts/1000] = gaze_position
- # When expected values can't be found
- except (KeyError, AttributeError, ValueError):
- pass # keep last gaze position
+ # Wait for gaze position
+ except ValueError:
+ continue
# Track markers with pose estimation and draw them
aruco_tracker.track(video_frame.matrix)
@@ -163,7 +163,7 @@ def main():
output_video.write(video_frame.matrix)
# Update Progress Bar
- progress = video_ts - int(args.time_range[0] * 1000000) # - tobii_segment_video.get_vts_offset() ?
+ progress = video_ts - int(args.time_range[0] * 1000000)
MiscFeatures.printProgressBar(progress, tobii_segment_video.get_duration(), prefix = 'Progress:', suffix = 'Complete', length = 100)
# Exit on 'ctrl+C' interruption
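
The scripts now pair each frame with the closest earlier gaze position and simply skip frames for which no gaze position is available yet (the ValueError case), instead of silently reusing the last one. A condensed sketch of that pairing loop, using the same names as the script above (drawing and export details omitted):

    for video_ts, video_frame in tobii_segment_video.frames():
        try:
            # Closest gaze position recorded before this frame;
            # all earlier positions are dropped from the buffer
            closest_gaze_ts, closest_gaze_position = tobii_ts_gaze_positions.pop_first_until(video_ts)
        except ValueError:
            # No gaze position yet for this frame: wait for the next frame
            continue

        # gp is normalised in [0, 1]; scale it to pixel coordinates
        x = int(closest_gaze_position.gp[0] * video_frame.width)
        y = int(closest_gaze_position.gp[1] * video_frame.height)
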
diff --git a/src/argaze/utils/export_tobii_segment_fixations.py b/src/argaze/utils/export_tobii_segment_fixations.py
index 7fbdf50..b257a88 100644
--- a/src/argaze/utils/export_tobii_segment_fixations.py
+++ b/src/argaze/utils/export_tobii_segment_fixations.py
@@ -51,7 +51,6 @@ def main():
# Access to timestamped gaze position data buffer
tobii_ts_gaze_positions = tobii_segment_data.gidx_l_gp
-
print(f'{len(tobii_ts_gaze_positions)} gaze positions loaded')
# Format tobii gaze data into generic gaze data and store them using millisecond unit timestamp
diff --git a/src/argaze/utils/replay_tobii_session.py b/src/argaze/utils/replay_tobii_session.py
index 0471506..20607c2 100644
--- a/src/argaze/utils/replay_tobii_session.py
+++ b/src/argaze/utils/replay_tobii_session.py
@@ -41,21 +41,21 @@ def main():
# Video and data replay loop
try:
- # Iterate on video frames activating video / data synchronisation through vts data buffer
- for video_ts, video_frame in tobii_segment_video.frames(tobii_segment_data.vts):
+ # Iterate on video frames
+ for video_ts, video_frame in tobii_segment_video.frames():
try:
# Get closest gaze position before video timestamp and remove all gaze positions before
closest_gaze_ts, closest_gaze_position = tobii_ts_gaze_positions.pop_first_until(video_ts)
- # Draw video synchronized gaze pointer
- pointer = (int(closest_gaze_position.gp[0] * video_frame.width), int(closest_gaze_position.gp[1] * video_frame.height))
- cv.circle(video_frame.matrix, pointer, 4, (0, 255, 255), -1)
+ # Draw gaze position
+ gaze_position = (int(closest_gaze_position.gp[0] * video_frame.width), int(closest_gaze_position.gp[1] * video_frame.height))
+ cv.circle(video_frame.matrix, gaze_position.as_tuple(), 4, (0, 255, 255), -1)
- # When expected values can't be found
- except (KeyError, AttributeError, ValueError):
- pass
+ # Wait for gaze position
+ except ValueError:
+ continue
# Close window using 'Esc' key
if cv.waitKey(1) == 27: