Diffstat (limited to 'src/argaze/utils/contexts/TobiiProGlasses2.py')
-rw-r--r--  src/argaze/utils/contexts/TobiiProGlasses2.py  91
1 file changed, 63 insertions, 28 deletions
diff --git a/src/argaze/utils/contexts/TobiiProGlasses2.py b/src/argaze/utils/contexts/TobiiProGlasses2.py
index 3dd0161..21843a0 100644
--- a/src/argaze/utils/contexts/TobiiProGlasses2.py
+++ b/src/argaze/utils/contexts/TobiiProGlasses2.py
@@ -330,12 +330,12 @@ class TobiiJsonDataParser():
return MarkerPosition(data['marker3d'], data['marker2d'])
-class LiveStream(ArFeatures.LiveProcessingContext):
+class LiveStream(ArFeatures.DataCaptureContext):
@DataFeatures.PipelineStepInit
def __init__(self, **kwargs):
- # Init LiveProcessingContext class
+ # Init DataCaptureContext class
super().__init__()
# Init private attributes
@@ -599,6 +599,12 @@ class LiveStream(ArFeatures.LiveProcessingContext):
logging.debug('> starting battery status thread...')
self.__check_battery_thread.start()
+ # Init calibration status
+ self.__calibration_status = 'uncalibrated'
+
+ # Init recording status
+ self.__recording_status = 'stopped'
+
return self
@DataFeatures.PipelineStepExit
@@ -629,7 +635,6 @@ class LiveStream(ArFeatures.LiveProcessingContext):
threading.Thread.join(self.__video_thread)
-
def __make_socket(self):
"""Create a socket to enable network communication."""
@@ -742,15 +747,15 @@ class LiveStream(ArFeatures.LiveProcessingContext):
# Check image validity
if image is None:
- # Wait for half frame time
- time.sleep(2 / self.__video_fps)
+ # Wait 1ms
+ time.sleep(1e-3)
continue
# Check image time validity
if image.time is None:
- # Wait for half frame time
- time.sleep(2 / self.__video_fps)
+ # Wait 1ms
+ time.sleep(1e-3)
continue
# Store first timestamp
@@ -786,9 +791,6 @@ class LiveStream(ArFeatures.LiveProcessingContext):
while not self._stop_event.is_set():
- # Wait for half frame time
- time.sleep(2 / self.__video_fps)
-
# Lock buffer access
with self.__video_buffer_lock:
@@ -812,6 +814,9 @@ class LiveStream(ArFeatures.LiveProcessingContext):
# Clear buffer
self.__video_buffer = None
+ # Wait 1ms
+ time.sleep(1e-3)
+
def __keep_alive(self):
"""Maintain network connection."""
@@ -931,7 +936,7 @@ class LiveStream(ArFeatures.LiveProcessingContext):
"""Handle whole Tobii glasses calibration process."""
# Reset calibration
- self.__calibration_status = None
+ self.__calibration_status = 'uncalibrated'
self.__calibration_id = None
# Calibration has to be done for a project and a participant
@@ -989,7 +994,7 @@ class LiveStream(ArFeatures.LiveProcessingContext):
"""Create a new recording on the Tobii interface's SD Card."""
# Reset recording
- self.__recording_status = None
+ self.__recording_status = 'stopped'
self.__recording_id = None
# Recording has to be done for a participant
@@ -1033,6 +1038,11 @@ class LiveStream(ArFeatures.LiveProcessingContext):
self.__recording_status = self.__wait_for_recording_status(self.__recording_id, ['paused'])
def get_recording_status(self) -> str:
+ """Get recording status.
+
+ Returns:
+ status: one of 'init', 'starting', 'recording', 'pausing', 'paused', 'stopping', 'stopped', 'done', 'stale' or 'failed'
+ """
return self.__recording_status
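
get_recording_status now documents the full set of states reported by the glasses, and the start/pause/stop paths all funnel through __wait_for_recording_status. A minimal sketch of what such a wait helper could look like, assuming it polls a status callable (the real helper queries the unit over the network; this body is an assumption):

    import time

    def wait_for_status(get_status, expected, timeout=30.0, poll=0.5):
        """Poll get_status() until it returns one of `expected` (sketch only)."""
        status = get_status()
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            if status in expected:
                return status
            if status == 'failed':
                raise RuntimeError('recording failed')
            time.sleep(poll)
            status = get_status()
        raise TimeoutError(f'status still {status!r} after {timeout} s')

    # e.g. wait_for_status(live_stream.get_recording_status, ['paused'])
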
@@ -1067,9 +1077,9 @@ class LiveStream(ArFeatures.LiveProcessingContext):
@DataFeatures.PipelineStepImage
def image(self, **kwargs):
"""
- Get pipeline image with live processing information.
+ Get pipeline image with data capture information.
"""
- logging.debug('LiveProcessingContext.image %s', self.name)
+ logging.debug('DataCaptureContext.image %s', self.name)
image = super().image(**kwargs)
height, width, _ = image.shape
@@ -1077,7 +1087,7 @@ class LiveStream(ArFeatures.LiveProcessingContext):
# Display calibration status
calibration_panel = ((int(width/2), 0), (width, 50))
- if self.__calibration_status is None:
+ if self.__calibration_status == 'uncalibrated':
cv2.rectangle(image, calibration_panel[0], calibration_panel[1], (0, 0, 0), -1)
cv2.putText(image, 'Calibration required', (calibration_panel[0][0]+20, calibration_panel[0][1]+40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
@@ -1090,7 +1100,7 @@ class LiveStream(ArFeatures.LiveProcessingContext):
elif self.__calibration_status != 'calibrated':
cv2.rectangle(image, calibration_panel[0], calibration_panel[1], (0, 0, 127), -1)
- cv2.putText(image, f'Calibration {calibration_status}', (calibration_panel[0][0]+20, calibration_panel[0][1]+40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(image, f'Calibration {self.__calibration_status}', (calibration_panel[0][0]+20, calibration_panel[0][1]+40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
else:
@@ -1115,7 +1125,7 @@ class LiveStream(ArFeatures.LiveProcessingContext):
cv2.putText(image, f'Battery {self.__battery_level}%', (width - 220, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, text_color, 1, cv2.LINE_AA)
# Display recording status
- if self.__recording_status is None:
+ if self.__recording_status == 'stopped':
circle_color = (0, 0, 0)
@@ -1131,7 +1141,7 @@ class LiveStream(ArFeatures.LiveProcessingContext):
return image
-class PostProcessing(ArFeatures.PostProcessingContext):
+class SegmentPlayback(ArFeatures.DataPlaybackContext):
@DataFeatures.PipelineStepInit
def __init__(self, **kwargs):
@@ -1170,6 +1180,7 @@ class PostProcessing(ArFeatures.PostProcessingContext):
self.__sync_event_unit = None
self.__sync_event_factor = None
self.__sync_data_ts = None
+ self.__sync_video_ts = None
self.__sync_ts = None
self.__last_sync_data_ts = None
self.__last_sync_ts = None
@@ -1302,8 +1313,22 @@ class PostProcessing(ArFeatures.PostProcessingContext):
logging.debug('> read image at %i timestamp', video_ts)
- # Process camera image
- self._process_camera_image(timestamp=video_ts, image=video_image)
+ # If sync is required
+ if self.__sync_event is not None:
+
+ # Only process images once a first sync event has happened
+ if self.__sync_ts is not None:
+
+ self.__sync_video_ts = int(self.__sync_ts + video_ts - self.__sync_data_ts)
+
+ # Process camera image
+ self._process_camera_image(timestamp=self.__sync_video_ts, image=video_image)
+
+ # Otherwise, always process images
+ else:
+
+ # Process camera image
+ self._process_camera_image(timestamp=video_ts, image=video_image)
height, width, _ = video_image.shape
@@ -1348,15 +1373,17 @@ class PostProcessing(ArFeatures.PostProcessingContext):
logging.info('Difference between data and sync event timestamps is %i ms', diff_data_ts - diff_sync_ts)
- # Don't process gaze positions if sync is required but sync event not happened yet
- if self.__sync_event is not None and self.__sync_ts is None:
+ # Don't process gaze positions when:
+ # - no image has been processed yet
+ # - no sync event has happened yet
+ if self.__sync_video_ts is None or self.__sync_ts is None:
- continue
+ continue
- # Otherwise, synchronize timestamp with sync event
- elif self.__sync_event is not None and self.__sync_ts is not None:
+ # Otherwise, synchronize timestamp with sync event
+ else:
- data_ts = int(self.__sync_ts + data_ts - self.__sync_data_ts)
+ data_ts = int(self.__sync_ts + data_ts - self.__sync_data_ts)
# Process gaze positions
match data_object_type:
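
Both synchronization hunks rebase raw timestamps onto the sync event clock with the same offset formula: the difference between a raw timestamp and the data timestamp of the sync event is added to the sync event's own timestamp. With hypothetical numbers:

    # Hypothetical values, in milliseconds, to illustrate the formula above
    sync_ts = 120_000       # sync event timestamp on the target clock
    sync_data_ts = 45_000   # data timestamp at which the sync event was seen
    video_ts = 45_040       # raw video timestamp to rebase

    sync_video_ts = int(sync_ts + video_ts - sync_data_ts)
    assert sync_video_ts == 120_040   # 40 ms after the sync event, target clock
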
@@ -1506,7 +1533,15 @@ class PostProcessing(ArFeatures.PostProcessingContext):
raise StopIteration
# Parse data
- data_object, data_object_type = self.__parser.parse_data(status, data)
+ try:
+
+ data_object, data_object_type = self.__parser.parse_data(status, data)
+
+ except Exception as e:
+
+ logging.error('%s while parsing livedata.json.gz at timestamp %i', e, ts)
+
+ return self.__next_data()
# Return millisecond timestamp, data object and type
return ts * 1e-3, data_object, data_object_type
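
The new try/except keeps a corrupt livedata.json.gz record from aborting playback, but retrying via `return self.__next_data()` recurses once per bad record and could hit Python's recursion limit on a long run of unparsable data. An iterative equivalent, sketched with hypothetical read_raw/parse callables:

    def next_valid_data(read_raw, parse, logger):
        """Iterative retry: read_raw() -> (ts, status, data) or raises
        StopIteration; parse(status, data) -> (data_object, data_object_type)."""
        while True:
            ts, status, data = read_raw()
            try:
                data_object, data_object_type = parse(status, data)
            except Exception as e:
                logger.error('%s while parsing livedata.json.gz at timestamp %i', e, ts)
                continue    # skip the bad record instead of recursing
            # Return millisecond timestamp, data object and type
            return ts * 1e-3, data_object, data_object_type
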
@@ -1519,6 +1554,6 @@ class PostProcessing(ArFeatures.PostProcessingContext):
@property
def progression(self) -> float:
- """Get data processing progression between 0 and 1."""
+ """Get data playback progression between 0 and 1."""
return self.__progression
\ No newline at end of file
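
Since PostProcessing is renamed to SegmentPlayback (LiveStream keeps its name and only changes base class), any configuration referencing the old class path needs updating. A hypothetical one-off migration helper, not part of ArGaze, assuming JSON configurations keyed by the context class path:

    import json
    import pathlib

    RENAMES = {
        'argaze.utils.contexts.TobiiProGlasses2.PostProcessing':
            'argaze.utils.contexts.TobiiProGlasses2.SegmentPlayback',
    }

    def migrate(path: pathlib.Path):
        """Rewrite top-level class-path keys in one configuration file."""
        config = json.loads(path.read_text())
        migrated = {RENAMES.get(key, key): value for key, value in config.items()}
        path.write_text(json.dumps(migrated, indent=4))
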