aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorThéo de la Hogue2024-06-27 20:27:20 +0200
committerThéo de la Hogue2024-06-27 20:27:20 +0200
commit46ec14351ab21e74e80651e3ae7fd9e41572bbba (patch)
tree1785e8cf76ba29ba675ba0ab3193b36b456ec377
parent1088273cf8268fecdd0cb1b8b19ef9add7707118 (diff)
downloadargaze-46ec14351ab21e74e80651e3ae7fd9e41572bbba.zip
argaze-46ec14351ab21e74e80651e3ae7fd9e41572bbba.tar.gz
argaze-46ec14351ab21e74e80651e3ae7fd9e41572bbba.tar.bz2
argaze-46ec14351ab21e74e80651e3ae7fd9e41572bbba.tar.xz
Adding previous and next methods. Hiding decimals after the third one.
-rw-r--r--src/argaze/ArFeatures.py12
-rw-r--r--src/argaze/__main__.py61
-rw-r--r--src/argaze/utils/contexts/OpenCV.py172
-rw-r--r--src/argaze/utils/demo/opencv_movie_context.json7
4 files changed, 223 insertions, 29 deletions
diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py
index 38d1759..2d9c281 100644
--- a/src/argaze/ArFeatures.py
+++ b/src/argaze/ArFeatures.py
@@ -1653,7 +1653,7 @@ class ArContext(DataFeatures.PipelineStepObject):
if image.is_timestamped():
info_stack += 1
- cv2.putText(image, f'Frame at {image.timestamp}ms', (20, info_stack * 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(image, f'Frame at {image.timestamp:.3f}ms', (20, info_stack * 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
# Draw watch time if relevant
if issubclass(type(self.__pipeline), ArCamera):
@@ -1755,6 +1755,16 @@ class PostProcessingContext(ArContext):
super().__init__()
self._image_parameters = {**DEFAULT_ARCONTEXT_IMAGE_PARAMETERS, **DEFAULT_POST_PROCESSING_CONTEXT_IMAGE_PARAMETERS}
+
+ def previous(self):
+ """Go to previous frame"""
+
+ raise NotImplementedError
+
+ def next(self):
+ """Go to next frame"""
+
+ raise NotImplementedError
@property
def duration(self) -> int|float:
diff --git a/src/argaze/__main__.py b/src/argaze/__main__.py
index 926d572..f9433c0 100644
--- a/src/argaze/__main__.py
+++ b/src/argaze/__main__.py
@@ -179,30 +179,31 @@ def load_context(args):
key_pressed = cv2.waitKey(40)
#print("key_pressed", key_pressed)
- # Enter: start calibration
- if key_pressed == 13:
-
- if issubclass(type(context), LiveProcessingContext):
+ # f: disable/enable pipeline drawing
+ if key_pressed == 102:
- context.calibrate()
+ draw_pipeline = not draw_pipeline
- # Space bar: pause/resume pipeline processing
- if key_pressed == 32:
+ # h: disable/enable help drawing
+ if key_pressed == 104:
- if issubclass(type(context), PostProcessingContext):
+ draw_help = not draw_help
- if context.is_paused():
+ # Esc: close window
+ if key_pressed == 27:
- context.resume()
+ raise KeyboardInterrupt()
- else:
+ # Keys specific to live processing contexts
+ if issubclass(type(context), LiveProcessingContext):
- context.pause()
+ # Enter: start calibration
+ if key_pressed == 13:
- # r: start/stop recording
- if key_pressed == 114:
+ context.calibrate()
- if issubclass(type(context), LiveProcessingContext):
+ # r: start/stop recording
+ if key_pressed == 114:
# FIXME: the following commands only work with TobiiGlassesPro2.LiveStream context.
recording_status = context.get_recording_status()
@@ -216,21 +217,31 @@ def load_context(args):
context.create_recording()
context.start_recording()
- # f: disable/enable pipeline drawing
- if key_pressed == 102:
+ # Keys specific to post processing contexts
+ if issubclass(type(context), PostProcessingContext):
- draw_pipeline = not draw_pipeline
+ # Space bar: pause/resume pipeline processing
+ if key_pressed == 32:
- # h: disable/enable help drawing
- if key_pressed == 104:
- draw_help = not draw_help
+ if context.is_paused():
- # Esc: close window
- if key_pressed == 27:
+ context.resume()
- raise KeyboardInterrupt()
-
+ else:
+
+ context.pause()
+
+ # Select previous image with left arrow
+ if key_pressed == 2:
+
+ context.previous()
+
+ # Select next image with right arrow
+ if key_pressed == 3:
+
+ context.next()
+
# Window mode off
else:
diff --git a/src/argaze/utils/contexts/OpenCV.py b/src/argaze/utils/contexts/OpenCV.py
index 20b01bc..c2361a8 100644
--- a/src/argaze/utils/contexts/OpenCV.py
+++ b/src/argaze/utils/contexts/OpenCV.py
@@ -18,6 +18,7 @@ __copyright__ = "Copyright 2023, Ecole Nationale de l'Aviation Civile (ENAC)"
__license__ = "GPLv3"
import logging
+import threading
import time
import cv2
@@ -30,13 +31,13 @@ class Window(ArFeatures.LiveProcessingContext):
@DataFeatures.PipelineStepInit
def __init__(self, **kwargs):
- # Init ArContext class
+ # Init LiveProcessingContext class
super().__init__()
@DataFeatures.PipelineStepEnter
def __enter__(self):
- logging.info('OpenCV context starts...')
+ logging.info('OpenCV window context starts...')
# Create a window to display context
cv2.namedWindow(self.name, cv2.WINDOW_AUTOSIZE)
@@ -52,7 +53,7 @@ class Window(ArFeatures.LiveProcessingContext):
@DataFeatures.PipelineStepExit
def __exit__(self, exception_type, exception_value, exception_traceback):
- logging.info('OpenCV context stops...')
+ logging.info('OpenCV window context stops...')
# Delete window
cv2.destroyAllWindows()
@@ -66,3 +67,168 @@ class Window(ArFeatures.LiveProcessingContext):
# Process timestamped gaze position
self._process_gaze_position(timestamp = int((time.time() - self.__start_time) * 1e3), x = x, y = y)
+
+
+class Movie(ArFeatures.PostProcessingContext):
+
+ @DataFeatures.PipelineStepInit
+ def __init__(self, **kwargs):
+
+ # Init PostProcessingContext class
+ super().__init__()
+
+ # Init private attributes
+ self.__path = None
+ self.__movie = None
+ self.__movie_fps = None
+ self.__movie_width = None
+ self.__movie_height = None
+ self.__movie_length = None
+
+ self.__current_image_index = None
+ self.__next_image_index = None
+ self.__refresh = False
+
+ @property
+ def path(self) -> str:
+ """Movie file path."""
+ return self.__path
+
+ @path.setter
+ def path(self, path: str):
+
+ self.__path = path
+
+ # Load movie
+ self.__movie = cv2.VideoCapture(self.__path)
+ self.__movie_fps = self.__movie.get(cv2.CAP_PROP_FPS)
+ self.__movie_width = int(self.__movie.get(cv2.CAP_PROP_FRAME_WIDTH))
+ self.__movie_height = int(self.__movie.get(cv2.CAP_PROP_FRAME_HEIGHT))
+ self.__movie_length = self.__movie.get(cv2.CAP_PROP_FRAME_COUNT)
+
+ @DataFeatures.PipelineStepEnter
+ def __enter__(self):
+
+ logging.info('OpenCV movie context starts...')
+
+ # Create a window to display context
+ cv2.namedWindow(self.name, cv2.WINDOW_AUTOSIZE)
+
+ # Init timestamp
+ self.__start_time = time.time()
+
+ # Attach mouse event callback to window
+ cv2.setMouseCallback(self.name, self.__on_mouse_event)
+
+ # Open reading thread
+ self.__reading_thread = threading.Thread(target=self.__read)
+
+ logging.debug('> starting reading thread...')
+ self.__reading_thread.start()
+
+ return self
+
+ def __read(self):
+ """Iterate on movie images."""
+
+ # Init image selection
+ _, current_image = self.__movie.read()
+ current_image_time = self.__movie.get(cv2.CAP_PROP_POS_MSEC)
+ self.__next_image_index = 0 #int(self.__start * self.__movie_fps)
+
+ while not self._stop_event.is_set():
+
+ # Check pause event (and stop event)
+ while self._pause_event.is_set() and not self._stop_event.is_set():
+
+ logging.debug('> reading is paused at %i', current_image_time)
+
+ time.sleep(1)
+
+ # Select a new image and detect markers once
+ if self.__next_image_index != self.__current_image_index or self.__refresh:
+
+ self.__movie.set(cv2.CAP_PROP_POS_FRAMES, self.__next_image_index)
+
+ success, image = self.__movie.read()
+
+ video_height, video_width, _ = image.shape
+
+ if success:
+
+ # Refresh once
+ self.__refresh = False
+
+ self.__current_image_index = self.__movie.get(cv2.CAP_PROP_POS_FRAMES) - 1
+ current_image_time = self.__movie.get(cv2.CAP_PROP_POS_MSEC)
+
+ # Timestamp image
+ image = DataFeatures.TimestampedImage(image, timestamp=current_image_time)
+
+ # Process movie image
+ self._process_camera_image(timestamp=current_image_time, image=image)
+
+ # Wait
+ time.sleep(1 / self.__movie_fps)
+
+ @DataFeatures.PipelineStepExit
+ def __exit__(self, exception_type, exception_value, exception_traceback):
+
+ logging.info('OpenCV movie context stops...')
+
+ # Close data stream
+ self._stop_event.set()
+
+ # Stop reading thread
+ threading.Thread.join(self.__reading_thread)
+
+ # Delete window
+ cv2.destroyAllWindows()
+
+ def __on_mouse_event(self, event, x, y, flags, param):
+ """Process pointer position."""
+
+ logging.debug('Window.on_mouse_event %i %i', x, y)
+
+ if not self.is_paused():
+
+ # Process timestamped gaze position
+ self._process_gaze_position(timestamp = int((time.time() - self.__start_time) * 1e3), x = x, y = y)
+
+ def refresh(self):
+ """Refresh current frame."""
+ self.__refresh = True
+
+ def previous(self):
+
+ self.__next_image_index -= 1
+
+ # Clip image index
+ if self.__next_image_index < 0:
+ self.__next_image_index = 0
+
+ def next(self):
+
+ self.__next_image_index += 1
+
+ # Clip image index
+ if self.__next_image_index < 0:
+ self.__next_image_index = 0
+
+ @property
+ def duration(self) -> int|float:
+ """Get data duration."""
+
+ return self.__movie_length / self.__movie_fps
+
+ @property
+ def progression(self) -> float:
+ """Get data processing progression between 0 and 1."""
+
+ if self.__current_image_index is not None:
+
+ return self.__current_image_index / self.__movie_length
+
+ else:
+
+ return 0. \ No newline at end of file
diff --git a/src/argaze/utils/demo/opencv_movie_context.json b/src/argaze/utils/demo/opencv_movie_context.json
new file mode 100644
index 0000000..f7da7ee
--- /dev/null
+++ b/src/argaze/utils/demo/opencv_movie_context.json
@@ -0,0 +1,7 @@
+{
+ "argaze.utils.contexts.OpenCV.Movie" : {
+ "name": "OpenCV Window",
+ "path": "./src/argaze/utils/demo/tobii_record/segments/1/fullstream.mp4",
+ "pipeline": "aruco_markers_pipeline.json"
+ }
+} \ No newline at end of file