Diffstat (limited to 'src')
-rw-r--r--  src/argaze/ArFeatures.py                      | 42
-rw-r--r--  src/argaze/utils/demo_ar_features_run.py      | 18
-rw-r--r--  src/argaze/utils/demo_environment/setup.json  |  1
3 files changed, 37 insertions, 24 deletions
diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py
index 493ca8a..146325a 100644
--- a/src/argaze/ArFeatures.py
+++ b/src/argaze/ArFeatures.py
@@ -19,6 +19,7 @@ from argaze.AreaOfInterest import *
 from argaze.GazeAnalysis import *
 
 import numpy
+import cv2
 
 ArEnvironmentType = TypeVar('ArEnvironment', bound="ArEnvironment")
 # Type definition for type annotation convenience
@@ -144,6 +145,17 @@ class ArEnvironment():
 
                 new_screen_size = screen_data.pop('size')
 
+                # Load background image
+                try:
+
+                    new_screen_background_value = screen_data.pop('background')
+                    new_screen_background = cv2.imread(os.path.join(working_directory, new_screen_background_value))
+                    new_screen_background = cv2.resize(new_screen_background, dsize=(new_screen_size[0], new_screen_size[1]), interpolation=cv2.INTER_CUBIC)
+
+                except:
+
+                    new_screen_background = numpy.zeros((new_screen_size[1], new_screen_size[0], 3)).astype(numpy.uint8)
+
                 # Load gaze movement identifier
                 try:
@@ -160,7 +172,7 @@ class ArEnvironment():
                     gaze_movement_identifier = None
 
                 # Append new screen
-                new_screens[screen_name] = ArScreen.from_scene(new_aoi_scene, screen_name, new_screen_size, gaze_movement_identifier)
+                new_screens[screen_name] = ArScreen.from_scene(new_aoi_scene, screen_name, new_screen_size, new_screen_background, gaze_movement_identifier)
 
             # Append new scene
             new_scenes[scene_name] = ArScene(new_aruco_scene, new_aoi_scene, new_screens, **scene_data)
@@ -204,6 +216,7 @@ class ArEnvironment():
 
             for screen_name, screen in scene.screens.items():
 
+                screen.draw_background()
                 screen.draw_aoi()
                 screen.draw_gaze_position()
                 screen.draw_gaze_movement()
@@ -225,11 +238,6 @@ class ArEnvironment():
 
             # Filter scene markers
             scene_markers, _ = scene.aruco_scene.filter_markers(self.aruco_detector.detected_markers)
 
-            # Reset each scene screens
-            for screen_name, screen in scene.screens.items():
-
-                screen.init()
-
             # Project scene
             try:
@@ -527,11 +535,13 @@ class ArScreen():
 
     Parameters:
         name: name of the screen
         size: screen dimension in pixel.
+        background: image to draw behind
         aoi_screen: AOI 2D scene description ... : see [orthogonal_projection][argaze.ArFeatures.ArScene.orthogonal_projection] and [reframe][argaze.AreaOfInterest.AOI2DScene.reframe] functions.
     """
 
     name: str
     size: tuple[int] = field(default=(1, 1))
+    background: numpy.array = field(default_factory=numpy.array)
     aoi_screen: AOI2DScene.AOI2DScene = field(default_factory=AOI2DScene.AOI2DScene)
     gaze_movement_identifier: GazeFeatures.GazeMovementIdentifier = field(default_factory=GazeFeatures.GazeMovementIdentifier)
@@ -540,17 +550,17 @@ class ArScreen():
         # Define scene attribute: it will be setup by parent scene later
         self._scene = None
 
-        # Init screen
-        self.init()
+        # Init screen image
+        self.draw_background()
 
         # Init gaze data
         self.__gaze_position = GazeFeatures.UnvalidGazePosition()
         self.__gaze_movement = GazeFeatures.UnvalidGazeMovement()
 
     @classmethod
-    def from_scene(self, aoi_scene, aoi_name, size, gaze_movement_identifier) -> ArScreenType:
+    def from_scene(self, aoi_scene, aoi_name, size, background, gaze_movement_identifier) -> ArScreenType:
 
-        return ArScreen(aoi_name, size, aoi_scene.orthogonal_projection.reframe(aoi_name, size), gaze_movement_identifier)
+        return ArScreen(aoi_name, size, background, aoi_scene.orthogonal_projection.reframe(aoi_name, size), gaze_movement_identifier)
 
     @property
     def image(self):
@@ -558,11 +568,6 @@ class ArScreen():
 
         return self.__image
 
-    def init(self) -> ArScreenType:
-        """Initialize screen image and gaze position."""
-
-        self.__image = numpy.zeros((self.size[1], self.size[0], 3)).astype(numpy.uint8)
-
     def look(self, timestamp: int|float, gaze_position: GazeFeatures.GazePosition):
 
         self.__gaze_position = gaze_position * self.size
@@ -580,6 +585,13 @@ class ArScreen():
 
                 print(f'Saccade identified in {self.name} screen')
 
+    def draw_background(self) -> ArScreenType:
+        """Initialize screen image with background image."""
+
+        assert(self.background.shape[0] == self.size[1] and self.background.shape[1] == self.size[0])
+
+        self.__image = self.background.copy()
+
     def draw_aoi(self, color=(255, 255, 255)):
         """Draw aoi into screen image."""
 
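The heart of this change is the optional per-screen background: ArEnvironment.from_json pops a 'background' entry from the screen data, loads and resizes the image, and falls back to a black frame when the entry is missing or the file is unreadable. Below is a minimal sketch of that load-or-fallback behaviour, pulled out of the parser for readability (the standalone function name load_screen_background is hypothetical, not part of the ArGaze API):

    import os

    import cv2
    import numpy

    def load_screen_background(working_directory, screen_data, screen_size):
        """Sketch of the fallback logic this commit adds to ArEnvironment.from_json."""
        try:
            # 'background' is optional: pop() raises KeyError when absent.
            background_value = screen_data.pop('background')
            background = cv2.imread(os.path.join(working_directory, background_value))
            # cv2.imread returns None for an unreadable file, so resize raises
            # and control falls through to the black fallback below.
            background = cv2.resize(background, dsize=(screen_size[0], screen_size[1]), interpolation=cv2.INTER_CUBIC)
        except Exception:
            # Black image matching the declared screen size: (height, width, channels).
            background = numpy.zeros((screen_size[1], screen_size[0], 3)).astype(numpy.uint8)
        return background

Because the image is always resized with cv2.INTER_CUBIC, a background of any resolution is stretched to the declared screen size rather than rejected, which is what lets draw_background assume matching dimensions.
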
""" name: str size: tuple[int] = field(default=(1, 1)) + background: numpy.array = field(default_factory=numpy.array) aoi_screen: AOI2DScene.AOI2DScene = field(default_factory=AOI2DScene.AOI2DScene) gaze_movement_identifier: GazeFeatures.GazeMovementIdentifier = field(default_factory=GazeFeatures.GazeMovementIdentifier) @@ -540,17 +550,17 @@ class ArScreen(): # Define scene attribute: it will be setup by parent scene later self._scene = None - # Init screen - self.init() + # Init screen image + self.draw_background() # Init gaze data self.__gaze_position = GazeFeatures.UnvalidGazePosition() self.__gaze_movement = GazeFeatures.UnvalidGazeMovement() @classmethod - def from_scene(self, aoi_scene, aoi_name, size, gaze_movement_identifier) -> ArScreenType: + def from_scene(self, aoi_scene, aoi_name, size, background, gaze_movement_identifier) -> ArScreenType: - return ArScreen(aoi_name, size, aoi_scene.orthogonal_projection.reframe(aoi_name, size), gaze_movement_identifier) + return ArScreen(aoi_name, size, background, aoi_scene.orthogonal_projection.reframe(aoi_name, size), gaze_movement_identifier) @property def image(self): @@ -558,11 +568,6 @@ class ArScreen(): return self.__image - def init(self) -> ArScreenType: - """Initialize screen image and gaze position.""" - - self.__image = numpy.zeros((self.size[1], self.size[0], 3)).astype(numpy.uint8) - def look(self, timestamp: int|float, gaze_position: GazeFeatures.GazePosition): self.__gaze_position = gaze_position * self.size @@ -580,6 +585,13 @@ class ArScreen(): print(f'Saccade identified in {self.name} screen') + def draw_background(self) -> ArScreenType: + """Initialize screen image with background image.""" + + assert(self.background.shape[0] != self.size[0] or self.background.shape[1] != self.size[1]) + + self.__image = self.background.copy() + def draw_aoi(self, color=(255, 255, 255)): """Draw aoi into screen image.""" diff --git a/src/argaze/utils/demo_ar_features_run.py b/src/argaze/utils/demo_ar_features_run.py index 5c96abc..2ce6731 100644 --- a/src/argaze/utils/demo_ar_features_run.py +++ b/src/argaze/utils/demo_ar_features_run.py @@ -18,22 +18,22 @@ import numpy def main(): """ - Load AR environment from .json file, detect ArUco markers into camera device images and estimate environment pose. + Load AR environment from .json file, detect ArUco markers into camera device images and project it. 
""" current_directory = os.path.dirname(os.path.abspath(__file__)) # Manage arguments parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0]) + parser.add_argument('environment', metavar='ENVIRONMENT', type=str, help='ar environment filepath') parser.add_argument('-d', '--device', metavar='DEVICE', type=int, default=0, help='video capture device id') args = parser.parse_args() # Load AR enviroment - demo_environment_filepath = os.path.join(current_directory, 'demo_environment/setup.json') - demo_environment = ArFeatures.ArEnvironment.from_json(demo_environment_filepath) + ar_environment = ArFeatures.ArEnvironment.from_json(args.environment) # Create a window to display AR environment - cv2.namedWindow(demo_environment.name, cv2.WINDOW_AUTOSIZE) + cv2.namedWindow(ar_environment.name, cv2.WINDOW_AUTOSIZE) # Init timestamp start_time = time.time() @@ -45,10 +45,10 @@ def main(): ts = (time.time() - start_time) * 1e3 # Project gaze posiiton into environment - demo_environment.look(ts, GazeFeatures.GazePosition((x, y))) + ar_environment.look(ts, GazeFeatures.GazePosition((x, y))) # Attach mouse callback to window - cv2.setMouseCallback(demo_environment.name, on_mouse_event) + cv2.setMouseCallback(ar_environment.name, on_mouse_event) # Enable camera video capture video_capture = cv2.VideoCapture(args.device) @@ -67,13 +67,13 @@ def main(): # Try to detect and project environment try: - demo_environment.detect_and_project(video_image) + ar_environment.detect_and_project(video_image) # Draw environment - cv2.imshow(demo_environment.name, demo_environment.image) + cv2.imshow(ar_environment.name, ar_environment.image) # Draw each screens - for scene_name, screen_name, screen_image in demo_environment.screens_image(): + for scene_name, screen_name, screen_image in ar_environment.screens_image(): cv2.imshow(f'{scene_name}:{screen_name}', screen_image) diff --git a/src/argaze/utils/demo_environment/setup.json b/src/argaze/utils/demo_environment/setup.json index f4f1fbe..2044e35 100644 --- a/src/argaze/utils/demo_environment/setup.json +++ b/src/argaze/utils/demo_environment/setup.json @@ -19,6 +19,7 @@ "screens": { "GrayRectangle": { "size": [320, 240], + "background": "screen_background.jpg", "gaze_movement_identifier": { "type": "DispersionThresholdIdentification", "parameters": { |