-rw-r--r--	src/argaze/ArFeatures.py | 10 +++-------
1 file changed, 3 insertions(+), 7 deletions(-)
diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py
index 033e290..0d2062d 100644
--- a/src/argaze/ArFeatures.py
+++ b/src/argaze/ArFeatures.py
@@ -91,7 +91,7 @@ class ArEnvironment():
 				# dict:
 				else:
 
-					new_aruco_detector_parameters = ArUcoDetector.DetectorParameters(**new_detector_data.pop('parameters'))
+					new_aruco_detector_parameters = ArUcoDetector.DetectorParameters(**detector_parameters_value)
 
 			new_aruco_detector = ArUcoDetector.ArUcoDetector(new_aruco_dictionary, new_marker_size, new_optic_parameters, new_aruco_detector_parameters)
 
@@ -184,8 +184,6 @@ class ArScene():
 
 		aoi_scene: AOI 3D scene description that will be projected onto estimated scene once its pose will be estimated : see [project][argaze.ArFeatures.ArScene.project] function below.
 
-		aoi_frames: Link sub AOIs to a parent AOI (e.g. AOIFrame). By default all AOIs are linked to orthogonal_projection.
-
 		aruco_axis: Optional dictionary to define orthogonal axis where each axis is defined by list of 3 markers identifier (first is origin). \
 			This pose estimation strategy is used by [estimate_pose][argaze.ArFeatures.ArScene.estimate_pose] function when at least 3 markers are detected.
 
@@ -198,7 +196,6 @@ class ArScene():
 
 	aruco_scene: ArUcoScene.ArUcoScene = field(default_factory=ArUcoScene.ArUcoScene)
 	aoi_scene: AOI3DScene.AOI3DScene = field(default_factory=AOI3DScene.AOI3DScene)
-	aoi_frames: dict = field(default_factory=dict)
 	aruco_axis: dict = field(default_factory=dict)
 	aruco_aoi: dict = field(default_factory=dict)
 	angle_tolerance: float = field(default=0.)
@@ -212,8 +209,6 @@ class ArScene():
 		# Preprocess orthogonal projection to speed up further aruco aoi processings
 		self.__orthogonal_projection_cache = self.orthogonal_projection
 
-		print(self.aoi_frames)
-
 	def __str__(self) -> str:
 		"""
 		Returns:
@@ -236,9 +231,10 @@ class ArScene():
 		"""
 
 		scene_size = self.aoi_scene.size
+		scene_center = self.aoi_scene.center
 
 		# Center, step back and rotate pose to get whole scene into field of view
-		tvec = self.aoi_scene.center*[-1, 1, 0] + [0, 0, scene_size[1]]
+		tvec = scene_center*[-1, 1, 0] + [0, 0, scene_size[1]]
 		rvec = numpy.array([[-numpy.pi, 0.0, 0.0]])
 
 		# Edit optic intrinsic parameter to capture whole scene
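
Note: the last hunk only factors self.aoi_scene.center out into a local scene_center variable; the pose it produces is unchanged. Below is a minimal standalone sketch of that pose computation, assuming aoi_scene.size and aoi_scene.center behave as length-3 numpy arrays (the sample values are made up for illustration, not taken from ArGaze):

import numpy

# Minimal sketch (not ArGaze code): reproduce the pose built in ArScene.orthogonal_projection.
scene_size = numpy.array([1.0, 0.5, 0.0])      # hypothetical scene bounding size
scene_center = numpy.array([0.5, 0.25, 0.0])   # hypothetical scene center

# Center, step back and rotate pose to get whole scene into field of view:
# mirror X, keep Y, zero Z, then step back along Z by the scene height.
tvec = scene_center * [-1, 1, 0] + [0, 0, scene_size[1]]
rvec = numpy.array([[-numpy.pi, 0.0, 0.0]])    # half-turn around the X axis

print(tvec)  # -> [-0.5, 0.25, 0.5]
print(rvec)  # -> [[-3.14159..., 0., 0.]]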