From b947573f7dbccb5b2b13b64677192145f2dbb864 Mon Sep 17 00:00:00 2001 From: Theo De La Hogue Date: Fri, 22 Sep 2023 22:06:20 +0200 Subject: Working on AOI frame feature: now 2D AOI in scene frame are merged into 3D AOI in scene layer. --- src/argaze.test/AreaOfInterest/AOIFeatures.py | 16 ++-- src/argaze/ArFeatures.py | 94 ++++++++++++---------- src/argaze/AreaOfInterest/AOI2DScene.py | 31 ++++++- src/argaze/AreaOfInterest/AOI3DScene.py | 9 ++- src/argaze/AreaOfInterest/AOIFeatures.py | 81 ++++++++++++++----- src/argaze/utils/demo_data/aoi_2d_scene.json | 5 ++ src/argaze/utils/demo_data/aoi_3d_scene.obj | 50 ------------ .../utils/demo_data/demo_aruco_markers_setup.json | 12 ++- .../utils/demo_data/demo_gaze_analysis_setup.json | 7 +- src/argaze/utils/demo_gaze_analysis_run.py | 20 ++--- 10 files changed, 175 insertions(+), 150 deletions(-) create mode 100644 src/argaze/utils/demo_data/aoi_2d_scene.json (limited to 'src') diff --git a/src/argaze.test/AreaOfInterest/AOIFeatures.py b/src/argaze.test/AreaOfInterest/AOIFeatures.py index bced0aa..6df33ca 100644 --- a/src/argaze.test/AreaOfInterest/AOIFeatures.py +++ b/src/argaze.test/AreaOfInterest/AOIFeatures.py @@ -118,13 +118,17 @@ class TestAreaOfInterestClass(unittest.TestCase): aoi_2D = AOIFeatures.AreaOfInterest([[0, 0], [0, 2], [2, 2], [2, 0]]) - self.assertEqual(aoi_2D.inner_axis((1, 1)), (0.5, 0.5)) + self.assertEqual(aoi_2D.inner_axis(1, 1), (0.5, 0.5)) def test_outter_axis(self): aoi_2D = AOIFeatures.AreaOfInterest([[0, 0], [0, 2], [2, 2], [2, 0]]) - self.assertEqual(aoi_2D.outter_axis((0.5, 0.5)), (1, 1)) + self.assertEqual(aoi_2D.outter_axis(0.5, 0.5), (1, 1)) + + aoi_3D = AOIFeatures.AreaOfInterest([[1, 0, 0], [1, 0, 2], [1, 2, 2], [1, 2, 0]]) + + self.assertEqual(aoi_3D.outter_axis(0.5, 0.5), (1, 1, 1)) def test_circle_intersection(self): @@ -181,21 +185,15 @@ class TestAOISceneClass(unittest.TestCase): aoi_2d_scene_AB["A"] = AOIFeatures.AreaOfInterest([[0, 0], [0, 1], [1, 1], [1, 0]]) 
aoi_2d_scene_AB["B"] = AOIFeatures.AreaOfInterest([[0, 0], [0, 2], [2, 2], [2, 0]]) - print('aoi_2d_scene_AB vars: ', vars(aoi_2d_scene_AB)) - # Create second scene with C and D aoi aoi_2d_scene_CD = AOIFeatures.AOIScene(2,) aoi_2d_scene_CD["C"] = AOIFeatures.AreaOfInterest([[0, 0], [0, 3], [3, 3], [3, 0]]) aoi_2d_scene_CD["D"] = AOIFeatures.AreaOfInterest([[0, 0], [0, 4], [4, 4], [4, 0]]) - print('aoi_2d_scene_CD vars: ', vars(aoi_2d_scene_CD)) - # Merge first scene and second scene into a third scene aoi_2d_scene_ABCD = aoi_2d_scene_AB | aoi_2d_scene_CD - print('aoi_2d_scene_ABCD vars: ', vars(aoi_2d_scene_ABCD)) - # Check third scene self.assertEqual(aoi_2d_scene_ABCD.dimension, 2) self.assertEqual(len(aoi_2d_scene_ABCD.items()), 4) @@ -205,8 +203,6 @@ class TestAOISceneClass(unittest.TestCase): # Merge second scene into first scene aoi_2d_scene_AB |= aoi_2d_scene_CD - print('aoi_2d_scene_AB vars: ', vars(aoi_2d_scene_AB)) - # Check first scene self.assertEqual(aoi_2d_scene_AB.dimension, 2) self.assertEqual(len(aoi_2d_scene_AB.items()), 4) diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index 96976c2..ad17df2 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -192,6 +192,8 @@ class ArLayer(): except KeyError: + pass + # Add AOI 2D Scene by default new_aoi_scene = AOI2DScene.AOI2DScene() @@ -484,7 +486,7 @@ class ArLayer(): # Draw aoi if required if draw_aoi_scene is not None: - + self.aoi_scene.draw(image, **draw_aoi_scene) # Draw aoi matching if required @@ -728,11 +730,6 @@ class ArFrame(): # Create layer new_layer = ArLayer.from_dict(layer_data, working_directory) - # Project 3D aoi scene layer to get only 2D aoi scene - if new_layer.aoi_scene.dimension == 3: - - new_layer.aoi_scene = new_layer.aoi_scene.orthogonal_projection * new_frame_size - # Append new layer new_layers[layer_name] = new_layer @@ -1099,13 +1096,6 @@ class ArScene(): frame.parent = self - # Preprocess orthogonal projection to speed up further 
processings - self.__orthogonal_projection_cache = {} - - for layer_name, layer in self.layers.items(): - - self.__orthogonal_projection_cache[layer_name] = layer.aoi_scene.orthogonal_projection - def __str__(self) -> str: """ Returns: @@ -1184,54 +1174,70 @@ class ArScene(): for frame_name, frame_data in scene_data.pop('frames').items(): - # Append name - frame_data['name'] = frame_name + # str: relative path to file + if type(frame_data) == str: + + filepath = os.path.join(working_directory, frame_data) + file_format = filepath.split('.')[-1] + + # JSON file format for 2D or 3D dimension + if file_format == 'json': + + new_frame = ArFrame.from_json(filepath) - # Create frame - new_frame = ArFrame.from_dict(frame_data, working_directory) + # dict: + else: - # Look for AOI with same frame name - aoi_frame = None - aoi_frame_found = False - for layer_name, layer in new_layers.items(): + # Append name + frame_data['name'] = frame_name + + new_frame = ArFrame.from_dict(frame_data, working_directory) + + # Look for a scene layer with an AOI named like the frame + for scene_layer_name, scene_layer in new_layers.items(): try: - aoi_frame = layer.aoi_scene[frame_name] - aoi_frame_found = True + frame_3d = scene_layer.aoi_scene[frame_name] - except KeyError: + # Check that the frame have a layer named like this scene layer + aoi_2d_scene = new_frame.layers[scene_layer_name].aoi_scene + + # Transform 2D frame layer AOIs into 3D scene layer AOIs + # Then, add them to scene layer + scene_layer.aoi_scene |= aoi_2d_scene.dimensionalize(frame_3d, new_frame.size) - # AOI name should be unique - break + '''DEPRECATED: but maybe still usefull? 
+ # Project and reframe each layers into corresponding frame layers + for frame_layer_name, frame_layer in new_frame.layers.items(): - if aoi_frame_found: + try: - # Project and reframe each layers into corresponding frame layers - for frame_layer_name, frame_layer in new_frame.layers.items(): + layer = new_layers[frame_layer_name] + + layer_aoi_scene_projection = layer.aoi_scene.orthogonal_projection + aoi_frame_projection = layer_aoi_scene_projection[frame_name] - try: + frame_layer.aoi_scene = layer_aoi_scene_projection.reframe(aoi_frame_projection, new_frame.size) - layer = new_layers[frame_layer_name] - - layer_aoi_scene_projection = layer.aoi_scene.orthogonal_projection - aoi_frame_projection = layer_aoi_scene_projection[frame_name] + if frame_layer.aoi_scan_path is not None: - frame_layer.aoi_scene = layer_aoi_scene_projection.reframe(aoi_frame_projection, new_frame.size) + # Edit expected AOI list by removing AOI with name equals to frame layer name + expected_aois = list(layer.aoi_scene.keys()) - if frame_layer.aoi_scan_path is not None: + if frame_layer_name in expected_aois: + expected_aois.remove(frame_layer_name) - # Edit expected AOI list by removing AOI with name equals to frame layer name - expected_aois = list(layer.aoi_scene.keys()) + frame_layer.aoi_scan_path.expected_aois = expected_aois - if frame_layer_name in expected_aois: - expected_aois.remove(frame_layer_name) + except KeyError: - frame_layer.aoi_scan_path.expected_aois = expected_aois + continue + ''' - except KeyError: + except KeyError as e: - continue + print(e) # Append new frame new_frames[frame_name] = new_frame @@ -1437,7 +1443,7 @@ class ArCamera(ArFrame): # TODO?: Should we prefer to use camera frame AOIMatcher object? if aoi_2d.contains_point(gaze_position.value): - inner_x, inner_y = aoi_2d.clockwise().inner_axis(gaze_position.value) + inner_x, inner_y = aoi_2d.clockwise().inner_axis(*gaze_position.value) # QUESTION: How to project gaze precision? 
inner_gaze_position = GazeFeatures.GazePosition((inner_x, inner_y)) diff --git a/src/argaze/AreaOfInterest/AOI2DScene.py b/src/argaze/AreaOfInterest/AOI2DScene.py index 73c977f..f6b8dcb 100644 --- a/src/argaze/AreaOfInterest/AOI2DScene.py +++ b/src/argaze/AreaOfInterest/AOI2DScene.py @@ -10,7 +10,7 @@ __license__ = "BSD" from typing import TypeVar, Tuple from argaze import DataStructures -from argaze.AreaOfInterest import AOIFeatures +from argaze.AreaOfInterest import AOIFeatures, AOI3DScene from argaze import GazeFeatures import cv2 @@ -19,6 +19,9 @@ import numpy AOI2DSceneType = TypeVar('AOI2DScene', bound="AOI2DScene") # Type definition for type annotation convenience +AOI3DSceneType = TypeVar('AOI3DScene', bound="AOI3DScene") +# Type definition for type annotation convenience + class AOI2DScene(AOIFeatures.AOIScene): """Define AOI 2D scene.""" @@ -89,6 +92,7 @@ class AOI2DScene(AOIFeatures.AOIScene): yield name, aoi, matched_region, aoi_ratio, circle_ratio + '''DEPRECATED: but maybe still usefull? def reframe(self, aoi: AOIFeatures.AreaOfInterest, size: tuple) -> AOI2DSceneType: """ Reframe whole scene to a scene bounded by a 4 vertices 2D AOI. @@ -120,3 +124,28 @@ class AOI2DScene(AOIFeatures.AOIScene): aoi2D_scene[name] = numpy.matmul(aoi2D - Src_origin, M.T) return aoi2D_scene + ''' + def dimensionalize(self, frame_3d: AOIFeatures.AreaOfInterest, size: tuple) -> AOI3DSceneType: + """ + Convert to 3D scene considering it is inside of 3D rectangular frame. 
+ + Parameters: + aoi_frame_3d: rectangle 3D AOI to use as referential plane + size: size of the frame in pixel + + Returns: + AOI 3D scene + """ + + # Vectorize outter_axis function + vfunc = numpy.vectorize(frame_3d.outter_axis) + + # Prepare new AOI 3D scene + aoi3D_scene = AOI3DScene.AOI3DScene() + + for name, aoi2D in self.items(): + + X, Y = (aoi2D / size).T + aoi3D_scene[name] = numpy.array(vfunc(X, Y)).T.view(AOIFeatures.AreaOfInterest) + + return aoi3D_scene diff --git a/src/argaze/AreaOfInterest/AOI3DScene.py b/src/argaze/AreaOfInterest/AOI3DScene.py index 8ea6048..bfe189a 100644 --- a/src/argaze/AreaOfInterest/AOI3DScene.py +++ b/src/argaze/AreaOfInterest/AOI3DScene.py @@ -108,9 +108,9 @@ class AOI3DScene(AOIFeatures.AOIScene): file.close() - # retreive all aoi3D vertices + # retreive all aoi3D vertices and sort them in clockwise order for name, face in faces.items(): - aoi3D = AOIFeatures.AreaOfInterest([ vertices[i-1] for i in face ]) + aoi3D = AOIFeatures.AreaOfInterest([ vertices[i-1] for i in reversed(face) ]) aois_3d[name] = aoi3D except IOError: @@ -149,8 +149,9 @@ class AOI3DScene(AOIFeatures.AOIScene): file.write('s off\n') file.write(vertices_ids + '\n') + '''DEPRECATED: but maybe still usefull? @property - def orthogonal_projection(self) -> AOI2DScene.AOI2DScene: + def orthogonal_projection(self) -> AOI2DSceneType: """ Orthogonal projection of whole scene. @@ -169,7 +170,7 @@ class AOI3DScene(AOIFeatures.AOIScene): K = numpy.array([[scene_size[1]/scene_size[0], 0.0, 0.5], [0.0, 1., 0.5], [0.0, 0.0, 1.0]]) return self.project(tvec, rvec, K) - + ''' def vision_cone(self, cone_radius, cone_height, cone_tip=[0., 0., 0.], cone_direction=[0., 0., 1.]) -> Tuple[AOI3DSceneType, AOI3DSceneType]: """Get AOI which are inside and out a given cone field. 
diff --git a/src/argaze/AreaOfInterest/AOIFeatures.py b/src/argaze/AreaOfInterest/AOIFeatures.py index e5585c5..ffaf882 100644 --- a/src/argaze/AreaOfInterest/AOIFeatures.py +++ b/src/argaze/AreaOfInterest/AOIFeatures.py @@ -127,8 +127,8 @@ class AreaOfInterest(numpy.ndarray): return mpath.Path(self).contains_points([point])[0] - def inner_axis(self, point: tuple) -> tuple: - """Transform the coordinates from the global axis to the AOI's axis. + def inner_axis(self, x: float, y: float) -> tuple: + """Transform a point coordinates from global axis to AOI axis. !!! warning Available for 2D AOI only. !!! danger @@ -143,35 +143,30 @@ class AreaOfInterest(numpy.ndarray): Dst = numpy.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]]).astype(numpy.float32) P = cv2.getPerspectiveTransform(Src, Dst) - X = numpy.append(numpy.array(numpy.array(point) - Src_origin), [1.0]).astype(numpy.float32) + X = numpy.append(numpy.array(numpy.array([x, y]) - Src_origin), [1.0]).astype(numpy.float32) Y = numpy.dot(P, X) La = (Y/Y[2])[:-1] return tuple(numpy.around(La, 4)) - def outter_axis(self, point: tuple) -> tuple: - """Transform the coordinates from the AOI's axis to the global axis. - !!! warning - Available for 2D AOI only. + def outter_axis(self, x: float, y: float) -> tuple: + """Transform a point coordinates from AOI axis to global axis. !!! danger - The AOI points must be sorted in clockwise order.""" - - assert(self.dimension == 2) + The AOI points must be sorted in clockwise order. + !!! 
danger + The AOI must be a rectangle.""" - Src = numpy.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]]).astype(numpy.float32) + # Origin point + O = self[0] - Dst = self.astype(numpy.float32) - Dst_origin = Dst[0] - Dst = (Dst - Dst_origin).reshape((len(Dst)), 2) + # Horizontal axis vector + H = self[1] - self[0] - P = cv2.getPerspectiveTransform(Src, Dst) - X = numpy.array([point[0], point[1], 1.0]).astype(numpy.float32) - Y = numpy.dot(P, X) + # Vertical axis vector + V = self[3] - self[0] - Lp = Dst_origin + (Y/Y[2])[:-1] - - return tuple(numpy.rint(Lp).astype(int)) + return tuple(O + x * H + y * V) def circle_intersection(self, center: tuple, radius: float) -> Tuple[numpy.array, float, float]: """Get intersection shape with a circle, intersection area / AOI area ratio and intersection area / circle area ratio. @@ -353,6 +348,42 @@ class AOIScene(): return output + def __add__(self, add_vector) -> AOISceneType: + """Add vector to scene.""" + + assert(len(add_vector) == self.__dimension) + + for name, area in self.__areas.items(): + + self.__areas[name] = self.__areas[name] + add_vector + + return self + + # Allow n + scene operation + __radd__ = __add__ + + def __sub__(self, sub_vector) -> AOISceneType: + """Sub vector to scene.""" + + assert(len(sub_vector) == self.__dimension) + + for name, area in self.__areas.items(): + + self.__areas[name] = self.__areas[name] - sub_vector + + return self + + def __rsub__(self, rsub_vector) -> AOISceneType: + """RSub vector to scene.""" + + assert(len(rsub_vector) == self.__dimension) + + for name, area in self.__areas.items(): + + self.__areas[name] = rsub_vector - self.__areas[name] + + return self + def __mul__(self, scale_vector) -> AOISceneType: """Scale scene by a vector.""" @@ -367,6 +398,16 @@ class AOIScene(): # Allow n * scene operation __rmul__ = __mul__ + def __truediv__(self, div_vector) -> AOISceneType: + + assert(len(div_vector) == self.__dimension) + + for name, area in self.__areas.items(): + + 
self.__areas[name] = self.__areas[name] / div_vector + + return self + def items(self) -> Tuple[str, AreaOfInterest]: """Iterate over areas.""" diff --git a/src/argaze/utils/demo_data/aoi_2d_scene.json b/src/argaze/utils/demo_data/aoi_2d_scene.json new file mode 100644 index 0000000..a0726e8 --- /dev/null +++ b/src/argaze/utils/demo_data/aoi_2d_scene.json @@ -0,0 +1,5 @@ +{ + "RedSquare": [[268, 203], [576, 203], [576, 510], [268, 510]], + "BlueTriangle":[[960, 664], [1113, 971], [806, 971]], + "GreenCircle":[[1497, 203], [1527, 206], [1556, 215], [1582, 229], [1605, 248], [1624, 271], [1639, 298], [1647, 327], [1650, 357], [1647, 387], [1639, 415], [1624, 442], [1605, 465], [1582, 484], [1556, 498], [1527, 507], [1497, 510], [1467, 507], [1438, 498], [1411, 484], [1388, 465], [1369, 442], [1355, 415], [1346, 387], [1343, 357], [1346, 327], [1355, 298], [1369, 271], [1388, 248], [1411, 229], [1438, 215], [1467, 206]] +} \ No newline at end of file diff --git a/src/argaze/utils/demo_data/aoi_3d_scene.obj b/src/argaze/utils/demo_data/aoi_3d_scene.obj index d32e235..0ce97de 100644 --- a/src/argaze/utils/demo_data/aoi_3d_scene.obj +++ b/src/argaze/utils/demo_data/aoi_3d_scene.obj @@ -1,5 +1,3 @@ -# Blender v3.0.1 OBJ File: 'ar_camera.blend' -# www.blender.org o GrayRectangle v 0.000000 0.000000 0.000000 v 25.000000 0.000000 0.000000 @@ -7,51 +5,3 @@ v 0.000000 14.960000 0.000000 v 25.000000 14.960000 0.000000 s off f 1 2 4 3 -o RedSquare -v 3.497026 8.309391 0.000000 -v 7.504756 8.309391 0.000000 -v 3.497026 12.314838 0.001030 -v 7.504756 12.314838 0.001030 -s off -f 5 6 8 7 -o BlueTriangle -v 10.500295 2.307687 0.000000 -v 14.503224 2.306344 0.000000 -v 12.502419 6.312207 0.001030 -s off -f 9 10 11 -o GreenCircle -v 19.495552 12.311101 0.000000 -v 19.105371 12.272672 0.000000 -v 18.730185 12.158860 0.000000 -v 18.384411 11.974040 0.000000 -v 18.081339 11.725314 0.000000 -v 17.832613 11.422241 0.000000 -v 17.647793 11.076468 0.000000 -v 17.533981 10.701282 0.000000 -v 
17.495552 10.311101 0.000000 -v 17.533981 9.920920 0.000000 -v 17.647793 9.545734 0.000000 -v 17.832613 9.199961 0.000000 -v 18.081339 8.896888 0.000000 -v 18.384411 8.648162 0.000000 -v 18.730185 8.463342 0.000000 -v 19.105371 8.349530 0.000000 -v 19.495552 8.311101 0.000000 -v 19.885733 8.349530 0.000000 -v 20.260920 8.463342 0.000000 -v 20.606693 8.648162 0.000000 -v 20.909765 8.896887 0.000000 -v 21.158491 9.199960 0.000000 -v 21.343311 9.545733 0.000000 -v 21.457123 9.920920 0.000000 -v 21.495552 10.311101 0.000000 -v 21.457123 10.701282 0.000000 -v 21.343311 11.076468 0.000000 -v 21.158491 11.422241 0.000000 -v 20.909765 11.725314 0.000000 -v 20.606693 11.974040 0.000000 -v 20.260920 12.158860 0.000000 -v 19.885733 12.272672 0.000000 -s off -f 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 12 diff --git a/src/argaze/utils/demo_data/demo_aruco_markers_setup.json b/src/argaze/utils/demo_data/demo_aruco_markers_setup.json index 9a3b79f..5168297 100644 --- a/src/argaze/utils/demo_data/demo_aruco_markers_setup.json +++ b/src/argaze/utils/demo_data/demo_aruco_markers_setup.json @@ -53,7 +53,7 @@ }, "frames": { "GrayRectangle": { - "size": [640, 383], + "size": [1920, 1149], "background": "frame_background.jpg", "gaze_movement_identifier": { "DispersionThresholdIdentification": { @@ -65,12 +65,10 @@ "duration_max": 10000 }, "layers": { - "GrayRectangle": { - "aoi_scene": "aoi_3d_scene.obj", + "main_layer": { + "aoi_scene": "aoi_2d_scene.json", "aoi_matcher": { - "FocusPointInside": { - "exclude": ["GrayRectangle"] - } + "FocusPointInside": {} } } }, @@ -91,7 +89,7 @@ } }, "draw_layers": { - "GrayRectangle": { + "main_layer": { "draw_aoi_scene": { "draw_aoi": { "color": [255, 255, 255], diff --git a/src/argaze/utils/demo_data/demo_gaze_analysis_setup.json b/src/argaze/utils/demo_data/demo_gaze_analysis_setup.json index 414a6fe..52945ae 100644 --- a/src/argaze/utils/demo_data/demo_gaze_analysis_setup.json +++ 
b/src/argaze/utils/demo_data/demo_gaze_analysis_setup.json @@ -25,11 +25,10 @@ "size": [320, 240] }, "layers": { - "GrayRectangle": { - "aoi_scene": "aoi_3d_scene.obj", + "main_layer": { + "aoi_scene": "aoi_2d_scene.json", "aoi_matcher": { "DeviationCircleCoverage": { - "exclude": ["GrayRectangle"], "coverage_threshold": 0.5 } }, @@ -64,7 +63,7 @@ "deepness": 0 }, "draw_layers": { - "GrayRectangle": { + "main_layer": { "draw_aoi_scene": { "draw_aoi": { "color": [255, 255, 255], diff --git a/src/argaze/utils/demo_gaze_analysis_run.py b/src/argaze/utils/demo_gaze_analysis_run.py index 465c5db..789657b 100644 --- a/src/argaze/utils/demo_gaze_analysis_run.py +++ b/src/argaze/utils/demo_gaze_analysis_run.py @@ -74,18 +74,18 @@ def main(): # Write last 5 steps of aoi scan path path = '' - for step in ar_frame.layers["GrayRectangle"].aoi_scan_path[-5:]: + for step in ar_frame.layers["main_layer"].aoi_scan_path[-5:]: path += f'> {step.aoi} ' - path += f'> {ar_frame.layers["GrayRectangle"].aoi_scan_path.current_aoi}' + path += f'> {ar_frame.layers["main_layer"].aoi_scan_path.current_aoi}' cv2.putText(frame_image, path, (20, ar_frame.size[1]-40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA) # Display Transition matrix analysis if loaded try: - transition_matrix_analyzer = ar_frame.layers['GrayRectangle'].aoi_scan_path_analyzers["argaze.GazeAnalysis.TransitionMatrix"] + transition_matrix_analyzer = ar_frame.layers["main_layer"].aoi_scan_path_analyzers["argaze.GazeAnalysis.TransitionMatrix"] cv2.putText(frame_image, f'Transition matrix density: {transition_matrix_analyzer.transition_matrix_density:.2f}', (20, ar_frame.size[1]-160), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA) @@ -97,8 +97,8 @@ def main(): if from_aoi != to_aoi and probability > 0.0: - from_center = ar_frame.layers['GrayRectangle'].aoi_scene[from_aoi].center.astype(int) - to_center = ar_frame.layers['GrayRectangle'].aoi_scene[to_aoi].center.astype(int) + from_center = 
ar_frame.layers["main_layer"].aoi_scene[from_aoi].center.astype(int) + to_center = ar_frame.layers["main_layer"].aoi_scene[to_aoi].center.astype(int) start_line = (0.5 * from_center + 0.5 * to_center).astype(int) color = [int(probability*200) + 55, int(probability*200) + 55, int(probability*200) + 55] @@ -112,7 +112,7 @@ def main(): # Display aoi scan path basic metrics analysis if loaded try: - basic_analyzer = ar_frame.layers['GrayRectangle'].aoi_scan_path_analyzers["argaze.GazeAnalysis.Basic"] + basic_analyzer = ar_frame.layers["main_layer"].aoi_scan_path_analyzers["argaze.GazeAnalysis.Basic"] # Write basic analysis cv2.putText(frame_image, f'Step number: {basic_analyzer.steps_number}', (20, ar_frame.size[1]-440), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA) @@ -141,7 +141,7 @@ def main(): # Display aoi scan path K-modified coefficient analysis if loaded try: - aoi_kc_analyzer = ar_frame.layers['GrayRectangle'].aoi_scan_path_analyzers["argaze.GazeAnalysis.KCoefficient"] + aoi_kc_analyzer = ar_frame.layers["main_layer"].aoi_scan_path_analyzers["argaze.GazeAnalysis.KCoefficient"] # Write aoi Kc analysis if aoi_kc_analyzer.K < 0.: @@ -158,7 +158,7 @@ def main(): # Display Lempel-Ziv complexity analysis if loaded try: - lzc_analyzer = ar_frame.layers['GrayRectangle'].aoi_scan_path_analyzers["argaze.GazeAnalysis.LempelZivComplexity"] + lzc_analyzer = ar_frame.layers["main_layer"].aoi_scan_path_analyzers["argaze.GazeAnalysis.LempelZivComplexity"] cv2.putText(frame_image, f'Lempel-Ziv complexity: {lzc_analyzer.lempel_ziv_complexity}', (20, ar_frame.size[1]-200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA) @@ -168,7 +168,7 @@ def main(): # Display N-Gram analysis if loaded try: - ngram_analyzer = ar_frame.layers['GrayRectangle'].aoi_scan_path_analyzers["argaze.GazeAnalysis.NGram"] + ngram_analyzer = ar_frame.layers["main_layer"].aoi_scan_path_analyzers["argaze.GazeAnalysis.NGram"] # Display only 3-gram analysis start = ar_frame.size[1] - 
((len(ngram_analyzer.ngrams_count[3]) + 1) * 40) @@ -188,7 +188,7 @@ def main(): # Display Entropy analysis if loaded try: - entropy_analyzer = ar_frame.layers['GrayRectangle'].aoi_scan_path_analyzers["argaze.GazeAnalysis.Entropy"] + entropy_analyzer = ar_frame.layers["main_layer"].aoi_scan_path_analyzers["argaze.GazeAnalysis.Entropy"] cv2.putText(frame_image, f'Stationary entropy: {entropy_analyzer.stationary_entropy:.3f},', (20, ar_frame.size[1]-280), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA) cv2.putText(frame_image, f'Transition entropy: {entropy_analyzer.transition_entropy:.3f},', (20, ar_frame.size[1]-240), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA) -- cgit v1.1 From 23fa1a7835b3c7cfd976b1d160878289b1f0657c Mon Sep 17 00:00:00 2001 From: Theo De La Hogue Date: Sat, 23 Sep 2023 07:22:23 +0200 Subject: Fixing code annotation. Removing useless documentation section. Fixing documentation cross reference. --- src/argaze/ArFeatures.py | 8 ++++---- src/argaze/ArUcoMarkers/ArUcoCamera.py | 6 +++--- src/argaze/AreaOfInterest/AOI2DScene.py | 9 ++++++--- 3 files changed, 13 insertions(+), 10 deletions(-) (limited to 'src') diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index ad17df2..54ef918 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -1044,7 +1044,7 @@ class ArFrame(): return image - def image(self, **kwargs) -> numpy.array: + def image(self, **kwargs: dict) -> numpy.array: """ Get frame image. @@ -1248,7 +1248,7 @@ class ArScene(): return ArScene(new_scene_name, new_layers, new_frames, **scene_data) - def estimate_pose(self, detected_features) -> Tuple[numpy.array, numpy.array]: + def estimate_pose(self, detected_features: Any) -> Tuple[numpy.array, numpy.array]: """Define abstract estimate scene pose method. 
Parameters: @@ -1298,7 +1298,7 @@ class ArScene(): # Project layer aoi scene yield name, aoi_scene_copy.project(tvec, rvec, self.parent.aruco_detector.optic_parameters.K) - def draw(self, image: numpy.array, **kwargs): + def draw(self, image: numpy.array, **kwargs: dict): """ Draw scene into image. @@ -1495,7 +1495,7 @@ class ArCamera(ArFrame): # Unlock camera frame exploitation self._frame_lock.release() - def image(self, **kwargs) -> numpy.array: + def image(self, **kwargs: dict) -> numpy.array: """ Get frame image. diff --git a/src/argaze/ArUcoMarkers/ArUcoCamera.py b/src/argaze/ArUcoMarkers/ArUcoCamera.py index 4f555fb..4c3f042 100644 --- a/src/argaze/ArUcoMarkers/ArUcoCamera.py +++ b/src/argaze/ArUcoMarkers/ArUcoCamera.py @@ -73,7 +73,7 @@ class ArUcoCamera(ArFeatures.ArCamera): return output @classmethod - def from_dict(self, aruco_camera_data, working_directory: str = None) -> ArUcoCameraType: + def from_dict(self, aruco_camera_data: dict, working_directory: str = None) -> ArUcoCameraType: """ Load ArUcoCamera from dictionary. @@ -211,7 +211,7 @@ class ArUcoCamera(ArFeatures.ArCamera): # Return dection time and exceptions return detection_time, exceptions - def __image(self, draw_detected_markers: dict = None, draw_scenes: dict = None, draw_optic_parameters_grid: dict = None, **kwargs) -> numpy.array: + def __image(self, draw_detected_markers: dict = None, draw_scenes: dict = None, draw_optic_parameters_grid: dict = None, **kwargs: dict) -> numpy.array: """Get frame image with ArUco detection visualisation. Parameters: @@ -253,7 +253,7 @@ class ArUcoCamera(ArFeatures.ArCamera): return image - def image(self, **kwargs) -> numpy.array: + def image(self, **kwargs: dict) -> numpy.array: """ Get frame image. 
diff --git a/src/argaze/AreaOfInterest/AOI2DScene.py b/src/argaze/AreaOfInterest/AOI2DScene.py index f6b8dcb..564f65c 100644 --- a/src/argaze/AreaOfInterest/AOI2DScene.py +++ b/src/argaze/AreaOfInterest/AOI2DScene.py @@ -125,20 +125,23 @@ class AOI2DScene(AOIFeatures.AOIScene): return aoi2D_scene ''' - def dimensionalize(self, frame_3d: AOIFeatures.AreaOfInterest, size: tuple) -> AOI3DSceneType: + def dimensionalize(self, rectangle_3d: AOIFeatures.AreaOfInterest, size: tuple) -> AOI3DSceneType: """ Convert to 3D scene considering it is inside of 3D rectangular frame. Parameters: - aoi_frame_3d: rectangle 3D AOI to use as referential plane + rectangle_3d: rectangle 3D AOI to use as referential plane size: size of the frame in pixel Returns: AOI 3D scene """ + assert(rectangle_3d.dimension == 3) + assert(rectangle_3d.points_number == 4) + # Vectorize outter_axis function - vfunc = numpy.vectorize(frame_3d.outter_axis) + vfunc = numpy.vectorize(rectangle_3d.outter_axis) # Prepare new AOI 3D scene aoi3D_scene = AOI3DScene.AOI3DScene() -- cgit v1.1 From 0410c9980f2c243caa2f2f4f72b814efb72ac654 Mon Sep 17 00:00:00 2001 From: Theo De La Hogue Date: Sat, 23 Sep 2023 07:59:35 +0200 Subject: Fixing missing import. --- src/argaze/ArFeatures.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src') diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index 54ef918..10e9687 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -7,7 +7,7 @@ __credits__ = [] __copyright__ = "Copyright 2023, Ecole Nationale de l'Aviation Civile (ENAC)" __license__ = "BSD" -from typing import TypeVar, Tuple +from typing import TypeVar, Tuple, Any from dataclasses import dataclass, field import json import os -- cgit v1.1 From 998d6c1b6c4216d887f74375e262147a6fbeff67 Mon Sep 17 00:00:00 2001 From: Theo De La Hogue Date: Sat, 23 Sep 2023 08:11:40 +0200 Subject: Fixing map method to use clockwise order. 
--- src/argaze/ArFeatures.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src') diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index 10e9687..a419d93 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -1483,7 +1483,7 @@ class ArCamera(ArFrame): # Apply perspective transform algorithm to fill aoi frame background width, height = frame.size - destination = numpy.float32([[0, height],[width, height],[width, 0],[0, 0]]) + destination = numpy.float32([[0, 0], [width, 0], [width, height], [0, height]]) mapping = cv2.getPerspectiveTransform(aoi_2d.astype(numpy.float32), destination) frame.background = cv2.warpPerspective(self.background, mapping, (width, height)) -- cgit v1.1 From 217d7ffb68ea4ebbc22cd914cf37d24ce3bcc566 Mon Sep 17 00:00:00 2001 From: Theo De La Hogue Date: Mon, 25 Sep 2023 14:46:46 +0200 Subject: Adding a way to load SVG AOI description. Allowing to use shape to describe rectangular or circular 2D AOI in JSON. --- src/argaze/ArFeatures.py | 5 +++ src/argaze/AreaOfInterest/AOI2DScene.py | 62 ++++++++++++++++++++++++++++++++ src/argaze/AreaOfInterest/AOIFeatures.py | 48 +++++++++++++++++++++++-- 3 files changed, 112 insertions(+), 3 deletions(-) (limited to 'src') diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index a419d93..0750cb5 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -180,6 +180,11 @@ class ArLayer(): new_aoi_scene = AOIFeatures.AOIScene.from_json(filepath) + # SVG file format for 2D dimension only + if file_format == 'svg': + + new_aoi_scene = AOIFeatures.AOI2DScene.from_svg(filepath) + # OBJ file format for 3D dimension only elif file_format == 'obj': diff --git a/src/argaze/AreaOfInterest/AOI2DScene.py b/src/argaze/AreaOfInterest/AOI2DScene.py index 564f65c..4dc47f4 100644 --- a/src/argaze/AreaOfInterest/AOI2DScene.py +++ b/src/argaze/AreaOfInterest/AOI2DScene.py @@ -15,6 +15,7 @@ from argaze import GazeFeatures import cv2 import numpy 
+from xml.dom import minidom AOI2DSceneType = TypeVar('AOI2DScene', bound="AOI2DScene") # Type definition for type annotation convenience @@ -29,6 +30,67 @@ class AOI2DScene(AOIFeatures.AOIScene): super().__init__(2, aois_2d) + @classmethod + def from_svg(self, svg_filepath: str) -> AOI2DSceneType: + """ + Load areas from .svg file. + + Parameters: + svg_filepath: path to svg file + + !!! note + Available SVG elements are: path, rect and circle. + + !!! warning + Available SVG path d-string commands are: MoveTo (M) LineTo (L) and ClosePath (Z) commands. + """ + + with minidom.parse(svg_filepath) as description_file: + + new_areas = {} + + # Load SVG path + for path in description_file.getElementsByTagName('path'): + + # Convert d-string into array + d_string = path.getAttribute('d') + + assert(d_string[0] == 'M') + assert(d_string[-1] == 'Z') + + points = [(float(x), float(y)) for x, y in [p.split(',') for p in d_string[1:-1].split('L')]] + + new_areas[path.getAttribute('id')] = AOIFeatures.AreaOfInterest(points) + + # Load SVG rect + for rect in description_file.getElementsByTagName('rect'): + + # Convert rect element into dict + rect_dict = { + 'shape': 'rectangle', + 'x': float(rect.getAttribute('x')), + 'y': float(rect.getAttribute('y')), + 'width': float(rect.getAttribute('width')), + 'height': float(rect.getAttribute('height')) + } + + new_areas[rect.getAttribute('id')] = AOIFeatures.AreaOfInterest.from_dict(rect_dict) + + # Load SVG circle + for circle in description_file.getElementsByTagName('circle'): + + # Convert circle element into dict + circle_dict = { + 'shape': 'circle', + 'cx': float(circle.getAttribute('cx')), + 'cy': float(circle.getAttribute('cy')), + 'radius': float(circle.getAttribute('r')) + } + + new_areas[circle.getAttribute('id')] = AOIFeatures.AreaOfInterest.from_dict(circle_dict) + + return AOI2DScene(new_areas) + def draw(self, image: numpy.array, draw_aoi: dict = None, exclude=[]): """Draw AOI polygons on image. 
diff --git a/src/argaze/AreaOfInterest/AOIFeatures.py b/src/argaze/AreaOfInterest/AOIFeatures.py index ffaf882..debf1fa 100644 --- a/src/argaze/AreaOfInterest/AOIFeatures.py +++ b/src/argaze/AreaOfInterest/AOIFeatures.py @@ -11,6 +11,7 @@ from typing import TypeVar, Tuple from dataclasses import dataclass, field import json import os +import math from argaze import DataStructures @@ -41,6 +42,40 @@ class AreaOfInterest(numpy.ndarray): return repr(self.tolist()) + @classmethod + def from_dict(self, aoi_data: dict, working_directory: str = None) -> AreaOfInterestType: + """Load attributes from dictionary. + + Parameters: + aoi_data: dictionary with attributes to load + working_directory: folder path where to load files when a dictionary value is a relative filepath. + """ + + shape = aoi_data.pop('shape') + + if shape == 'rectangle': + + x = aoi_data.pop('x') + y = aoi_data.pop('y') + width = aoi_data.pop('width') + height = aoi_data.pop('height') + + points = [[x, y], [x+width, y], [x+width, y+height], [x, y+height]] + + return AreaOfInterest(points) + + elif shape == 'circle': + + cx = aoi_data.pop('cx') + cy = aoi_data.pop('cy') + radius = aoi_data.pop('radius') + + # TODO: Use pygeos + N = 32 + points = [(math.cos(2*math.pi / N*x) * radius + cx, math.sin(2*math.pi / N*x) * radius + cy) for x in range(0, N+1)] + + return AreaOfInterest(points) + @property def dimension(self) -> int: """Number of axis coding area points positions.""" @@ -249,8 +284,15 @@ class AOIScene(): # Load areas areas = {} - for name, area in aoi_scene_data.items(): - areas[name] = AreaOfInterest(area) + for area_name, area_data in aoi_scene_data.items(): + + if type(area_data) == list: + + areas[area_name] = AreaOfInterest(area_data) + + elif type(area_data) == dict: + + areas[area_name] = AreaOfInterest.from_dict(area_data) # Default dimension is 0 dimension = 0 @@ -276,7 +318,7 @@ class AOIScene(): aoi_scene_data = json.load(configuration_file) working_directory = 
os.path.dirname(json_filepath) - return AOIScene.from_dict(aoi_scene_data, working_directory) + return AOIScene.from_dict(aoi_scene_data, working_directory) def __getitem__(self, name) -> AreaOfInterest: """Get an AOI from the scene.""" -- cgit v1.1 From 08791230814241baf2283c07cc6b16b00dcccca4 Mon Sep 17 00:00:00 2001 From: Theo De La Hogue Date: Tue, 26 Sep 2023 10:42:41 +0200 Subject: Changing JSON description schema. --- src/argaze/AreaOfInterest/AOIFeatures.py | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) (limited to 'src') diff --git a/src/argaze/AreaOfInterest/AOIFeatures.py b/src/argaze/AreaOfInterest/AOIFeatures.py index debf1fa..dfbb165 100644 --- a/src/argaze/AreaOfInterest/AOIFeatures.py +++ b/src/argaze/AreaOfInterest/AOIFeatures.py @@ -51,24 +51,26 @@ class AreaOfInterest(numpy.ndarray): working_directory: folder path where to load files when a dictionary value is a relative filepath. """ - shape = aoi_data.pop('shape') + # Get first and unique shape + # TODO: allow multiple shapes to describe more complex AOI + shape, shape_data = aoi_data.popitem() - if shape == 'rectangle': + if shape == 'Rectangle': - x = aoi_data.pop('x') - y = aoi_data.pop('y') - width = aoi_data.pop('width') - height = aoi_data.pop('height') + x = shape_data.pop('x') + y = shape_data.pop('y') + width = shape_data.pop('width') + height = shape_data.pop('height') points = [[x, y], [x+width, y], [x+width, y+height], [x, y+height]] return AreaOfInterest(points) - elif shape == 'circle': + elif shape == 'Circle': - cx = aoi_data.pop('cx') - cy = aoi_data.pop('cy') - radius = aoi_data.pop('radius') + cx = shape_data.pop('cx') + cy = shape_data.pop('cy') + radius = shape_data.pop('radius') # TODO: Use pygeos N = 32 @@ -462,7 +464,7 @@ class AOIScene(): @property def dimension(self) -> int: - """Dimension of the AOIs in scene.""" + """Dimension of the AOI in scene.""" return self.__dimension -- cgit v1.1 From 12ae7e20aba323624d360567ea424ac2d315fbc7 
Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 26 Sep 2023 10:47:43 +0200 Subject: Harmonizing AOI/aoi without s at the end. --- src/argaze.test/AreaOfInterest/AOI2DScene.py | 6 +++--- src/argaze.test/AreaOfInterest/AOI3DScene.py | 6 +++--- src/argaze.test/GazeFeatures.py | 4 ++-- src/argaze/ArFeatures.py | 24 +++++++++++----------- src/argaze/AreaOfInterest/AOI2DScene.py | 6 +++--- src/argaze/AreaOfInterest/AOI3DScene.py | 10 ++++----- src/argaze/GazeAnalysis/DeviationCircleCoverage.py | 14 ++++++------- src/argaze/GazeAnalysis/TransitionMatrix.py | 2 +- src/argaze/GazeFeatures.py | 22 ++++++++++---------- 9 files changed, 47 insertions(+), 47 deletions(-) (limited to 'src') diff --git a/src/argaze.test/AreaOfInterest/AOI2DScene.py b/src/argaze.test/AreaOfInterest/AOI2DScene.py index 4e96e98..10ff430 100644 --- a/src/argaze.test/AreaOfInterest/AOI2DScene.py +++ b/src/argaze.test/AreaOfInterest/AOI2DScene.py @@ -187,14 +187,14 @@ class TestTimeStampedAOIScenesClass(unittest.TestCase): aoi_2D_B = AOIFeatures.AreaOfInterest([[1, 1], [1, 2], [2, 2], [2, 1]]) aoi_2d_scene = AOI2DScene.AOI2DScene({"A": aoi_2D_A, "B": aoi_2D_B}) - ts_aois_scenes = AOIFeatures.TimeStampedAOIScenes() + ts_aoi_scenes = AOIFeatures.TimeStampedAOIScenes() - ts_aois_scenes[0] = aoi_2d_scene + ts_aoi_scenes[0] = aoi_2d_scene # Check that only AOIScene can be added with self.assertRaises(AssertionError): - ts_aois_scenes[1] = "This string is not an AOI2DScene" + ts_aoi_scenes[1] = "This string is not an AOI2DScene" if __name__ == '__main__': diff --git a/src/argaze.test/AreaOfInterest/AOI3DScene.py b/src/argaze.test/AreaOfInterest/AOI3DScene.py index b386432..d09f2a8 100644 --- a/src/argaze.test/AreaOfInterest/AOI3DScene.py +++ b/src/argaze.test/AreaOfInterest/AOI3DScene.py @@ -107,14 +107,14 @@ class TestTimeStampedAOIScenesClass(unittest.TestCase): aoi_3D_B = AOIFeatures.AreaOfInterest([[1, 1, 0], [1, 2, 0], [2, 2, 0], [2, 1, 0]]) aoi_3d_scene = AOI3DScene.AOI3DScene({"A": 
aoi_3D_A, "B": aoi_3D_B}) - ts_aois_scenes = AOIFeatures.TimeStampedAOIScenes() + ts_aoi_scenes = AOIFeatures.TimeStampedAOIScenes() - ts_aois_scenes[0] = aoi_3d_scene + ts_aoi_scenes[0] = aoi_3d_scene # Check that only AOIScene can be added with self.assertRaises(AssertionError): - ts_aois_scenes[1] = "This string is not an AOI3DScene" + ts_aoi_scenes[1] = "This string is not an AOI3DScene" if __name__ == '__main__': diff --git a/src/argaze.test/GazeFeatures.py b/src/argaze.test/GazeFeatures.py index d609dd2..b41c7c7 100644 --- a/src/argaze.test/GazeFeatures.py +++ b/src/argaze.test/GazeFeatures.py @@ -497,10 +497,10 @@ class TestAOIScanStepClass(unittest.TestCase): aoi_scan_step = GazeFeatures.AOIScanStep(movements, 'Test') -def build_aoi_scan_path(expected_aois, aoi_path): +def build_aoi_scan_path(expected_aoi, aoi_path): """Build AOI scan path""" - aoi_scan_path = GazeFeatures.AOIScanPath(expected_aois) + aoi_scan_path = GazeFeatures.AOIScanPath(expected_aoi) # Append a hidden last step to allow last given step creation aoi_path.append(aoi_path[-2]) diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index 0750cb5..122efe8 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -96,7 +96,7 @@ DEFAULT_ARLAYER_DRAW_PARAMETERS = { @dataclass class ArLayer(): """ - Defines a space where to make matching of gaze movements and AOIs and inside which those matchings need to be analyzed. + Defines a space where to make matching of gaze movements and AOI and inside which those matchings need to be analyzed. 
Parameters: name: name of the layer @@ -203,10 +203,10 @@ class ArLayer(): new_aoi_scene = AOI2DScene.AOI2DScene() # Edit expected AOI list by removing AOI with name equals to layer name - expected_aois = list(new_aoi_scene.keys()) + expected_aoi = list(new_aoi_scene.keys()) - if new_layer_name in expected_aois: - expected_aois.remove(new_layer_name) + if new_layer_name in expected_aoi: + expected_aoi.remove(new_layer_name) # Load aoi matcher try: @@ -230,13 +230,13 @@ class ArLayer(): try: new_aoi_scan_path_data = layer_data.pop('aoi_scan_path') - new_aoi_scan_path_data['expected_aois'] = expected_aois + new_aoi_scan_path_data['expected_aoi'] = expected_aoi new_aoi_scan_path = GazeFeatures.AOIScanPath(**new_aoi_scan_path_data) except KeyError: new_aoi_scan_path_data = {} - new_aoi_scan_path_data['expected_aois'] = expected_aois + new_aoi_scan_path_data['expected_aoi'] = expected_aoi new_aoi_scan_path = None # Load AOI scan path analyzers @@ -1208,7 +1208,7 @@ class ArScene(): # Check that the frame have a layer named like this scene layer aoi_2d_scene = new_frame.layers[scene_layer_name].aoi_scene - # Transform 2D frame layer AOIs into 3D scene layer AOIs + # Transform 2D frame layer AOI into 3D scene layer AOI # Then, add them to scene layer scene_layer.aoi_scene |= aoi_2d_scene.dimensionalize(frame_3d, new_frame.size) @@ -1228,12 +1228,12 @@ class ArScene(): if frame_layer.aoi_scan_path is not None: # Edit expected AOI list by removing AOI with name equals to frame layer name - expected_aois = list(layer.aoi_scene.keys()) + expected_aoi = list(layer.aoi_scene.keys()) - if frame_layer_name in expected_aois: - expected_aois.remove(frame_layer_name) + if frame_layer_name in expected_aoi: + expected_aoi.remove(frame_layer_name) - frame_layer.aoi_scan_path.expected_aois = expected_aois + frame_layer.aoi_scan_path.expected_aoi = expected_aoi except KeyError: @@ -1353,7 +1353,7 @@ class ArCamera(ArFrame): continue - layer.aoi_scan_path.expected_aois = all_aoi_list + 
layer.aoi_scan_path.expected_aoi = all_aoi_list # Init a lock to share scene projections into camera frame between multiple threads self._frame_lock = threading.Lock() diff --git a/src/argaze/AreaOfInterest/AOI2DScene.py b/src/argaze/AreaOfInterest/AOI2DScene.py index 4dc47f4..a726b23 100644 --- a/src/argaze/AreaOfInterest/AOI2DScene.py +++ b/src/argaze/AreaOfInterest/AOI2DScene.py @@ -26,9 +26,9 @@ AOI3DSceneType = TypeVar('AOI3DScene', bound="AOI3DScene") class AOI2DScene(AOIFeatures.AOIScene): """Define AOI 2D scene.""" - def __init__(self, aois_2d: dict = None): + def __init__(self, aoi_2d: dict = None): - super().__init__(2, aois_2d) + super().__init__(2, aoi_2d) @classmethod def from_svg(self, svg_filepath: str) -> AOI2DSceneType: @@ -121,7 +121,7 @@ class AOI2DScene(AOIFeatures.AOIScene): yield name, aoi, matching def draw_raycast(self, image: numpy.array, pointer:tuple, exclude=[], base_color=(0, 0, 255), matching_color=(0, 255, 0)): - """Draw AOIs with their matching status.""" + """Draw AOI with their matching status.""" for name, aoi, matching in self.raycast(pointer): diff --git a/src/argaze/AreaOfInterest/AOI3DScene.py b/src/argaze/AreaOfInterest/AOI3DScene.py index bfe189a..33a815c 100644 --- a/src/argaze/AreaOfInterest/AOI3DScene.py +++ b/src/argaze/AreaOfInterest/AOI3DScene.py @@ -38,15 +38,15 @@ AOI2DSceneType = TypeVar('AOI2DScene', bound="AOI2DScene") class AOI3DScene(AOIFeatures.AOIScene): """Define AOI 3D scene.""" - def __init__(self, aois_3d: dict = None): + def __init__(self, aoi_3d: dict = None): - super().__init__(3, aois_3d) + super().__init__(3, aoi_3d) @classmethod def from_obj(self, obj_filepath: str) -> AOI3DSceneType: """Load AOI3D scene from .obj file.""" - aois_3d = {} + aoi_3d = {} # regex rules for .obj file parsing OBJ_RX_DICT = { @@ -111,12 +111,12 @@ class AOI3DScene(AOIFeatures.AOIScene): # retreive all aoi3D vertices and sort them in clockwise order for name, face in faces.items(): aoi3D = AOIFeatures.AreaOfInterest([ 
vertices[i-1] for i in reversed(face) ]) - aois_3d[name] = aoi3D + aoi_3d[name] = aoi3D except IOError: raise IOError(f'File not found: {obj_filepath}') - return AOI3DScene(aois_3d) + return AOI3DScene(aoi_3d) def to_obj(self, obj_filepath: str): """Save AOI3D scene into .obj file.""" diff --git a/src/argaze/GazeAnalysis/DeviationCircleCoverage.py b/src/argaze/GazeAnalysis/DeviationCircleCoverage.py index f0decfc..6dadaba 100644 --- a/src/argaze/GazeAnalysis/DeviationCircleCoverage.py +++ b/src/argaze/GazeAnalysis/DeviationCircleCoverage.py @@ -34,7 +34,7 @@ class AOIMatcher(GazeFeatures.AOIMatcher): self.__look_count = 0 self.__looked_aoi_data = (None, None) self.__circle_ratio_sum = {} - self.__aois_coverages = {} + self.__aoi_coverages = {} self.__matched_gaze_movement = None self.__matched_region = None @@ -79,14 +79,14 @@ class AOIMatcher(GazeFeatures.AOIMatcher): self.__looked_aoi_data = most_likely_looked_aoi_data # Calculate looked aoi circle ratio means - self.__aois_coverages = {} + self.__aoi_coverages = {} for aoi_name, circle_ratio_sum in self.__circle_ratio_sum.items(): circle_ratio_mean = circle_ratio_sum / self.__look_count # filter circle ration mean greater than 1 - self.__aois_coverages[aoi_name] = circle_ratio_mean if circle_ratio_mean < 1 else 1 + self.__aoi_coverages[aoi_name] = circle_ratio_mean if circle_ratio_mean < 1 else 1 # Update matched gaze movement self.__matched_gaze_movement = gaze_movement @@ -95,7 +95,7 @@ class AOIMatcher(GazeFeatures.AOIMatcher): self.__matched_region = matched_region # Return - if self.__aois_coverages[most_likely_looked_aoi_data[0]] > self.coverage_threshold: + if self.__aoi_coverages[most_likely_looked_aoi_data[0]] > self.coverage_threshold: return self.__looked_aoi_data @@ -179,8 +179,8 @@ class AOIMatcher(GazeFeatures.AOIMatcher): return self.__looked_aoi_data[0] @property - def aois_coverages(self) -> dict: - """Get all aois coverage means for current fixation. 
+ def aoi_coverages(self) -> dict: + """Get all aoi coverage means for current fixation. It represents the ratio of fixation deviation circle surface that used to cover the aoi.""" - return self.__aois_coverages \ No newline at end of file + return self.__aoi_coverages \ No newline at end of file diff --git a/src/argaze/GazeAnalysis/TransitionMatrix.py b/src/argaze/GazeAnalysis/TransitionMatrix.py index 6f408e4..b346b5a 100644 --- a/src/argaze/GazeAnalysis/TransitionMatrix.py +++ b/src/argaze/GazeAnalysis/TransitionMatrix.py @@ -42,7 +42,7 @@ class AOIScanPathAnalyzer(GazeFeatures.AOIScanPathAnalyzer): row_sum = aoi_scan_path.transition_matrix.apply(lambda row: row.sum(), axis=1) # Editing transition matrix probabilities - # Note: when no transiton starts from an aoi, destination probabilites is equal to 1/S where S is the number of aois + # Note: when no transiton starts from an aoi, destination probabilites is equal to 1/S where S is the number of aoi self.__transition_matrix_probabilities = aoi_scan_path.transition_matrix.apply(lambda row: row.apply(lambda p: p / row_sum[row.name] if row_sum[row.name] > 0 else 1 / row_sum.size), axis=1) # Calculate matrix density diff --git a/src/argaze/GazeFeatures.py b/src/argaze/GazeFeatures.py index 2dd1cab..814753e 100644 --- a/src/argaze/GazeFeatures.py +++ b/src/argaze/GazeFeatures.py @@ -842,13 +842,13 @@ AOIScanPathType = TypeVar('AOIScanPathType', bound="AOIScanPathType") class AOIScanPath(list): """List of aoi scan steps over successive aoi.""" - def __init__(self, expected_aois: list[str] = [], duration_max: int|float = 0): + def __init__(self, expected_aoi: list[str] = [], duration_max: int|float = 0): super().__init__() self.duration_max = duration_max - self.expected_aois = expected_aois + self.expected_aoi = expected_aoi self.__duration = 0 @property @@ -903,13 +903,13 @@ class AOIScanPath(list): return sequence @property - def expected_aois(self): + def expected_aoi(self): """List of all expected aoi.""" - 
return self.__expected_aois + return self.__expected_aoi - @expected_aois.setter - def expected_aois(self, expected_aois: list[str] = []): + @expected_aoi.setter + def expected_aoi(self, expected_aoi: list[str] = []): """Edit list of all expected aoi. !!! warning @@ -917,15 +917,15 @@ class AOIScanPath(list): """ self.clear() - self.__expected_aois = expected_aois + self.__expected_aoi = expected_aoi self.__movements = TimeStampedGazeMovements() self.__current_aoi = '' self.__index = ord('A') self.__aoi_letter = {} self.__letter_aoi = {} - size = len(self.__expected_aois) - self.__transition_matrix = pandas.DataFrame(numpy.zeros((size, size)), index=self.__expected_aois, columns=self.__expected_aois) + size = len(self.__expected_aoi) + self.__transition_matrix = pandas.DataFrame(numpy.zeros((size, size)), index=self.__expected_aoi, columns=self.__expected_aoi) @property def current_aoi(self): @@ -953,7 +953,7 @@ class AOIScanPath(list): !!! warning It could raise AOIScanStepError""" - if looked_aoi not in self.__expected_aois: + if looked_aoi not in self.__expected_aoi: raise AOIScanStepError('AOI not expected', looked_aoi) @@ -1013,7 +1013,7 @@ class AOIScanPath(list): """Get how many fixations are there in the scan path and how many fixation are there in each aoi.""" scan_fixations_count = 0 - aoi_fixations_count = {aoi: 0 for aoi in self.__expected_aois} + aoi_fixations_count = {aoi: 0 for aoi in self.__expected_aoi} for aoi_scan_step in self: -- cgit v1.1 From bd8cb794b3e6500783df86ce1add1fe6382b2f70 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 26 Sep 2023 11:48:27 +0200 Subject: Moving gaze movement positions drawing options into gaze movment identifier modules. 
--- src/argaze/ArFeatures.py | 14 ++++++------ src/argaze/GazeAnalysis/DeviationCircleCoverage.py | 7 +----- .../DispersionThresholdIdentification.py | 13 +++++++---- src/argaze/GazeAnalysis/FocusPointInside.py | 2 +- .../VelocityThresholdIdentification.py | 13 +++++++---- src/argaze/GazeFeatures.py | 10 ++++----- .../utils/demo_data/demo_gaze_analysis_setup.json | 26 ++++++++++++++++------ 7 files changed, 51 insertions(+), 34 deletions(-) (limited to 'src') diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index 122efe8..cdb7130 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -1022,13 +1022,6 @@ class ArFrame(): self.scan_path.draw(image, **draw_scan_path) - # Draw layers if required - if draw_layers is not None: - - for layer_name, draw_layer in draw_layers.items(): - - self.layers[layer_name].draw(image, **draw_layer) - # Draw current fixation if required if draw_fixations is not None and self.gaze_movement_identifier is not None: @@ -1039,6 +1032,13 @@ class ArFrame(): self.gaze_movement_identifier.current_saccade.draw(image, **draw_saccades) + # Draw layers if required + if draw_layers is not None: + + for layer_name, draw_layer in draw_layers.items(): + + self.layers[layer_name].draw(image, **draw_layer) + # Draw current gaze position if required if draw_gaze_positions is not None: diff --git a/src/argaze/GazeAnalysis/DeviationCircleCoverage.py b/src/argaze/GazeAnalysis/DeviationCircleCoverage.py index 6dadaba..d55d8c9 100644 --- a/src/argaze/GazeAnalysis/DeviationCircleCoverage.py +++ b/src/argaze/GazeAnalysis/DeviationCircleCoverage.py @@ -109,7 +109,7 @@ class AOIMatcher(GazeFeatures.AOIMatcher): return (None, None) - def draw(self, image: numpy.array, aoi_scene: AOIFeatures.AOIScene, draw_matched_fixation: dict = None, draw_matched_fixation_positions: dict = None, draw_matched_region: dict = None, draw_looked_aoi: dict = None, update_looked_aoi: bool = False, looked_aoi_name_color: tuple = None, 
looked_aoi_name_offset: tuple = (0, 0)): + def draw(self, image: numpy.array, aoi_scene: AOIFeatures.AOIScene, draw_matched_fixation: dict = None, draw_matched_region: dict = None, draw_looked_aoi: dict = None, update_looked_aoi: bool = False, looked_aoi_name_color: tuple = None, looked_aoi_name_offset: tuple = (0, 0)): """Draw matching into image. Parameters: @@ -132,11 +132,6 @@ class AOIMatcher(GazeFeatures.AOIMatcher): self.__matched_gaze_movement.draw(image, **draw_matched_fixation) - # Draw matched fixation positions if required - if draw_matched_fixation_positions is not None: - - self.__matched_gaze_movement.draw_positions(image, **draw_matched_fixation_positions) - # Draw matched aoi if self.looked_aoi.all() is not None: diff --git a/src/argaze/GazeAnalysis/DispersionThresholdIdentification.py b/src/argaze/GazeAnalysis/DispersionThresholdIdentification.py index 15fddf4..a7b9900 100644 --- a/src/argaze/GazeAnalysis/DispersionThresholdIdentification.py +++ b/src/argaze/GazeAnalysis/DispersionThresholdIdentification.py @@ -73,7 +73,7 @@ class Fixation(GazeFeatures.Fixation): return self - def draw(self, image: numpy.array, deviation_circle_color: tuple = None, duration_border_color: tuple = None, duration_factor: float = 1.): + def draw(self, image: numpy.array, deviation_circle_color: tuple = None, duration_border_color: tuple = None, duration_factor: float = 1., draw_positions: dict = None): """Draw fixation into image. 
Parameters: @@ -82,15 +82,20 @@ class Fixation(GazeFeatures.Fixation): duration_factor: how many pixels per duration unit """ + # Draw duration border if required + if duration_border_color is not None: + + cv2.circle(image, (int(self.focus[0]), int(self.focus[1])), int(self.deviation_max), duration_border_color, int(self.duration * duration_factor)) + # Draw deviation circle if required if deviation_circle_color is not None: cv2.circle(image, (int(self.focus[0]), int(self.focus[1])), int(self.deviation_max), deviation_circle_color, -1) - # Draw duration border if required - if duration_border_color is not None: + # Draw positions if required + if draw_positions is not None: - cv2.circle(image, (int(self.focus[0]), int(self.focus[1])), int(self.deviation_max), duration_border_color, int(self.duration * duration_factor)) + self.draw_positions(image, **draw_positions) @dataclass(frozen=True) class Saccade(GazeFeatures.Saccade): diff --git a/src/argaze/GazeAnalysis/FocusPointInside.py b/src/argaze/GazeAnalysis/FocusPointInside.py index b3651e4..88cfbed 100644 --- a/src/argaze/GazeAnalysis/FocusPointInside.py +++ b/src/argaze/GazeAnalysis/FocusPointInside.py @@ -54,7 +54,7 @@ class AOIMatcher(GazeFeatures.AOIMatcher): return (None, None) - def draw(self, image: numpy.array, aoi_scene: AOIFeatures.AOIScene, draw_matched_fixation: dict = None, draw_matched_fixation_positions: dict = None, draw_looked_aoi: dict = None, looked_aoi_name_color: tuple = None, looked_aoi_name_offset: tuple = (0, 0)): + def draw(self, image: numpy.array, aoi_scene: AOIFeatures.AOIScene, draw_matched_fixation: dict = None, draw_looked_aoi: dict = None, looked_aoi_name_color: tuple = None, looked_aoi_name_offset: tuple = (0, 0)): """Draw matching into image. 
Parameters: diff --git a/src/argaze/GazeAnalysis/VelocityThresholdIdentification.py b/src/argaze/GazeAnalysis/VelocityThresholdIdentification.py index 64931f5..d10f666 100644 --- a/src/argaze/GazeAnalysis/VelocityThresholdIdentification.py +++ b/src/argaze/GazeAnalysis/VelocityThresholdIdentification.py @@ -72,7 +72,7 @@ class Fixation(GazeFeatures.Fixation): return self - def draw(self, image: numpy.array, deviation_circle_color: tuple = None, duration_border_color: tuple = None, duration_factor: float = 1.): + def draw(self, image: numpy.array, deviation_circle_color: tuple = None, duration_border_color: tuple = None, duration_factor: float = 1., draw_positions: dict = None): """Draw fixation into image. Parameters: @@ -81,15 +81,20 @@ class Fixation(GazeFeatures.Fixation): duration_factor: how many pixels per duration unit """ + # Draw duration border if required + if duration_border_color is not None: + + cv2.circle(image, (int(self.focus[0]), int(self.focus[1])), int(self.deviation_max), duration_border_color, int(self.duration * duration_factor)) + # Draw deviation circle if required if deviation_circle_color is not None: cv2.circle(image, (int(self.focus[0]), int(self.focus[1])), int(self.deviation_max), deviation_circle_color, -1) - # Draw duration border if required - if duration_border_color is not None: + # Draw positions if required + if draw_positions is not None: - cv2.circle(image, (int(self.focus[0]), int(self.focus[1])), int(self.deviation_max), duration_border_color, int(self.duration * duration_factor)) + self.draw_positions(image, **draw_positions) @dataclass(frozen=True) class Saccade(GazeFeatures.Saccade): diff --git a/src/argaze/GazeFeatures.py b/src/argaze/GazeFeatures.py index 814753e..523bf2c 100644 --- a/src/argaze/GazeFeatures.py +++ b/src/argaze/GazeFeatures.py @@ -292,16 +292,16 @@ class GazeMovement(): ts_start, start_gaze_position = gaze_positions.pop_first() ts_next, next_gaze_position = gaze_positions.first - # Draw position if 
required - if position_color is not None: - - start_gaze_position.draw(image, position_color, draw_precision=False) - # Draw line between positions if required if line_color is not None: cv2.line(image, (int(start_gaze_position[0]), int(start_gaze_position[1])), (int(next_gaze_position[0]), int(next_gaze_position[1])), line_color, 1) + # Draw position if required + if position_color is not None: + + start_gaze_position.draw(image, position_color, draw_precision=False) + def draw(self, image: numpy.array, **kwargs): """Draw gaze movement into image.""" diff --git a/src/argaze/utils/demo_data/demo_gaze_analysis_setup.json b/src/argaze/utils/demo_data/demo_gaze_analysis_setup.json index 52945ae..fe5d197 100644 --- a/src/argaze/utils/demo_data/demo_gaze_analysis_setup.json +++ b/src/argaze/utils/demo_data/demo_gaze_analysis_setup.json @@ -8,6 +8,7 @@ "duration_min_threshold": 200 } }, + "filter_in_progress_identification": false, "scan_path": { "duration_max": 10000 }, @@ -59,8 +60,7 @@ }, "draw_saccades": { "line_color": [255, 0, 255] - }, - "deepness": 0 + } }, "draw_layers": { "main_layer": { @@ -72,11 +72,11 @@ }, "draw_aoi_matching": { "draw_matched_fixation": { - "deviation_circle_color": [255, 255, 255] - }, - "draw_matched_fixation_positions": { - "position_color": [0, 255, 255], - "line_color": [0, 0, 0] + "deviation_circle_color": [255, 255, 255], + "draw_positions": { + "position_color": [0, 255, 0], + "line_color": [0, 0, 0] + } }, "draw_matched_region": { "color": [0, 255, 0], @@ -91,6 +91,18 @@ } } }, + "draw_fixations": { + "deviation_circle_color": [255, 255, 255], + "duration_border_color": [127, 0, 127], + "duration_factor": 1e-2, + "draw_positions": { + "position_color": [0, 255, 255], + "line_color": [0, 0, 0] + } + }, + "draw_saccades": { + "line_color": [255, 0, 255] + }, "draw_gaze_positions": { "color": [0, 255, 255], "size": 2 -- cgit v1.1 From 5b27713d162e76a205ee46cff25e5d8fe993a15a Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 
26 Sep 2023 12:27:44 +0200 Subject: Fixing SVG loading. --- src/argaze/AreaOfInterest/AOI2DScene.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) (limited to 'src') diff --git a/src/argaze/AreaOfInterest/AOI2DScene.py b/src/argaze/AreaOfInterest/AOI2DScene.py index a726b23..f8599c5 100644 --- a/src/argaze/AreaOfInterest/AOI2DScene.py +++ b/src/argaze/AreaOfInterest/AOI2DScene.py @@ -67,11 +67,12 @@ class AOI2DScene(AOIFeatures.AOIScene): # Convert rect element into dict rect_dict = { - 'shape': 'rectangle', - 'x': float(rect.getAttribute('x')), - 'y': float(rect.getAttribute('y')), - 'width': float(rect.getAttribute('width')), - 'height': float(rect.getAttribute('height')) + "Rectangle": { + 'x': float(rect.getAttribute('x')), + 'y': float(rect.getAttribute('y')), + 'width': float(rect.getAttribute('width')), + 'height': float(rect.getAttribute('height')) + } } new_areas[rect.getAttribute('id')] = AOIFeatures.AreaOfInterest.from_dict(rect_dict) @@ -81,10 +82,11 @@ class AOI2DScene(AOIFeatures.AOIScene): # Convert circle element into dict circle_dict = { - 'shape': 'circle', - 'cx': float(circle.getAttribute('cx')), - 'cy': float(circle.getAttribute('cy')), - 'radius': float(circle.getAttribute('r')) + "Circle": { + 'cx': float(circle.getAttribute('cx')), + 'cy': float(circle.getAttribute('cy')), + 'radius': float(circle.getAttribute('r')) + } } new_areas[circle.getAttribute('id')] = AOIFeatures.AreaOfInterest.from_dict(circle_dict) -- cgit v1.1 From 1f16bf5c37b5fb5d44ed33d78f03e6fdeeac4013 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 26 Sep 2023 12:28:15 +0200 Subject: Updating JSON AOI 2D description demo. 
--- src/argaze/utils/demo_data/aoi_2d_scene.json | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) (limited to 'src') diff --git a/src/argaze/utils/demo_data/aoi_2d_scene.json b/src/argaze/utils/demo_data/aoi_2d_scene.json index a0726e8..ac58b63 100644 --- a/src/argaze/utils/demo_data/aoi_2d_scene.json +++ b/src/argaze/utils/demo_data/aoi_2d_scene.json @@ -1,5 +1,18 @@ { - "RedSquare": [[268, 203], [576, 203], [576, 510], [268, 510]], "BlueTriangle":[[960, 664], [1113, 971], [806, 971]], - "GreenCircle":[[1497, 203], [1527, 206], [1556, 215], [1582, 229], [1605, 248], [1624, 271], [1639, 298], [1647, 327], [1650, 357], [1647, 387], [1639, 415], [1624, 442], [1605, 465], [1582, 484], [1556, 498], [1527, 507], [1497, 510], [1467, 507], [1438, 498], [1411, 484], [1388, 465], [1369, 442], [1355, 415], [1346, 387], [1343, 357], [1346, 327], [1355, 298], [1369, 271], [1388, 248], [1411, 229], [1438, 215], [1467, 206]] + "RedSquare": { + "Rectangle": { + "x": 268, + "y": 203, + "width": 308, + "height": 308 + } + }, + "GreenCircle": { + "Circle": { + "cx": 1497, + "cy": 356, + "radius": 153 + } + } } \ No newline at end of file -- cgit v1.1 From 69d9b4d26d2956d65e0c24fe262071b0c4569c90 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 26 Sep 2023 13:03:25 +0200 Subject: Removing drawing fixation positions option from FocusPointInside module. 
--- src/argaze/GazeAnalysis/FocusPointInside.py | 5 ----- 1 file changed, 5 deletions(-) (limited to 'src') diff --git a/src/argaze/GazeAnalysis/FocusPointInside.py b/src/argaze/GazeAnalysis/FocusPointInside.py index 88cfbed..24c319e 100644 --- a/src/argaze/GazeAnalysis/FocusPointInside.py +++ b/src/argaze/GazeAnalysis/FocusPointInside.py @@ -76,11 +76,6 @@ class AOIMatcher(GazeFeatures.AOIMatcher): self.__matched_gaze_movement.draw(image, **draw_matched_fixation) - # Draw matched fixation positions if required - if draw_matched_fixation_positions is not None: - - self.__matched_gaze_movement.draw_positions(image, **draw_matched_fixation_positions) - # Draw matched aoi if self.looked_aoi.all() is not None: -- cgit v1.1 From 73a71de88dc34374a0e0f6366c3f13714438dfe3 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 26 Sep 2023 13:03:55 +0200 Subject: Updating aruco markers demo configuration. --- .../utils/demo_data/demo_aruco_markers_setup.json | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) (limited to 'src') diff --git a/src/argaze/utils/demo_data/demo_aruco_markers_setup.json b/src/argaze/utils/demo_data/demo_aruco_markers_setup.json index 5168297..e2edc8c 100644 --- a/src/argaze/utils/demo_data/demo_aruco_markers_setup.json +++ b/src/argaze/utils/demo_data/demo_aruco_markers_setup.json @@ -57,7 +57,7 @@ "background": "frame_background.jpg", "gaze_movement_identifier": { "DispersionThresholdIdentification": { - "deviation_max_threshold": 25, + "deviation_max_threshold": 50, "duration_min_threshold": 200 } }, @@ -80,12 +80,12 @@ "heatmap_weight": 0.5, "draw_scan_path": { "draw_fixations": { - "deviation_circle_color": [0, 255, 255], - "duration_border_color": [0, 127, 127], + "deviation_circle_color": [255, 0, 255], + "duration_border_color": [127, 0, 127], "duration_factor": 1e-2 }, "draw_saccades": { - "line_color": [0, 255, 255] + "line_color": [255, 0, 255] } }, "draw_layers": { @@ -100,10 +100,6 @@ "draw_matched_fixation": 
{ "deviation_circle_color": [255, 255, 255] }, - "draw_matched_fixation_positions": { - "position_color": [0, 255, 255], - "line_color": [0, 0, 0] - }, "draw_looked_aoi": { "color": [0, 255, 0], "border_size": 2 @@ -113,6 +109,15 @@ } } }, + "draw_fixations": { + "deviation_circle_color": [255, 255, 255], + "duration_border_color": [127, 0, 127], + "duration_factor": 1e-2, + "draw_positions": { + "position_color": [0, 255, 255], + "line_color": [0, 0, 0] + } + }, "draw_gaze_positions": { "color": [0, 255, 255], "size": 2 -- cgit v1.1 From 9d3cebdf1e3c780c5e1ec6ccc198bcf79e258ee8 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 26 Sep 2023 17:37:05 +0200 Subject: Adding annotations. --- src/argaze/ArFeatures.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'src') diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index cdb7130..edeac6b 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -1410,7 +1410,12 @@ class ArCamera(ArFrame): yield scene_frame def watch(self, image: numpy.array) -> Tuple[float, dict]: - """Detect AR features from image and project scenes into camera frame.""" + """Detect AR features from image and project scenes into camera frame. + + Returns: + - detection_time: AR features detection time in ms + - exceptions: dictionary with exception raised per scene + """ raise NotImplementedError('watch() method not implemented') -- cgit v1.1 From 5838151f49e2ccfdf5bc2bc153cf5b493178bb09 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 26 Sep 2023 17:37:35 +0200 Subject: Adding draw_scenes to aruco markers demo. 
--- src/argaze/utils/demo_data/demo_aruco_markers_setup.json | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'src') diff --git a/src/argaze/utils/demo_data/demo_aruco_markers_setup.json b/src/argaze/utils/demo_data/demo_aruco_markers_setup.json index e2edc8c..c881452 100644 --- a/src/argaze/utils/demo_data/demo_aruco_markers_setup.json +++ b/src/argaze/utils/demo_data/demo_aruco_markers_setup.json @@ -41,6 +41,20 @@ "z": 100, "point_size": 1, "point_color": [0, 0, 255] + }, + "draw_scenes": { + "ArScene Demo": { + "draw_aruco_markers_group": { + "draw_axes": { + "thickness": 3, + "length": 10 + }, + "draw_places": { + "color": [0, 0, 0], + "border_size": 1 + } + } + } } }, "scenes": { -- cgit v1.1 From 06548cb7cb807f42fa42e4777288f67f259ae64a Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 26 Sep 2023 17:43:56 +0200 Subject: Fixing errors returned by mkdocs serve. --- src/argaze/GazeAnalysis/DeviationCircleCoverage.py | 1 - src/argaze/GazeAnalysis/FocusPointInside.py | 1 - 2 files changed, 2 deletions(-) (limited to 'src') diff --git a/src/argaze/GazeAnalysis/DeviationCircleCoverage.py b/src/argaze/GazeAnalysis/DeviationCircleCoverage.py index d55d8c9..f57d432 100644 --- a/src/argaze/GazeAnalysis/DeviationCircleCoverage.py +++ b/src/argaze/GazeAnalysis/DeviationCircleCoverage.py @@ -116,7 +116,6 @@ class AOIMatcher(GazeFeatures.AOIMatcher): image: where to draw aoi_scene: to refresh looked aoi if required draw_matched_fixation: Fixation.draw parameters (which depends of the loaded gaze movement identifier module, if None, no fixation is drawn) - draw_matched_fixation_positions: GazeMovement.draw_positions parameters (if None, no fixation is drawn) draw_matched_region: AOIFeatures.AOI.draw parameters (if None, no matched region is drawn) draw_looked_aoi: AOIFeatures.AOI.draw parameters (if None, no looked aoi is drawn) looked_aoi_name_color: color of text (if None, no looked aoi name is drawn) diff --git 
a/src/argaze/GazeAnalysis/FocusPointInside.py b/src/argaze/GazeAnalysis/FocusPointInside.py index 24c319e..81a9d20 100644 --- a/src/argaze/GazeAnalysis/FocusPointInside.py +++ b/src/argaze/GazeAnalysis/FocusPointInside.py @@ -61,7 +61,6 @@ class AOIMatcher(GazeFeatures.AOIMatcher): image: where to draw aoi_scene: to refresh looked aoi if required draw_matched_fixation: Fixation.draw parameters (which depends of the loaded gaze movement identifier module, if None, no fixation is drawn) - draw_matched_fixation_positions: GazeMovement.draw_positions parameters (if None, no fixation is drawn) draw_looked_aoi: AOIFeatures.AOI.draw parameters (if None, no looked aoi is drawn) looked_aoi_name_color: color of text (if None, no looked aoi name is drawn) looked_aoi_name_offset: ofset of text from the upper left aoi bounding box corner -- cgit v1.1 From 3b523bb755d706ce945e3b9d93416909021f8e58 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Wed, 27 Sep 2023 09:57:42 +0200 Subject: Changing DetectorParameter printing. --- src/argaze/ArUcoMarkers/ArUcoDetector.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) (limited to 'src') diff --git a/src/argaze/ArUcoMarkers/ArUcoDetector.py b/src/argaze/ArUcoMarkers/ArUcoDetector.py index 82c9394..3260d00 100644 --- a/src/argaze/ArUcoMarkers/ArUcoDetector.py +++ b/src/argaze/ArUcoMarkers/ArUcoDetector.py @@ -98,8 +98,16 @@ class DetectorParameters(): return DetectorParameters(**json.load(configuration_file)) - def __str__(self, print_all=False) -> str: - """Detector paremeters string representation.""" + def __str__(self) -> str: + """Detector parameters string representation.""" + + return f'{self}' + + def __format__(self, spec) -> str: + """Formated detector parameters string representation. 
+ + Parameters: + spec: 'modified' to get only modified parameters.""" output = '' @@ -109,7 +117,7 @@ class DetectorParameters(): output += f'\t*{parameter}: {getattr(self.__parameters, parameter)}\n' - elif print_all: + elif spec == "": output += f'\t{parameter}: {getattr(self.__parameters, parameter)}\n' -- cgit v1.1 From eddfdc69b27c1b32ba0001ba4f147810eabec549 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Wed, 27 Sep 2023 18:03:29 +0200 Subject: Adding annotation. --- src/argaze/ArUcoMarkers/ArUcoDetector.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src') diff --git a/src/argaze/ArUcoMarkers/ArUcoDetector.py b/src/argaze/ArUcoMarkers/ArUcoDetector.py index 3260d00..305bee2 100644 --- a/src/argaze/ArUcoMarkers/ArUcoDetector.py +++ b/src/argaze/ArUcoMarkers/ArUcoDetector.py @@ -103,7 +103,7 @@ class DetectorParameters(): return f'{self}' - def __format__(self, spec) -> str: + def __format__(self, spec: str) -> str: """Formated detector parameters string representation. Parameters: -- cgit v1.1 From 66b84b019fe760a2cb9901a9f17b2d202d935ba4 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Wed, 27 Sep 2023 18:03:57 +0200 Subject: Allowing to load ellipse from SVG file. 
--- src/argaze/AreaOfInterest/AOI2DScene.py | 15 +++++++++++++++ src/argaze/AreaOfInterest/AOIFeatures.py | 11 +++++++++++ 2 files changed, 26 insertions(+) (limited to 'src') diff --git a/src/argaze/AreaOfInterest/AOI2DScene.py b/src/argaze/AreaOfInterest/AOI2DScene.py index f8599c5..062044f 100644 --- a/src/argaze/AreaOfInterest/AOI2DScene.py +++ b/src/argaze/AreaOfInterest/AOI2DScene.py @@ -91,6 +91,21 @@ class AOI2DScene(AOIFeatures.AOIScene): new_areas[circle.getAttribute('id')] = AOIFeatures.AreaOfInterest.from_dict(circle_dict) + # Load SVG ellipse + for ellipse in description_file.getElementsByTagName('ellipse'): + + # Convert ellipse element into dict + ellipse_dict = { + "Ellipse": { + 'cx': float(circle.getAttribute('cx')), + 'cy': float(circle.getAttribute('cy')), + 'rx': float(circle.getAttribute('rx')), + 'ry': float(circle.getAttribute('ry')) + } + } + + new_areas[ellipse.getAttribute('id')] = AOIFeatures.AreaOfInterest.from_dict(ellipse_dict) + return AOI2DScene(new_areas) def draw(self, image: numpy.array, draw_aoi: dict = None, exclude=[]): diff --git a/src/argaze/AreaOfInterest/AOIFeatures.py b/src/argaze/AreaOfInterest/AOIFeatures.py index dfbb165..2d5b9b1 100644 --- a/src/argaze/AreaOfInterest/AOIFeatures.py +++ b/src/argaze/AreaOfInterest/AOIFeatures.py @@ -78,6 +78,17 @@ class AreaOfInterest(numpy.ndarray): return AreaOfInterest(points) + elif shape == 'Ellipse': + + cx = shape_data.pop('cx') + cy = shape_data.pop('cy') + rx = shape_data.pop('rx') + ry = shape_data.pop('ry') + + # TODO: Use pygeos + N = 32 + points = [(math.cos(2*math.pi / N*x) * rx + cx, math.sin(2*math.pi / N*x) * ry + cy) for x in range(0, N+1)] + @property def dimension(self) -> int: """Number of axis coding area points positions.""" -- cgit v1.1 From 34c69b2370598476cffb4ec063b8cab7f201b143 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Thu, 28 Sep 2023 23:31:47 +0200 Subject: Fixing code annotations. 
--- src/argaze/ArFeatures.py | 17 +++++------ src/argaze/ArUcoMarkers/ArUcoCamera.py | 15 +++++----- src/argaze/ArUcoMarkers/ArUcoDetector.py | 22 ++++++++------ src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py | 43 ++++++++++++++-------------- 4 files changed, 50 insertions(+), 47 deletions(-) (limited to 'src') diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index edeac6b..84eae12 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -478,7 +478,7 @@ class ArLayer(): Parameters: draw_aoi_scene: AreaOfInterest.AOI2DScene.draw parameters (if None, no aoi scene is drawn) - draw_aoi_matching: AOIMatcher.draw parameters (which depends of the loaded aoi matcher module, if None, no aoi matching is drawn) + draw_aoi_matching: AOIMatcher.draw parameters (which depends of the loaded aoi matcher module, if None, no aoi matching is drawn) """ # Use draw_parameters attribute if no parameters @@ -1069,15 +1069,10 @@ class ArScene(): Define abstract Augmented Reality scene with ArLayers and ArFrames inside. Parameters: - name: name of the scene - layers: dictionary of ArLayers to project once the pose is estimated: see [project][argaze.ArFeatures.ArScene.project] function below. - frames: dictionary to ArFrames to project once the pose is estimated: see [project][argaze.ArFeatures.ArScene.project] function below. - angle_tolerance: Optional angle error tolerance to validate marker pose in degree used into [estimate_pose][argaze.ArFeatures.ArScene.estimate_pose] function. - distance_tolerance: Optional distance error tolerance to validate marker pose in centimeter used into [estimate_pose][argaze.ArFeatures.ArScene.estimate_pose] function. """ name: str @@ -1413,8 +1408,8 @@ class ArCamera(ArFrame): """Detect AR features from image and project scenes into camera frame. Returns: - - detection_time: AR features detection time in ms - - exceptions: dictionary with exception raised per scene + detection time: AR features detection time in ms. 
+ exception: dictionary with exception raised per scene. """ raise NotImplementedError('watch() method not implemented') @@ -1422,7 +1417,8 @@ class ArCamera(ArFrame): def look(self, timestamp: int|float, gaze_position: GazeFeatures.GazePosition): """Project timestamped gaze position into each scene frames. - !!! warning watch method needs to be called first. + !!! warning + watch method needs to be called first. """ # Can't use camera frame when it is locked @@ -1471,7 +1467,8 @@ class ArCamera(ArFrame): def map(self): """Project camera frame background into scene frames background. - .. warning:: watch method needs to be called first. + !!! warning + watch method needs to be called first. """ # Can't use camera frame when it is locked diff --git a/src/argaze/ArUcoMarkers/ArUcoCamera.py b/src/argaze/ArUcoMarkers/ArUcoCamera.py index 4c3f042..4f00a3a 100644 --- a/src/argaze/ArUcoMarkers/ArUcoCamera.py +++ b/src/argaze/ArUcoMarkers/ArUcoCamera.py @@ -37,6 +37,7 @@ class ArUcoCamera(ArFeatures.ArCamera): """ Define an ArCamera based on ArUco marker detection. + Parameters: aruco_detector: ArUco marker detector """ @@ -144,9 +145,9 @@ class ArUcoCamera(ArFeatures.ArCamera): """Detect environment aruco markers from image and project scenes into camera frame. Returns: - - detection_time: aruco marker detection time in ms - - exceptions: dictionary with exception raised per scene - """ + detection time: aruco marker detection time in ms. + exception: dictionary with exception raised per scene. + """ # Detect aruco markers detection_time = self.aruco_detector.detect_markers(image) @@ -215,10 +216,10 @@ class ArUcoCamera(ArFeatures.ArCamera): """Get frame image with ArUco detection visualisation. 
Parameters: - draw_detected_markers: ArucoMarker.draw parameters (if None, no marker drawn) - draw_scenes: ArUcoScene.draw parameters (if None, no scene drawn) - draw_optic_parameters_grid: OpticParameter.draw parameters (if None, no grid drawn) - kwargs: ArCamera.image parameters + draw_detected_markers: ArucoMarker.draw parameters (if None, no marker drawn) + draw_scenes: ArUcoScene.draw parameters (if None, no scene drawn) + draw_optic_parameters_grid: OpticParameter.draw parameters (if None, no grid drawn) + kwargs: ArCamera.image parameters """ # Can't use camera frame when it is locked diff --git a/src/argaze/ArUcoMarkers/ArUcoDetector.py b/src/argaze/ArUcoMarkers/ArUcoDetector.py index 305bee2..490b75b 100644 --- a/src/argaze/ArUcoMarkers/ArUcoDetector.py +++ b/src/argaze/ArUcoMarkers/ArUcoDetector.py @@ -38,7 +38,8 @@ ArUcoDetectorType = TypeVar('ArUcoDetector', bound="ArUcoDetector") class DetectorParameters(): """Wrapper class around ArUco marker detector parameters. - .. note:: More details on [opencv page](https://docs.opencv.org/4.x/d1/dcd/structcv_1_1aruco_1_1DetectorParameters.html) + !!! note + More details on [opencv page](https://docs.opencv.org/4.x/d1/dcd/structcv_1_1aruco_1_1DetectorParameters.html) """ __parameters = aruco.DetectorParameters() @@ -107,7 +108,8 @@ class DetectorParameters(): """Formated detector parameters string representation. Parameters: - spec: 'modified' to get only modified parameters.""" + spec: 'modified' to get only modified parameters. + """ output = '' @@ -257,11 +259,11 @@ class ArUcoDetector(): def detect_markers(self, image: numpy.array) -> float: """Detect all ArUco markers into an image. - .. danger:: DON'T MIRROR IMAGE - It makes the markers detection to fail. + !!! danger "DON'T MIRROR IMAGE" + It makes the markers detection to fail. Returns: - - detection time: marker detection time in ms + detection time: marker detection time in ms. 
""" # Reset detected markers data @@ -369,8 +371,8 @@ class ArUcoDetector(): def detect_board(self, image: numpy.array, board, expected_markers_number): """Detect ArUco markers board in image setting up the number of detected markers needed to agree detection. - .. danger:: DON'T MIRROR IMAGE - It makes the markers detection to fail. + !!! danger "DON'T MIRROR IMAGE" + It makes the markers detection to fail. """ # detect markers from gray picture @@ -406,9 +408,11 @@ class ArUcoDetector(): @property def detection_metrics(self) -> Tuple[int, dict]: """Get marker detection metrics. + Returns: - number of detect function call - dict with number of detection for each marker identifier""" + number of detect function call + dict with number of detection for each marker identifier + """ return self.__detection_count, Counter(self.__detected_ids) diff --git a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py index 5b6c69d..4a43965 100644 --- a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py +++ b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py @@ -79,29 +79,31 @@ def make_euler_rotation_vector(R): @dataclass(frozen=True) class Place(): - """Define a place as a pose and a marker.""" + """Define a place as a pose and a marker. - translation: numpy.array - """Position in group referential.""" + Parameters: + translation: position in group referential. + rotation: rotation in group referential. + marker: ArUco marker linked to the place. + """ + translation: numpy.array rotation: numpy.array - """Rotation in group referential.""" - marker: dict - """ArUco marker linked to the place.""" @dataclass class ArUcoMarkersGroup(): - """Handle group of ArUco markers as one unique spatial entity and estimate its pose.""" + """Handle group of ArUco markers as one unique spatial entity and estimate its pose. - marker_size: float = field(default=0.) 
- """Expected size of all markers in the group.""" + Parameters: + marker_size: expected size of all markers in the group. + dictionary: expected dictionary of all markers in the group. + places: expected markers place. + """ + marker_size: float = field(default=0.) dictionary: ArUcoMarkersDictionary.ArUcoMarkersDictionary = field(default_factory=ArUcoMarkersDictionary.ArUcoMarkersDictionary) - """Expected dictionary of all markers in the group.""" - places: dict = field(default_factory=dict) - """Expected markers place""" def __post_init__(self): """Init group pose and places pose.""" @@ -166,13 +168,13 @@ class ArUcoMarkersGroup(): """Load ArUco markers group from .obj file. !!! note - Expected object (o) name format: #_Marker + Expected object (o) name format: #_Marker !!! note - All markers have to belong to the same dictionary. + All markers have to belong to the same dictionary. !!! note - Marker normal vectors (vn) expected. + Marker normal vectors (vn) expected. """ @@ -360,8 +362,8 @@ class ArUcoMarkersGroup(): """Sort markers belonging to the group from given detected markers dict (cf ArUcoDetector.detect_markers()). Returns: - dict of markers belonging to this group - dict of remaining markers not belonging to this group + dict of markers belonging to this group + dict of remaining markers not belonging to this group """ group_markers = {} @@ -434,9 +436,9 @@ class ArUcoMarkersGroup(): """Evaluate if given markers configuration match related places configuration. Returns: - dict of consistent markers - dict of unconsistent markers - dict of identified distance or angle unconsistencies and out-of-bounds values + dict of consistent markers + dict of unconsistent markers + dict of identified distance or angle unconsistencies and out-of-bounds values """ consistent_markers = {} @@ -684,7 +686,6 @@ class ArUcoMarkersGroup(): """Draw group axes and places. 
Parameters: - draw_axes: draw_axes parameters (if None, no axes drawn) draw_places: draw_places parameters (if None, no places drawn) draw_places_axes: draw_places_axes parameters (if None, no places axes drawn) -- cgit v1.1 From 134542a822ea1ff6a7778fcff1cb460ee13cf4a2 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 3 Oct 2023 11:23:43 +0200 Subject: Fixing SVG file loading. --- src/argaze/ArFeatures.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src') diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index 84eae12..43acf55 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -183,7 +183,7 @@ class ArLayer(): # SVG file format for 2D dimension only if file_format == 'svg': - new_aoi_scene = AOIFeatures.AOI2DScene.from_svg(filepath) + new_aoi_scene = AOI2DScene.AOI2DScene.from_svg(filepath) # OBJ file format for 3D dimension only elif file_format == 'obj': -- cgit v1.1 From 59d47fa2ea032c5d47812e9b7b5e80cec344f59a Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Wed, 4 Oct 2023 19:37:04 +0200 Subject: minor --- src/argaze/GazeFeatures.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src') diff --git a/src/argaze/GazeFeatures.py b/src/argaze/GazeFeatures.py index 523bf2c..bd1a3da 100644 --- a/src/argaze/GazeFeatures.py +++ b/src/argaze/GazeFeatures.py @@ -847,8 +847,8 @@ class AOIScanPath(list): super().__init__() self.duration_max = duration_max - self.expected_aoi = expected_aoi + self.__duration = 0 @property -- cgit v1.1 From c55fccdb1e8c26ea08c1cb36fa9178cfbc89dba8 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Wed, 4 Oct 2023 19:37:36 +0200 Subject: Adding aoi fixation distribution to Basic module. 
--- src/argaze/GazeAnalysis/Basic.py | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) (limited to 'src') diff --git a/src/argaze/GazeAnalysis/Basic.py b/src/argaze/GazeAnalysis/Basic.py index 7b41731..dc7b4fd 100644 --- a/src/argaze/GazeAnalysis/Basic.py +++ b/src/argaze/GazeAnalysis/Basic.py @@ -79,12 +79,27 @@ class AOIScanPathAnalyzer(GazeFeatures.AOIScanPathAnalyzer): self.__steps_number = len(aoi_scan_path) sum_fixation_durations = 0 + self.__sum_aoi_fixation_durations = {} for aoi_scan_step in aoi_scan_path: sum_fixation_durations += aoi_scan_step.fixation_duration + try: + + self.__sum_aoi_fixation_durations[aoi_scan_step.aoi] = self.__sum_aoi_fixation_durations[aoi_scan_step.aoi] + aoi_scan_step.fixation_duration + + except KeyError: + + self.__sum_aoi_fixation_durations[aoi_scan_step.aoi] = aoi_scan_step.fixation_duration + self.__step_fixation_durations_average = sum_fixation_durations / self.__steps_number + + self.__aoi_fixation_distribution = {} + + for aoi_name, sum_aoi_fixation_duration in self.__sum_aoi_fixation_durations.items(): + + self.__aoi_fixation_distribution[aoi_name] = sum_aoi_fixation_duration / sum_fixation_durations @property def path_duration(self) -> float: @@ -102,4 +117,10 @@ class AOIScanPathAnalyzer(GazeFeatures.AOIScanPathAnalyzer): def step_fixation_durations_average(self) -> float: """AOI scan path step fixation durations average.""" - return self.__step_fixation_durations_average \ No newline at end of file + return self.__step_fixation_durations_average + + @property + def aoi_fixation_distribution(self) -> dict: + """percentage of time spent on each AOI.""" + + return self.__aoi_fixation_distribution \ No newline at end of file -- cgit v1.1 From ff2c7b8db71755576048e5c0ee9ec59a581c07fa Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Wed, 4 Oct 2023 19:41:01 +0200 Subject: Excluding AOI frame from aoi matching and from expected aoi. 
--- src/argaze/ArFeatures.py | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) (limited to 'src') diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index 43acf55..545bc8b 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -1330,25 +1330,42 @@ class ArCamera(ArFrame): scene.parent = self # Setup expected aoi of each layer aoi scan path with the aoi of corresponding scene layer + # Edit aoi matcher exclude attribute to ignore frame aoi for layer_name, layer in self.layers.items(): if layer.aoi_scan_path is not None: - all_aoi_list = [] + expected_aoi_list = [] + exclude_aoi_list = [] for scene_name, scene in self.scenes.items(): + # Append scene layer aoi to corresponding expected camera layer aoi try: scene_layer = scene.layers[layer_name] - all_aoi_list.extend(list(scene_layer.aoi_scene.keys())) + expected_aoi_list.extend(list(scene_layer.aoi_scene.keys())) except KeyError: continue - layer.aoi_scan_path.expected_aoi = all_aoi_list + # Remove scene frame from expected camera layer aoi + # Exclude scene frame from camera layer aoi matching + for frame_name, frame in scene.frames.items(): + + try: + + expected_aoi_list.remove(frame_name) + exclude_aoi_list.append(frame_name) + + except ValueError: + + continue + + layer.aoi_scan_path.expected_aoi = expected_aoi_list + layer.aoi_matcher.exclude = exclude_aoi_list # Init a lock to share scene projections into camera frame between multiple threads self._frame_lock = threading.Lock() -- cgit v1.1 From cdf4e23d2876b7a43b5a3712467d503723fa7a52 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Thu, 5 Oct 2023 11:38:13 +0200 Subject: removing ignored gaze position table. 
--- src/argaze/ArFeatures.py | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) (limited to 'src') diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index 545bc8b..b9b51d0 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -1369,9 +1369,6 @@ class ArCamera(ArFrame): # Init a lock to share scene projections into camera frame between multiple threads self._frame_lock = threading.Lock() - - # Define public timestamp buffer to store ignored gaze positions - self.ignored_gaze_positions = GazeFeatures.TimeStampedGazePositions() def __str__(self) -> str: """ @@ -1438,14 +1435,11 @@ class ArCamera(ArFrame): watch method needs to be called first. """ - # Can't use camera frame when it is locked - if self._frame_lock.locked(): - - # TODO: Store ignored timestamped gaze positions for further projections - # PB: This would imply to also store frame projections !!! - self.ignored_gaze_positions[timestamp] = gaze_position + # Can't use camera frame while it is locked + # TODO? Do we need a timeout parameter here? + while self._frame_lock.locked(): - return None, None + time.sleep(1e-6) # Lock camera frame exploitation self._frame_lock.acquire() -- cgit v1.1 From 64df8beaf90d9f0bbaf0b1b51dae225f86c6a4c4 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Thu, 5 Oct 2023 13:48:33 +0200 Subject: Working on gaze processing time assessment. 
--- src/argaze/ArFeatures.py | 68 +++++++++++++++++++++++------------------------- 1 file changed, 32 insertions(+), 36 deletions(-) (limited to 'src') diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index b9b51d0..02da0fe 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -374,6 +374,9 @@ class ArLayer(): # Lock layer exploitation self.__look_lock.acquire() + # Store look execution start date + look_start = time.perf_counter() + # Update current gaze movement self.__gaze_movement = gaze_movement @@ -452,23 +455,13 @@ class ArLayer(): looked_aoi = None aoi_scan_path_analysis = {} exception = e + + # Assess total execution time in ms + execution_times['total'] = (time.perf_counter() - look_start) * 1e3 # Unlock layer exploitation self.__look_lock.release() - # Sum all execution times - total_execution_time = 0 - - if execution_times['aoi_matcher']: - - total_execution_time += execution_times['aoi_matcher'] - - for _, aoi_scan_path_analysis_time in execution_times['aoi_scan_step_analyzers'].items(): - - total_execution_time += aoi_scan_path_analysis_time - - execution_times['total'] = total_execution_time - # Return look data return looked_aoi, aoi_scan_path_analysis, execution_times, exception @@ -832,6 +825,9 @@ class ArFrame(): # Lock frame exploitation self.__look_lock.acquire() + # Store look execution start date + look_start = time.perf_counter() + # Update current gaze position self.__gaze_position = gaze_position @@ -950,30 +946,12 @@ class ArFrame(): scan_step_analysis = {} layer_analysis = {} exception = e - - # Unlock frame exploitation - self.__look_lock.release() - - # Sum all execution times - total_execution_time = 0 - - if execution_times['gaze_movement_identifier']: - total_execution_time += execution_times['gaze_movement_identifier'] + # Assess total execution time in ms + execution_times['total'] = (time.perf_counter() - look_start) * 1e3 - for _, scan_step_analysis_time in 
execution_times['scan_step_analyzers'].items(): - total_execution_time += scan_step_analysis_time - if execution_times['heatmap']: - total_execution_time += execution_times['heatmap'] - for _, layer_execution_times in execution_times['layers'].items(): - total_execution_time += layer_execution_times['total'] - execution_times['total'] = total_execution_time + # Assess total execution time in ms + execution_times['total'] = (time.perf_counter() - look_start) * 1e3 + # Unlock frame exploitation + self.__look_lock.release() # Return look data return identified_gaze_movement, scan_step_analysis, layer_analysis, execution_times, exception @@ -1431,15 +1409,33 @@ class ArCamera(ArFrame): def look(self, timestamp: int|float, gaze_position: GazeFeatures.GazePosition): """Project timestamped gaze position into each scene frames. + Parameters: + timestamp: gaze position time stamp (unit does'nt matter) + gaze_position: GazePosition object + timeout: maximal waiting time in ms + !!! warning watch method needs to be called first. """ # Can't use camera frame while it is locked - # TODO? Do we need a timeout parameter here? + wait_start = time.perf_counter() + waiting_time = 0 + while self._frame_lock.locked(): time.sleep(1e-6) + waiting_time = (time.perf_counter() - wait_start) * 1e3 + + # TODO? return waiting time? + + # TODO? add timeout parameter? + #if waiting_time > timeout: + # return None, None + + # DEBUG + if waiting_time > 0: + print(f'ArCamera: waiting {waiting_time:.3f} ms before to process gaze position at {timestamp} time.') # Lock camera frame exploitation self._frame_lock.acquire() -- cgit v1.1 From 8bb3cec466ace640c27b41106cac7f6a09dfcdbd Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Thu, 5 Oct 2023 22:07:12 +0200 Subject: Returning projection time.
--- src/argaze/ArUcoMarkers/ArUcoCamera.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'src') diff --git a/src/argaze/ArUcoMarkers/ArUcoCamera.py b/src/argaze/ArUcoMarkers/ArUcoCamera.py index 4f00a3a..33f5b37 100644 --- a/src/argaze/ArUcoMarkers/ArUcoCamera.py +++ b/src/argaze/ArUcoMarkers/ArUcoCamera.py @@ -11,6 +11,7 @@ from typing import TypeVar, Tuple from dataclasses import dataclass, field import json import os +import time from argaze import ArFeatures, DataStructures from argaze.ArUcoMarkers import ArUcoMarkersDictionary, ArUcoDetector, ArUcoOpticCalibrator, ArUcoScene @@ -146,6 +147,7 @@ class ArUcoCamera(ArFeatures.ArCamera): Returns: detection time: aruco marker detection time in ms. + projection time: scenes projection time in ms. exception: dictionary with exception raised per scene. """ @@ -155,6 +157,9 @@ class ArUcoCamera(ArFeatures.ArCamera): # Lock camera frame exploitation self._frame_lock.acquire() + # Store projection execution start date + projection_start = time.perf_counter() + # Fill camera frame background with image self.background = image @@ -206,11 +211,14 @@ class ArUcoCamera(ArFeatures.ArCamera): exceptions[scene_name] = e + # Assess projection time in ms + projection_time = (time.perf_counter() - projection_start) * 1e3 + # Unlock camera frame exploitation self._frame_lock.release() - # Return dection time and exceptions - return detection_time, exceptions + # Return detection time, projection time and exceptions + return detection_time, projection_time, exceptions def __image(self, draw_detected_markers: dict = None, draw_scenes: dict = None, draw_optic_parameters_grid: dict = None, **kwargs: dict) -> numpy.array: """Get frame image with ArUco detection visualisation. -- cgit v1.1 From 8640b63b5b607ed0e197cb63428ae94b0baa98a7 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Thu, 5 Oct 2023 22:07:49 +0200 Subject: Using better detection parameters. Changing grid color. 
--- src/argaze/utils/demo_data/demo_aruco_markers_setup.json | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'src') diff --git a/src/argaze/utils/demo_data/demo_aruco_markers_setup.json b/src/argaze/utils/demo_data/demo_aruco_markers_setup.json index c881452..7a4f6d1 100644 --- a/src/argaze/utils/demo_data/demo_aruco_markers_setup.json +++ b/src/argaze/utils/demo_data/demo_aruco_markers_setup.json @@ -5,9 +5,10 @@ "dictionary": "DICT_APRILTAG_16h5", "marker_size": 5, "parameters": { - "cornerRefinementMethod": 1, + "cornerRefinementMethod": 3, "aprilTagQuadSigma": 2, - "aprilTagDeglitch": 1 + "aprilTagDeglitch": 1, + "useAruco3Detection": 1 } }, "layers": { @@ -40,7 +41,7 @@ "height": 72, "z": 100, "point_size": 1, - "point_color": [0, 0, 255] + "point_color": [127, 127, 127] }, "draw_scenes": { "ArScene Demo": { -- cgit v1.1 From 1e39f54a7222ed53c7c514be555e06aa5e7372b7 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Thu, 5 Oct 2023 22:08:18 +0200 Subject: Improving time assessment. 
--- src/argaze/utils/demo_aruco_markers_run.py | 71 +++++++++++++++++++++++++----- 1 file changed, 61 insertions(+), 10 deletions(-) (limited to 'src') diff --git a/src/argaze/utils/demo_aruco_markers_run.py b/src/argaze/utils/demo_aruco_markers_run.py index 6dc081d..6c22695 100644 --- a/src/argaze/utils/demo_aruco_markers_run.py +++ b/src/argaze/utils/demo_aruco_markers_run.py @@ -14,6 +14,7 @@ import time from argaze import ArFeatures, GazeFeatures from argaze.ArUcoMarkers import ArUcoCamera +from argaze.utils import UtilsFeatures import cv2 import numpy @@ -40,9 +41,29 @@ def main(): # Init timestamp start_time = time.time() + # Prepare gaze analysis assessment + call_chrono = UtilsFeatures.TimeProbe() + call_chrono.start() + + gaze_positions_frequency = 0 + gaze_analysis_time = 0 + # Fake gaze position with mouse pointer def on_mouse_event(event, x, y, flags, param): + nonlocal gaze_positions_frequency + nonlocal gaze_analysis_time + + # Assess gaze analysis + lap_time, nb_laps, elapsed_time = call_chrono.lap() + + if elapsed_time > 1e3: + + gaze_positions_frequency = nb_laps + call_chrono.restart() + + gaze_analysis_time = 0 + # Edit millisecond timestamp timestamp = int((time.time() - start_time) * 1e3) @@ -54,12 +75,20 @@ def main(): gaze_movement, scan_step_analysis, layer_analysis, execution_times, exception = look_data - # Do something with look data - # ... 
+ # Assess gaze analysis + gaze_analysis_time += execution_times['total'] # Attach mouse callback to window cv2.setMouseCallback(aruco_camera.name, on_mouse_event) + # Prepare video fps assessment + video_fps = 0 + video_chrono = UtilsFeatures.TimeProbe() + video_chrono.start() + + # Prepare visualisation time assessment + visualisation_time = 0 + # Enable camera video capture into separate thread video_capture = cv2.VideoCapture(int(args.source) if args.source.isdecimal() else args.source) @@ -69,30 +98,48 @@ def main(): # Capture images while video_capture.isOpened(): + # Assess capture time + capture_start = time.time() + # Read video image success, video_image = video_capture.read() + # Assess capture time + capture_time = int((time.time() - capture_start) * 1e3) + if success: + # Assess video fps + lap_time, nb_laps, elapsed_time = video_chrono.lap() + + if elapsed_time > 1e3: + + video_fps = nb_laps + video_chrono.restart() + # Detect and project AR features - detection_time, exceptions = aruco_camera.watch(video_image) + detection_time, projection_time, exceptions = aruco_camera.watch(video_image) + + # Assess visualisation time + visualisation_start = time.time() # Get ArUcoCamera frame image aruco_camera_image = aruco_camera.image() - # Write detection fps - cv2.rectangle(aruco_camera_image, (0, 0), (420, 50), (63, 63, 63), -1) - cv2.putText(aruco_camera_image, f'Detection fps: {1e3/detection_time:.1f}', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA) + # Write time info + cv2.rectangle(aruco_camera_image, (0, 0), (aruco_camera.size[0], 100), (63, 63, 63), -1) + cv2.putText(aruco_camera_image, f'{video_fps} FPS | Capture {capture_time}ms | Detection {int(detection_time)}ms | Projection {int(projection_time)}ms | Visualisation {visualisation_time}ms', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA) + cv2.putText(aruco_camera_image, f'{gaze_positions_frequency} gaze positions/s | Gaze analysis 
{gaze_analysis_time:.2f}ms', (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA) # Handle exceptions for i, (scene_name, e) in enumerate(exceptions.items()): # Write errors - cv2.rectangle(aruco_camera_image, (0, (i+1)*50), (720, (i+2)*50), (127, 127, 127), -1) - cv2.putText(aruco_camera_image, f'{scene_name} error: {e}', (20, (i+1)*90), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA) + cv2.rectangle(aruco_camera_image, (0, (i+1)*100), (aruco_camera.size[0], (i+2)*80), (127, 127, 127), -1) + cv2.putText(aruco_camera_image, f'{scene_name} error: {e}', (20, (i+1)*140), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA) # Write hint - cv2.putText(aruco_camera_image, 'Mouve mouse pointer over gray rectangle area', (450, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA) + cv2.putText(aruco_camera_image, 'Mouve mouse pointer over gray rectangle area', (20, aruco_camera.size[1]-40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA) # Display ArUcoCamera frame image cv2.imshow(aruco_camera.name, aruco_camera_image) @@ -104,11 +151,15 @@ def main(): cv2.imshow(f'{scene_frame.parent.name}:{scene_frame.name}', scene_frame.image()) # Stop by pressing 'Esc' key - if cv2.waitKey(10) == 27: + # NOTE: on MacOS, cv2.waitKey(1) waits ~40ms + if cv2.waitKey(1) == 27: # Close camera video capture video_capture.release() + # Assess visualisation time + visualisation_time = int((time.time() - visualisation_start) * 1e3) + # Stop image display cv2.destroyAllWindows() -- cgit v1.1 From 07d8e2535a8d902d5cb731d8343ebc349c198d65 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Fri, 6 Oct 2023 00:15:42 +0200 Subject: Assessing visualisation time even when no image is read. 
--- src/argaze/utils/demo_aruco_markers_run.py | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'src') diff --git a/src/argaze/utils/demo_aruco_markers_run.py b/src/argaze/utils/demo_aruco_markers_run.py index 6c22695..5e1ac2e 100644 --- a/src/argaze/utils/demo_aruco_markers_run.py +++ b/src/argaze/utils/demo_aruco_markers_run.py @@ -150,6 +150,11 @@ def main(): # Display scene frame cv2.imshow(f'{scene_frame.parent.name}:{scene_frame.name}', scene_frame.image()) + else: + + # Assess visualisation time + visualisation_start = time.time() + # Stop by pressing 'Esc' key # NOTE: on MacOS, cv2.waitKey(1) waits ~40ms if cv2.waitKey(1) == 27: -- cgit v1.1 From 992b84ea72e1d20b395ab8d3d50abbd494c1a749 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 10 Oct 2023 13:51:11 +0200 Subject: Fixing KCoefficient formula. --- src/argaze/GazeAnalysis/KCoefficient.py | 29 ++++++++++++++++++++--------- 1 file changed, 20 insertions(+), 9 deletions(-) (limited to 'src') diff --git a/src/argaze/GazeAnalysis/KCoefficient.py b/src/argaze/GazeAnalysis/KCoefficient.py index 80fe1fd..c50bc3a 100644 --- a/src/argaze/GazeAnalysis/KCoefficient.py +++ b/src/argaze/GazeAnalysis/KCoefficient.py @@ -52,19 +52,24 @@ class ScanPathAnalyzer(GazeFeatures.ScanPathAnalyzer): duration_std = numpy.std(durations) amplitude_std = numpy.std(amplitudes) - Ks = [] - for scan_step in scan_path: + if duration_std > 0. and amplitude_std > 0.: + + Ks = [] + for scan_step in scan_path: + + Ks.append((abs(scan_step.duration - duration_mean) / duration_std) - (abs(scan_step.last_saccade.amplitude - amplitude_mean) / amplitude_std)) + + self.__K = numpy.array(Ks).mean() - Ks.append(((scan_step.duration - duration_mean) / duration_std) - ((scan_step.last_saccade.amplitude - amplitude_mean) / amplitude_std)) + else: - self.__K = numpy.array(Ks).mean() + self.__K = 0. 
@property def K(self) -> float: """K coefficient.""" return self.__K - @dataclass class AOIScanPathAnalyzer(GazeFeatures.AOIScanPathAnalyzer): @@ -104,12 +109,18 @@ class AOIScanPathAnalyzer(GazeFeatures.AOIScanPathAnalyzer): duration_std = numpy.std(durations) amplitude_std = numpy.std(amplitudes) - Ks = [] - for aoi_scan_step in aoi_scan_path: + if duration_std > 0. and amplitude_std > 0.: + + Ks = [] + for aoi_scan_step in aoi_scan_path: + + Ks.append((abs(aoi_scan_step.duration - duration_mean) / duration_std) - (abs(aoi_scan_step.last_saccade.amplitude - amplitude_mean) / amplitude_std)) + + self.__K = numpy.array(Ks).mean() - Ks.append(((aoi_scan_step.duration - duration_mean) / duration_std) - ((aoi_scan_step.last_saccade.amplitude - amplitude_mean) / amplitude_std)) + else: - self.__K = numpy.array(Ks).mean() + self.__K = 0. @property def K(self) -> float: -- cgit v1.1 From d7107ed868229b9665ee7432dcdc1da90c97c75a Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 10 Oct 2023 16:25:56 +0200 Subject: Replacing ExploitExplore by ExploreExploit. 
--- .../GazeAnalysis/ExploitExploreRatio.py | 6 +- src/argaze/GazeAnalysis/ExploitExploreRatio.py | 75 ---------------------- src/argaze/GazeAnalysis/ExploreExploitRatio.py | 70 ++++++++++++++++++++ src/argaze/GazeAnalysis/__init__.py | 2 +- .../utils/demo_data/demo_gaze_analysis_setup.json | 2 +- src/argaze/utils/demo_gaze_analysis_run.py | 6 +- 6 files changed, 78 insertions(+), 83 deletions(-) delete mode 100644 src/argaze/GazeAnalysis/ExploitExploreRatio.py create mode 100644 src/argaze/GazeAnalysis/ExploreExploitRatio.py (limited to 'src') diff --git a/src/argaze.test/GazeAnalysis/ExploitExploreRatio.py b/src/argaze.test/GazeAnalysis/ExploitExploreRatio.py index 0e6b74a..7b323d4 100644 --- a/src/argaze.test/GazeAnalysis/ExploitExploreRatio.py +++ b/src/argaze.test/GazeAnalysis/ExploitExploreRatio.py @@ -10,7 +10,7 @@ __license__ = "BSD" import unittest from argaze import GazeFeatures -from argaze.GazeAnalysis import ExploitExploreRatio +from argaze.GazeAnalysis import ExploreExploitRatio from argaze.utils import UtilsFeatures GazeFeaturesTest = UtilsFeatures.importFromTestPackage('GazeFeatures') @@ -21,7 +21,7 @@ class TestScanPathAnalyzer(unittest.TestCase): def test_analyze(self): """Test analyze method.""" - xxr_analyzer = ExploitExploreRatio.ScanPathAnalyzer() + xxr_analyzer = ExploreExploitRatio.ScanPathAnalyzer() scan_path = GazeFeaturesTest.build_scan_path(10) @@ -31,7 +31,7 @@ class TestScanPathAnalyzer(unittest.TestCase): xxr_analyzer.analyze(scan_path) # Check exploit explore ratio: it should greater than 1 because of build_scan_path - self.assertGreaterEqual(xxr_analyzer.exploit_explore_ratio, 1.) + self.assertGreaterEqual(xxr_analyzer.explore_exploit_ratio, 1.) 
if __name__ == '__main__': diff --git a/src/argaze/GazeAnalysis/ExploitExploreRatio.py b/src/argaze/GazeAnalysis/ExploitExploreRatio.py deleted file mode 100644 index f35561f..0000000 --- a/src/argaze/GazeAnalysis/ExploitExploreRatio.py +++ /dev/null @@ -1,75 +0,0 @@ -#!/usr/bin/env python - -"""Exploit/Explore ratio module. -""" - -__author__ = "Théo de la Hogue" -__credits__ = [] -__copyright__ = "Copyright 2023, Ecole Nationale de l'Aviation Civile (ENAC)" -__license__ = "BSD" - -from dataclasses import dataclass, field - -from argaze import GazeFeatures - -import numpy - -@dataclass -class ScanPathAnalyzer(GazeFeatures.ScanPathAnalyzer): - """Implementation of exploit vs explore ratio algorithm as described in: - - **Goldberg J. H., Kotval X. P. (1999).** - *Computer interface evaluation using eye movements: methods and constructs.* - International Journal of Industrial Ergonomics (631–645). - [https://doi.org/10.1016/S0169-8141(98)00068-7](https://doi.org/10.1016/S0169-8141\\(98\\)00068-7) - - **Dehais F., Peysakhovich V., Scannella S., Fongue J., Gateau T. (2015).** - *Automation surprise in aviation: Real-time solutions.* - Proceedings of the 33rd annual ACM conference on Human Factors in Computing Systems (2525–2534). - [https://doi.org/10.1145/2702123.2702521](https://doi.org/10.1145/2702123.2702521) - """ - - short_fixation_duration_threshold: float = field(default=0.) - """Time below which a fixation is considered to be short and so as exploratory.""" - - def __post_init__(self): - - super().__init__() - - self.__exploit_explore_ratio = 0. 
- - def analyze(self, scan_path: GazeFeatures.ScanPathType): - """Analyze scan path.""" - - assert(len(scan_path) > 1) - - short_fixations_durations = [] - long_fixations_durations = [] - saccades_durations = [] - - for scan_step in scan_path: - - if scan_step.first_fixation.duration > self.short_fixation_duration_threshold: - - long_fixations_durations.append(scan_step.first_fixation.duration) - - else: - - short_fixations_durations.append(scan_step.first_fixation.duration) - - saccades_durations.append(scan_step.last_saccade.duration) - - short_fixations_duration = numpy.array(short_fixations_durations).sum() - long_fixations_duration = numpy.array(long_fixations_durations).sum() - saccades_duration = numpy.array(saccades_durations).sum() - - assert(saccades_duration + short_fixations_duration > 0) - - self.__exploit_explore_ratio = long_fixations_duration / (saccades_duration + short_fixations_duration) - - @property - def exploit_explore_ratio(self) -> float: - """Exploit/Explore ratio.""" - - return self.__exploit_explore_ratio - \ No newline at end of file diff --git a/src/argaze/GazeAnalysis/ExploreExploitRatio.py b/src/argaze/GazeAnalysis/ExploreExploitRatio.py new file mode 100644 index 0000000..b4550e7 --- /dev/null +++ b/src/argaze/GazeAnalysis/ExploreExploitRatio.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python + +"""Explore/Explore ratio module. +""" + +__author__ = "Théo de la Hogue" +__credits__ = [] +__copyright__ = "Copyright 2023, Ecole Nationale de l'Aviation Civile (ENAC)" +__license__ = "BSD" + +from dataclasses import dataclass, field + +from argaze import GazeFeatures + +import numpy + +@dataclass +class ScanPathAnalyzer(GazeFeatures.ScanPathAnalyzer): + """Implementation of explore vs exploit ratio algorithm as described in: + + **Dehais F., Peysakhovich V., Scannella S., Fongue J., Gateau T. 
(2015).** + *Automation surprise in aviation: Real-time solutions.* + Proceedings of the 33rd annual ACM conference on Human Factors in Computing Systems (2525–2534). + [https://doi.org/10.1145/2702123.2702521](https://doi.org/10.1145/2702123.2702521) + """ + + short_fixation_duration_threshold: float = field(default=0.) + """Time below which a fixation is considered to be short and so as exploratory.""" + + def __post_init__(self): + + super().__init__() + + self.__explore_exploit_ratio = 0. + + def analyze(self, scan_path: GazeFeatures.ScanPathType): + """Analyze scan path.""" + + assert(len(scan_path) > 1) + + short_fixations_durations = [] + long_fixations_durations = [] + saccades_durations = [] + + for scan_step in scan_path: + + if scan_step.first_fixation.duration > self.short_fixation_duration_threshold: + + long_fixations_durations.append(scan_step.first_fixation.duration) + + else: + + short_fixations_durations.append(scan_step.first_fixation.duration) + + saccades_durations.append(scan_step.last_saccade.duration) + + short_fixations_duration = numpy.array(short_fixations_durations).sum() + long_fixations_duration = numpy.array(long_fixations_durations).sum() + saccades_duration = numpy.array(saccades_durations).sum() + + assert(long_fixations_duration > 0) + + self.__explore_exploit_ratio = (saccades_duration + short_fixations_duration) / long_fixations_duration + + @property + def explore_exploit_ratio(self) -> float: + """Explore/Exploit ratio.""" + + return self.__explore_exploit_ratio + \ No newline at end of file diff --git a/src/argaze/GazeAnalysis/__init__.py b/src/argaze/GazeAnalysis/__init__.py index 164de74..62e0823 100644 --- a/src/argaze/GazeAnalysis/__init__.py +++ b/src/argaze/GazeAnalysis/__init__.py @@ -1,4 +1,4 @@ """ Various gaze movement identification, AOI matching and scan path analysis algorithms. 
""" -__all__ = ['Basic', 'DispersionThresholdIdentification', 'VelocityThresholdIdentification', 'TransitionMatrix', 'KCoefficient', 'LempelZivComplexity', 'NGram', 'Entropy', 'NearestNeighborIndex', 'ExploitExploreRatio'] \ No newline at end of file +__all__ = ['Basic', 'DispersionThresholdIdentification', 'VelocityThresholdIdentification', 'TransitionMatrix', 'KCoefficient', 'LempelZivComplexity', 'NGram', 'Entropy', 'NearestNeighborIndex', 'ExploreExploitRatio'] \ No newline at end of file diff --git a/src/argaze/utils/demo_data/demo_gaze_analysis_setup.json b/src/argaze/utils/demo_data/demo_gaze_analysis_setup.json index fe5d197..f921662 100644 --- a/src/argaze/utils/demo_data/demo_gaze_analysis_setup.json +++ b/src/argaze/utils/demo_data/demo_gaze_analysis_setup.json @@ -18,7 +18,7 @@ "NearestNeighborIndex": { "size": [1920, 1149] }, - "ExploitExploreRatio": { + "ExploreExploitRatio": { "short_fixation_duration_threshold": 0 } }, diff --git a/src/argaze/utils/demo_gaze_analysis_run.py b/src/argaze/utils/demo_gaze_analysis_run.py index 789657b..9856d90 100644 --- a/src/argaze/utils/demo_gaze_analysis_run.py +++ b/src/argaze/utils/demo_gaze_analysis_run.py @@ -206,12 +206,12 @@ def main(): except KeyError: pass - # Display Exploit/Explore ratio analysis if loaded + # Display Explore/Exploit ratio analysis if loaded try: - xxr_analyser = ar_frame.scan_path_analyzers["argaze.GazeAnalysis.ExploitExploreRatio"] + xxr_analyser = ar_frame.scan_path_analyzers["argaze.GazeAnalysis.ExploreExploitRatio"] - cv2.putText(frame_image, f'Exploit explore ratio: {xxr_analyser.exploit_explore_ratio:.3f}', (20, ar_frame.size[1]-360), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA) + cv2.putText(frame_image, f'Explore/Exploit ratio: {xxr_analyser.explore_exploit_ratio:.3f}', (20, ar_frame.size[1]-360), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA) except KeyError: -- cgit v1.1 From b8ea27be0fdaba40c49b93a2e84756fb00c6cde5 Mon Sep 17 00:00:00 2001 From: 
Théo de la Hogue Date: Tue, 10 Oct 2023 18:09:02 +0200 Subject: Adding visual HFOV and VFOV parameter to ArCamera. Using them into ArScene project method and ArUcoCamera class. --- src/argaze/ArFeatures.py | 10 ++++++++-- src/argaze/ArUcoMarkers/ArUcoCamera.py | 2 +- 2 files changed, 9 insertions(+), 3 deletions(-) (limited to 'src') diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index 02da0fe..1b24957 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -1239,13 +1239,14 @@ class ArScene(): raise NotImplementedError('estimate_pose() method not implemented') - def project(self, tvec: numpy.array, rvec: numpy.array, visual_hfov: float = 0.) -> Tuple[str, AOI2DScene.AOI2DScene]: - """Project layers according estimated pose and optional horizontal field of view clipping angle. + def project(self, tvec: numpy.array, rvec: numpy.array, visual_hfov: float = 0., visual_vfov: float = 0.) -> Tuple[str, AOI2DScene.AOI2DScene]: + """Project layers according estimated pose and optional field of view clipping angles. Parameters: tvec: translation vector rvec: rotation vector visual_hfov: horizontal field of view clipping angle + visual_vfov: vertical field of view clipping angle Returns: layer_name: name of projected layer @@ -1255,6 +1256,7 @@ class ArScene(): for name, layer in self.layers.items(): # Clip AOI out of the visual horizontal field of view (optional) + # TODO: use HFOV and VFOV and don't use vision_cone method if visual_hfov > 0: # Transform layer aoi scene into camera referential @@ -1293,9 +1295,13 @@ class ArCamera(ArFrame): Parameters: scenes: all scenes to project into camera frame + visual_hfov: Optional angle in degree to clip scenes projection according visual horizontal field of view (HFOV). + visual_vfov: Optional angle in degree to clip scenes projection according visual vertical field of view (VFOV). """ scenes: dict = field(default_factory=dict) + visual_hfov: float = field(default=0.) 
+ visual_vfov: float = field(default=0.) def __post_init__(self): diff --git a/src/argaze/ArUcoMarkers/ArUcoCamera.py b/src/argaze/ArUcoMarkers/ArUcoCamera.py index 33f5b37..f39c516 100644 --- a/src/argaze/ArUcoMarkers/ArUcoCamera.py +++ b/src/argaze/ArUcoMarkers/ArUcoCamera.py @@ -196,7 +196,7 @@ class ArUcoCamera(ArFeatures.ArCamera): tvec, rmat, _, _ = scene.estimate_pose(self.aruco_detector.detected_markers) # Project scene into camera frame according estimated pose - for layer_name, layer_projection in scene.project(tvec, rmat): + for layer_name, layer_projection in scene.project(tvec, rmat, self.visual_hfov, self.visual_vfov): try: -- cgit v1.1 From 46b2ac7ea7fb83d520d3fe5deee46d629d6dc9d0 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Wed, 11 Oct 2023 07:34:33 +0200 Subject: renaming test file. --- .../GazeAnalysis/ExploitExploreRatio.py | 38 ---------------------- .../GazeAnalysis/ExploreExploitRatio.py | 38 ++++++++++++++++++++++ 2 files changed, 38 insertions(+), 38 deletions(-) delete mode 100644 src/argaze.test/GazeAnalysis/ExploitExploreRatio.py create mode 100644 src/argaze.test/GazeAnalysis/ExploreExploitRatio.py (limited to 'src') diff --git a/src/argaze.test/GazeAnalysis/ExploitExploreRatio.py b/src/argaze.test/GazeAnalysis/ExploitExploreRatio.py deleted file mode 100644 index 7b323d4..0000000 --- a/src/argaze.test/GazeAnalysis/ExploitExploreRatio.py +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env python - -""" """ - -__author__ = "Théo de la Hogue" -__credits__ = [] -__copyright__ = "Copyright 2023, Ecole Nationale de l'Aviation Civile (ENAC)" -__license__ = "BSD" - -import unittest - -from argaze import GazeFeatures -from argaze.GazeAnalysis import ExploreExploitRatio -from argaze.utils import UtilsFeatures - -GazeFeaturesTest = UtilsFeatures.importFromTestPackage('GazeFeatures') - -class TestScanPathAnalyzer(unittest.TestCase): - """Test ScanPathAnalyzer class.""" - - def test_analyze(self): - """Test analyze method.""" - - xxr_analyzer = 
ExploreExploitRatio.ScanPathAnalyzer() - - scan_path = GazeFeaturesTest.build_scan_path(10) - - # Check scan path - self.assertEqual(len(scan_path), 10) - - xxr_analyzer.analyze(scan_path) - - # Check exploit explore ratio: it should greater than 1 because of build_scan_path - self.assertGreaterEqual(xxr_analyzer.explore_exploit_ratio, 1.) - -if __name__ == '__main__': - - unittest.main() \ No newline at end of file diff --git a/src/argaze.test/GazeAnalysis/ExploreExploitRatio.py b/src/argaze.test/GazeAnalysis/ExploreExploitRatio.py new file mode 100644 index 0000000..7b323d4 --- /dev/null +++ b/src/argaze.test/GazeAnalysis/ExploreExploitRatio.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python + +""" """ + +__author__ = "Théo de la Hogue" +__credits__ = [] +__copyright__ = "Copyright 2023, Ecole Nationale de l'Aviation Civile (ENAC)" +__license__ = "BSD" + +import unittest + +from argaze import GazeFeatures +from argaze.GazeAnalysis import ExploreExploitRatio +from argaze.utils import UtilsFeatures + +GazeFeaturesTest = UtilsFeatures.importFromTestPackage('GazeFeatures') + +class TestScanPathAnalyzer(unittest.TestCase): + """Test ScanPathAnalyzer class.""" + + def test_analyze(self): + """Test analyze method.""" + + xxr_analyzer = ExploreExploitRatio.ScanPathAnalyzer() + + scan_path = GazeFeaturesTest.build_scan_path(10) + + # Check scan path + self.assertEqual(len(scan_path), 10) + + xxr_analyzer.analyze(scan_path) + + # Check exploit explore ratio: it should greater than 1 because of build_scan_path + self.assertGreaterEqual(xxr_analyzer.explore_exploit_ratio, 1.) + +if __name__ == '__main__': + + unittest.main() \ No newline at end of file -- cgit v1.1 From 0d75c4adcb98426a2bc60b019bced9ae78dcf811 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Wed, 11 Oct 2023 07:35:12 +0200 Subject: Removing annotation. 
--- src/argaze/ArFeatures.py | 1 - 1 file changed, 1 deletion(-) (limited to 'src') diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index 1b24957..b4c2658 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -1418,7 +1418,6 @@ class ArCamera(ArFrame): Parameters: timestamp: gaze position time stamp (unit does'nt matter) gaze_position: GazePosition object - timeout: maximal waiting time in ms !!! warning watch method needs to be called first. -- cgit v1.1 From f849add84404d6cfa3be5b5d29b62ceb67622f89 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Wed, 11 Oct 2023 07:35:48 +0200 Subject: Fixing bad output annotation. --- src/argaze/ArUcoMarkers/ArUcoCamera.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src') diff --git a/src/argaze/ArUcoMarkers/ArUcoCamera.py b/src/argaze/ArUcoMarkers/ArUcoCamera.py index f39c516..b850dde 100644 --- a/src/argaze/ArUcoMarkers/ArUcoCamera.py +++ b/src/argaze/ArUcoMarkers/ArUcoCamera.py @@ -142,7 +142,7 @@ class ArUcoCamera(ArFeatures.ArCamera): return ArUcoCamera.from_dict(aruco_camera_data, working_directory) - def watch(self, image: numpy.array) -> Tuple[float, dict]: + def watch(self, image: numpy.array) -> Tuple[float, float, dict]: """Detect environment aruco markers from image and project scenes into camera frame. Returns: -- cgit v1.1 From 749adde269420ae7e84849bf72aa087256d10ee7 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Wed, 11 Oct 2023 11:46:38 +0200 Subject: Commenting debug print. 
--- src/argaze/ArFeatures.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'src') diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index b4c2658..a1c7349 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -1439,8 +1439,8 @@ class ArCamera(ArFrame): # return None, None # DEBUG - if waiting_time > 0: - print(f'ArCamera: waiting {waiting_time:.3f} ms before to process gaze position at {timestamp} time.') + #if waiting_time > 0: + # print(f'ArCamera: waiting {waiting_time:.3f} ms before to process gaze position at {timestamp} time.') # Lock camera frame exploitation self._frame_lock.acquire() -- cgit v1.1 From 46b3fb454275d7431e8ea894c887179c1704c84c Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Wed, 11 Oct 2023 15:17:46 +0200 Subject: Removing useless private attributes fro ArUcoDetector class. --- src/argaze/ArUcoMarkers/ArUcoDetector.py | 46 +++++++++++++++----------------- 1 file changed, 21 insertions(+), 25 deletions(-) (limited to 'src') diff --git a/src/argaze/ArUcoMarkers/ArUcoDetector.py b/src/argaze/ArUcoMarkers/ArUcoDetector.py index 490b75b..3ef7fa6 100644 --- a/src/argaze/ArUcoMarkers/ArUcoDetector.py +++ b/src/argaze/ArUcoMarkers/ArUcoDetector.py @@ -149,8 +149,6 @@ class ArUcoDetector(): # Init detected markers data self.__detected_markers = {} - self.__detected_markers_corners = [] - self.__detected_markers_ids = [] # Init detected board data self.__board = None @@ -267,31 +265,31 @@ class ArUcoDetector(): """ # Reset detected markers data - self.__detected_markers, self.__detected_markers_corners, self.__detected_markers_ids = {}, [], [] + self.__detected_markers, detected_markers_corners, detected_markers_ids = {}, [], [] # Store marker detection start date detection_start = time.perf_counter() # Detect markers into gray picture - self.__detected_markers_corners, self.__detected_markers_ids, _ = aruco.detectMarkers(cv.cvtColor(image, cv.COLOR_BGR2GRAY), self.dictionary.markers, 
parameters = self.parameters.internal) + detected_markers_corners, detected_markers_ids, _ = aruco.detectMarkers(cv.cvtColor(image, cv.COLOR_BGR2GRAY), self.dictionary.markers, parameters = self.parameters.internal) # Assess marker detection time in ms detection_time = (time.perf_counter() - detection_start) * 1e3 # Is there detected markers ? - if len(self.__detected_markers_corners) > 0: + if len(detected_markers_corners) > 0: # Transform markers ids array into list - self.__detected_markers_ids = self.__detected_markers_ids.T[0] + detected_markers_ids = detected_markers_ids.T[0] # Gather detected markers data and update metrics self.__detection_count += 1 - for i, marker_id in enumerate(self.__detected_markers_ids): + for i, marker_id in enumerate(detected_markers_ids): marker = ArUcoMarker.ArUcoMarker(self.dictionary, marker_id, self.marker_size) - marker.corners = self.__detected_markers_corners[i] + marker.corners = detected_markers_corners[i] # No pose estimation: call estimate_markers_pose to get one marker.translation = numpy.empty([0]) @@ -300,6 +298,7 @@ class ArUcoDetector(): self.__detected_markers[marker_id] = marker + # Update metrics self.__detected_ids.append(marker_id) return detection_time @@ -308,26 +307,23 @@ class ArUcoDetector(): """Estimate pose of current detected markers or of given markers id list.""" # Is there detected markers ? - if len(self.__detected_markers_corners) > 0: + if len(self.__detected_markers) > 0: - # Is there a marker selection ? 
- if len(markers_ids) > 0: + # Select all markers by default + if len(markers_ids) == 0: - selected_markers_corners = tuple() - selected_markers_ids = [] + markers_ids = self.__detected_markers.keys() - for i, marker_id in enumerate(self.__detected_markers_ids): + # Prepare data for aruco.estimatePoseSingleMarkers function + selected_markers_corners = tuple() + selected_markers_ids = [] - if marker_id in markers_ids: - - selected_markers_corners += (self.__detected_markers_corners[i],) - selected_markers_ids.append(marker_id) + for marker_id, marker in self.__detected_markers.items(): - # Otherwise, estimate pose of all markers - else: + if marker_id in markers_ids: - selected_markers_corners = self.__detected_markers_corners - selected_markers_ids = self.__detected_markers_ids + selected_markers_corners += (marker.corners,) + selected_markers_ids.append(marker_id) # Estimate pose of selected markers if len(selected_markers_corners) > 0: @@ -377,13 +373,13 @@ class ArUcoDetector(): # detect markers from gray picture gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY) - self.__detected_markers_corners, self.__detected_markers_ids, _ = aruco.detectMarkers(gray, self.dictionary.markers, parameters = self.parameters.internal) + detected_markers_corners, detected_markers_ids, _ = aruco.detectMarkers(gray, self.dictionary.markers, parameters = self.parameters.internal) # if all board markers are detected - if len(self.__detected_markers_corners) == expected_markers_number: + if len(detected_markers_corners) == expected_markers_number: self.__board = board - self.__board_corners_number, self.__board_corners, self.__board_corners_ids = aruco.interpolateCornersCharuco(self.__detected_markers_corners, self.__detected_markers_ids, gray, self.__board.model) + self.__board_corners_number, self.__board_corners, self.__board_corners_ids = aruco.interpolateCornersCharuco(detected_markers_corners, detected_markers_ids, gray, self.__board.model) else: -- cgit v1.1 From 
9e3a8e45e11a508817ae553604932171378678b2 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Wed, 11 Oct 2023 16:06:54 +0200 Subject: Smoothing marker corners if required to stabilize pose estimation. --- src/argaze/ArUcoMarkers/ArUcoDetector.py | 44 +++++++++++++++++++++++++------- 1 file changed, 35 insertions(+), 9 deletions(-) (limited to 'src') diff --git a/src/argaze/ArUcoMarkers/ArUcoDetector.py b/src/argaze/ArUcoMarkers/ArUcoDetector.py index 3ef7fa6..9e40561 100644 --- a/src/argaze/ArUcoMarkers/ArUcoDetector.py +++ b/src/argaze/ArUcoMarkers/ArUcoDetector.py @@ -131,24 +131,27 @@ class DetectorParameters(): @dataclass class ArUcoDetector(): - """ArUco markers detector.""" + """ArUco markers detector. + + Parameters: + dictionary: ArUco markers dictionary to detect. + marker_size: Size of ArUco markers to detect in centimeter. + optic_parameters: Optic parameters to use for ArUco detection into image. + parameters: ArUco detector parameters. + smooth_marker_corners: Enable marker corners smoothing to stabilize pose estimation. + """ dictionary: ArUcoMarkersDictionary.ArUcoMarkersDictionary = field(default_factory=ArUcoMarkersDictionary.ArUcoMarkersDictionary) - """ArUco markers dictionary to detect.""" - marker_size: float = field(default=0.) - """Size of ArUco markers to detect in centimeter.""" - optic_parameters: ArUcoOpticCalibrator.OpticParameters = field(default_factory=ArUcoOpticCalibrator.OpticParameters) - """Optic parameters to use for ArUco detection into image.""" - parameters: DetectorParameters = field(default_factory=DetectorParameters) - """ArUco detector parameters.""" + smooth_marker_corners: bool = field(default=False) def __post_init__(self): # Init detected markers data self.__detected_markers = {} + self.__last_detected_markers = {} # Init detected board data self.__board = None @@ -264,6 +267,9 @@ class ArUcoDetector(): detection time: marker detection time in ms. 
""" + # Copy last detected markers + self.__last_detected_markers = self.__detected_markers + # Reset detected markers data self.__detected_markers, detected_markers_corners, detected_markers_ids = {}, [], [] @@ -289,7 +295,27 @@ class ArUcoDetector(): marker = ArUcoMarker.ArUcoMarker(self.dictionary, marker_id, self.marker_size) - marker.corners = detected_markers_corners[i] + # Smooth marker corners if required + if self.smooth_marker_corners: + + # Try to smooth corners with last detected markers corners + try: + + # Smooth corners positions if the distance between new marker and last marker is lower than half marker size + half_marker_size_px = numpy.linalg.norm(detected_markers_corners[i][0][1] - detected_markers_corners[i][0][0]) / 2 + distance_to_last = numpy.linalg.norm(detected_markers_corners[i] - self.__last_detected_markers[marker_id].corners) + smooth_factor = 0. if distance_to_last > half_marker_size_px else (half_marker_size_px - distance_to_last) / half_marker_size_px + + marker.corners = numpy.rint(self.__last_detected_markers[marker_id].corners * smooth_factor + detected_markers_corners[i] * (1 - smooth_factor)) + + # Avoid smoothing if the marker was not part of last detection + except KeyError: + + marker.corners = detected_markers_corners[i] + + else: + + marker.corners = detected_markers_corners[i] # No pose estimation: call estimate_markers_pose to get one marker.translation = numpy.empty([0]) -- cgit v1.1 From 5f902cd2f41aa84267e2e27e53229268d8e4d579 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Wed, 11 Oct 2023 22:57:50 +0200 Subject: Major pose estimation improvement using SolvePnP algorithm. 
--- src/argaze/ArUcoMarkers/ArUcoCamera.py | 5 +- src/argaze/ArUcoMarkers/ArUcoDetector.py | 64 +----- src/argaze/ArUcoMarkers/ArUcoMarker.py | 2 +- src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py | 327 +++++---------------------- src/argaze/ArUcoMarkers/ArUcoScene.py | 31 +-- 5 files changed, 66 insertions(+), 363 deletions(-) (limited to 'src') diff --git a/src/argaze/ArUcoMarkers/ArUcoCamera.py b/src/argaze/ArUcoMarkers/ArUcoCamera.py index b850dde..ed6c619 100644 --- a/src/argaze/ArUcoMarkers/ArUcoCamera.py +++ b/src/argaze/ArUcoMarkers/ArUcoCamera.py @@ -189,11 +189,8 @@ class ArUcoCamera(ArFeatures.ArCamera): try: - # Estimate scene markers poses - self.aruco_detector.estimate_markers_pose(scene.aruco_markers_group.identifiers) - # Estimate scene pose from detected scene markers - tvec, rmat, _, _ = scene.estimate_pose(self.aruco_detector.detected_markers) + tvec, rmat, _ = scene.estimate_pose(self.aruco_detector.detected_markers) # Project scene into camera frame according estimated pose for layer_name, layer_projection in scene.project(tvec, rmat, self.visual_hfov, self.visual_vfov): diff --git a/src/argaze/ArUcoMarkers/ArUcoDetector.py b/src/argaze/ArUcoMarkers/ArUcoDetector.py index 9e40561..f178a20 100644 --- a/src/argaze/ArUcoMarkers/ArUcoDetector.py +++ b/src/argaze/ArUcoMarkers/ArUcoDetector.py @@ -138,20 +138,17 @@ class ArUcoDetector(): marker_size: Size of ArUco markers to detect in centimeter. optic_parameters: Optic parameters to use for ArUco detection into image. parameters: ArUco detector parameters. - smooth_marker_corners: Enable marker corners smoothing to stabilize pose estimation. """ dictionary: ArUcoMarkersDictionary.ArUcoMarkersDictionary = field(default_factory=ArUcoMarkersDictionary.ArUcoMarkersDictionary) marker_size: float = field(default=0.) 
optic_parameters: ArUcoOpticCalibrator.OpticParameters = field(default_factory=ArUcoOpticCalibrator.OpticParameters) parameters: DetectorParameters = field(default_factory=DetectorParameters) - smooth_marker_corners: bool = field(default=False) def __post_init__(self): # Init detected markers data self.__detected_markers = {} - self.__last_detected_markers = {} # Init detected board data self.__board = None @@ -267,9 +264,6 @@ class ArUcoDetector(): detection time: marker detection time in ms. """ - # Copy last detected markers - self.__last_detected_markers = self.__detected_markers - # Reset detected markers data self.__detected_markers, detected_markers_corners, detected_markers_ids = {}, [], [] @@ -294,28 +288,7 @@ class ArUcoDetector(): for i, marker_id in enumerate(detected_markers_ids): marker = ArUcoMarker.ArUcoMarker(self.dictionary, marker_id, self.marker_size) - - # Smooth marker corners if required - if self.smooth_marker_corners: - - # Try to smooth corners with last detected markers corners - try: - - # Smooth corners positions if the distance between new marker and last marker is lower than half marker size - half_marker_size_px = numpy.linalg.norm(detected_markers_corners[i][0][1] - detected_markers_corners[i][0][0]) / 2 - distance_to_last = numpy.linalg.norm(detected_markers_corners[i] - self.__last_detected_markers[marker_id].corners) - smooth_factor = 0. 
if distance_to_last > half_marker_size_px else (half_marker_size_px - distance_to_last) / half_marker_size_px - - marker.corners = numpy.rint(self.__last_detected_markers[marker_id].corners * smooth_factor + detected_markers_corners[i] * (1 - smooth_factor)) - - # Avoid smoothing if the marker was not part of last detection - except KeyError: - - marker.corners = detected_markers_corners[i] - - else: - - marker.corners = detected_markers_corners[i] + marker.corners = detected_markers_corners[i][0] # No pose estimation: call estimate_markers_pose to get one marker.translation = numpy.empty([0]) @@ -329,41 +302,6 @@ class ArUcoDetector(): return detection_time - def estimate_markers_pose(self, markers_ids: list = []): - """Estimate pose of current detected markers or of given markers id list.""" - - # Is there detected markers ? - if len(self.__detected_markers) > 0: - - # Select all markers by default - if len(markers_ids) == 0: - - markers_ids = self.__detected_markers.keys() - - # Prepare data for aruco.estimatePoseSingleMarkers function - selected_markers_corners = tuple() - selected_markers_ids = [] - - for marker_id, marker in self.__detected_markers.items(): - - if marker_id in markers_ids: - - selected_markers_corners += (marker.corners,) - selected_markers_ids.append(marker_id) - - # Estimate pose of selected markers - if len(selected_markers_corners) > 0: - - markers_rvecs, markers_tvecs, markers_points = aruco.estimatePoseSingleMarkers(selected_markers_corners, self.marker_size, numpy.array(self.optic_parameters.K), numpy.array(self.optic_parameters.D)) - - for i, marker_id in enumerate(selected_markers_ids): - - marker = self.__detected_markers[marker_id] - - marker.translation = markers_tvecs[i][0] - marker.rotation, _ = cv.Rodrigues(markers_rvecs[i][0]) - marker.points = markers_points.reshape(4, 3) - @property def detected_markers(self) -> dict[ArUcoMarkerType]: """Access to detected markers dictionary.""" diff --git 
a/src/argaze/ArUcoMarkers/ArUcoMarker.py b/src/argaze/ArUcoMarkers/ArUcoMarker.py index 57bd8bd..f088dae 100644 --- a/src/argaze/ArUcoMarkers/ArUcoMarker.py +++ b/src/argaze/ArUcoMarkers/ArUcoMarker.py @@ -68,7 +68,7 @@ class ArUcoMarker(): # Draw marker if required if color is not None: - aruco.drawDetectedMarkers(image, [self.corners], numpy.array([self.identifier]), color) + aruco.drawDetectedMarkers(image, [numpy.array([list(self.corners)])], numpy.array([self.identifier]), color) # Draw marker axes if pose has been estimated and if required if self.translation.size == 3 and self.rotation.size == 9 and draw_axes is not None: diff --git a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py index 4a43965..df390b4 100644 --- a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py +++ b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py @@ -17,8 +17,7 @@ import re from argaze.ArUcoMarkers import ArUcoMarkersDictionary, ArUcoMarker, ArUcoOpticCalibrator import numpy -import cv2 as cv -import cv2.aruco as aruco +import cv2 T0 = numpy.array([0., 0., 0.]) """Define no translation vector.""" @@ -58,37 +57,16 @@ def is_rotation_matrix(R): return n < 1e-3 -def make_euler_rotation_vector(R): - - assert(is_rotation_matrix(R)) - - sy = math.sqrt(R[0,0] * R[0,0] + R[1,0] * R[1,0]) - - singular = sy < 1e-6 - - if not singular : - x = math.atan2(R[2,1] , R[2,2]) - y = math.atan2(-R[2,0], sy) - z = math.atan2(R[1,0], R[0,0]) - else : - x = math.atan2(-R[1,2], R[1,1]) - y = math.atan2(-R[2,0], sy) - z = 0 - - return numpy.array([numpy.rad2deg(x), numpy.rad2deg(y), numpy.rad2deg(z)]) - @dataclass(frozen=True) class Place(): - """Define a place as a pose and a marker. + """Define a place as list of corners position and a marker. Parameters: - translation: position in group referential. - rotation: rotation in group referential. + corners: 3D corners position in group referential. marker: ArUco marker linked to the place. 
""" - translation: numpy.array - rotation: numpy.array + corners: numpy.array marker: dict @dataclass @@ -146,12 +124,16 @@ class ArUcoMarkersGroup(): new_marker = ArUcoMarker.ArUcoMarker(self.dictionary, identifier, self.marker_size) - new_places[identifier] = Place(tvec, rmat, new_marker) + # Build marker corners thanks to translation vector and rotation matrix + place_corners = numpy.array([[-self.marker_size/2, self.marker_size/2, 0], [self.marker_size/2, self.marker_size/2, 0], [self.marker_size/2, -self.marker_size/2, 0], [-self.marker_size/2, -self.marker_size/2, 0]]) + place_corners = place_corners.dot(rmat) + tvec + + new_places[identifier] = Place(place_corners, new_marker) - # else places are configured using detected markers + # else places are configured using detected markers estimated points elif isinstance(data, ArUcoMarker.ArUcoMarker): - new_places[identifier] = Place(data.translation, data.rotation, data) + new_places[identifier] = Place(data.points, data) # else places are already at expected format elif (type(identifier) == int) and isinstance(data, Place): @@ -160,9 +142,6 @@ class ArUcoMarkersGroup(): self.places = new_places - # Init place consistency - self.init_places_consistency() - @classmethod def from_obj(self, obj_filepath: str) -> ArUcoMarkersGroupType: """Load ArUco markers group from .obj file. 
@@ -264,28 +243,16 @@ class ArUcoMarkersGroup(): # Retreive marker vertices thanks to face vertice ids for identifier, face in faces.items(): - # Gather place corners from counter clockwise ordered face vertices - corners = numpy.array([ vertices[i-1] for i in face ]) - - # Edit translation (Tp) allowing to move world axis (W) at place axis (P) - Tp = corners.mean(axis=0) + # Gather place corners in clockwise order + cw_corners = numpy.array([ vertices[i-1] for i in reversed(face) ]) # Edit place axis from corners positions - place_x_axis = corners[1:3].mean(axis=0) - Tp + place_x_axis = cw_corners[1:3].mean(axis=0) place_x_axis_norm = numpy.linalg.norm(place_x_axis) - place_x_axis = place_x_axis / place_x_axis_norm - - place_y_axis = corners[2:4].mean(axis=0) - Tp + + place_y_axis = cw_corners[2:4].mean(axis=0) place_y_axis_norm = numpy.linalg.norm(place_y_axis) - place_y_axis = place_y_axis / place_y_axis_norm - place_z_axis = normals[identifier] - - # Edit rotation (Rp) allowing to transform world axis (W) into place axis (P) - W = numpy.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]) - P = numpy.array([place_x_axis, place_y_axis, place_z_axis]) - Rp = W.dot(P.T) - # Check axis size: they should be almost equal if math.isclose(place_x_axis_norm, place_y_axis_norm, rel_tol=1e-3): @@ -302,7 +269,7 @@ class ArUcoMarkersGroup(): # Create a new place related to a new marker new_marker = ArUcoMarker.ArUcoMarker(new_dictionary, identifier, new_marker_size) - new_places[identifier] = Place(Tp, Rp, new_marker) + new_places[identifier] = Place(cw_corners, new_marker) except IOError: raise IOError(f'File not found: {obj_filepath}') @@ -337,18 +304,7 @@ class ArUcoMarkersGroup(): output += '\n\n\tPlaces:' for identifier, place in self.places.items(): output += f'\n\t\t- {identifier}:' - output += f'\n{place.translation}' - output += f'\n{place.rotation}' - - output += '\n\n\tAngle cache:' - for A_identifier, A_angle_cache in self.__rotation_cache.items(): - for 
B_identifier, angle in A_angle_cache.items(): - output += f'\n\t\t- {A_identifier}/{B_identifier}: [{angle[0]:3f} {angle[1]:3f} {angle[2]:3f}]' - - output += '\n\n\tDistance cache:' - for A_identifier, A_distance_cache in self.__translation_cache.items(): - for B_identifier, distance in A_distance_cache.items(): - output += f'\n\t\t- {A_identifier}/{B_identifier}: {distance:3f}' + output += f'\n{place.corners}' return output @@ -381,148 +337,22 @@ class ArUcoMarkersGroup(): return group_markers, remaining_markers - def init_places_consistency(self): - """Initialize places consistency to speed up further markers consistency checking.""" - - # Process expected rotation between places combinations to speed up further calculations - self.__rotation_cache = {} - for (A_identifier, A_place), (B_identifier, B_place) in itertools.combinations(self.places.items(), 2): - - A = self.places[A_identifier].rotation - B = self.places[B_identifier].rotation - - if numpy.array_equal(A, B): - - AB_rvec = [0., 0., 0.] - BA_rvec = [0., 0., 0.] 
- - else: - - # Calculate euler angle representation of AB and BA rotation matrix - AB_rvec = make_euler_rotation_vector(B.dot(A.T)) - BA_rvec = make_euler_rotation_vector(A.dot(B.T)) - - try: - self.__rotation_cache[A_identifier][B_identifier] = AB_rvec - except: - self.__rotation_cache[A_identifier] = {B_identifier: AB_rvec} - - try: - self.__rotation_cache[B_identifier][A_identifier] = BA_rvec - except: - self.__rotation_cache[B_identifier] = {A_identifier: BA_rvec} - - # Process translation between each places combinations to speed up further calculations - self.__translation_cache = {} - for (A_identifier, A_place), (B_identifier, B_place) in itertools.combinations(self.places.items(), 2): - - A = self.places[A_identifier].translation - B = self.places[B_identifier].translation - - # Calculate translation between A and B position - AB_tvec = numpy.linalg.norm(B - A) - - try: - self.__translation_cache[A_identifier][B_identifier] = AB_tvec - except: - self.__translation_cache[A_identifier] = {B_identifier: AB_tvec} - - try: - self.__translation_cache[B_identifier][A_identifier] = AB_tvec - except: - self.__translation_cache[B_identifier] = {A_identifier: AB_tvec} + def estimate_pose_from_markers_corners(self, markers: dict, K: numpy.array, D: numpy.array) -> Tuple[bool, numpy.array, numpy.array]: + """Estimate pose from markers corners and places corners. - def check_markers_consistency(self, group_markers: dict, angle_tolerance: float, distance_tolerance: float) -> Tuple[dict, dict, dict]: - """Evaluate if given markers configuration match related places configuration. + Parameters: + markers: detected markers to use for pose estimation. 
+ K: intrinsic camera parameters + D: camera distorsion matrix Returns: - dict of consistent markers - dict of unconsistent markers - dict of identified distance or angle unconsistencies and out-of-bounds values + success: True if the pose estimation succeeded + tvec: scene translation vector + rvec: scene rotation vector """ - consistent_markers = {} - unconsistencies = {'rotation': {}, 'translation': {}} - - for (A_identifier, A_marker), (B_identifier, B_marker) in itertools.combinations(group_markers.items(), 2): - - try: - - # Rotation matrix from A marker to B marker - AB = B_marker.rotation.dot(A_marker.rotation.T) - - # Calculate euler angle representation of AB rotation matrix - AB_rvec = make_euler_rotation_vector(AB) - expected_rvec= self.__rotation_cache[A_identifier][B_identifier] - - # Calculate distance between A marker center and B marker center - AB_tvec = numpy.linalg.norm(A_marker.translation - B_marker.translation) - expected_tvec = self.__translation_cache[A_identifier][B_identifier] - - # Check angle and distance according given tolerance then normalise marker pose - consistent_rotation = numpy.allclose(AB_rvec, expected_rvec, atol=angle_tolerance) - consistent_translation = math.isclose(AB_tvec, expected_tvec, abs_tol=distance_tolerance) - - if consistent_rotation and consistent_translation: - - if A_identifier not in consistent_markers.keys(): - - # Remember this marker is already validated - consistent_markers[A_identifier] = A_marker - - if B_identifier not in consistent_markers.keys(): - - # Remember this marker is already validated - consistent_markers[B_identifier] = B_marker - - else: - - if not consistent_rotation: - unconsistencies['rotation'][f'{A_identifier}/{B_identifier}'] = {'current': AB_rvec, 'expected': expected_rvec} - - if not consistent_translation: - unconsistencies['translation'][f'{A_identifier}/{B_identifier}'] = {'current': AB_tvec, 'expected': expected_tvec} - - except KeyError: - - raise ValueError(f'Marker 
{A_identifier} or {B_identifier} don\'t belong to the group.') - - # Gather unconsistent markers - unconsistent_markers = {} - - for identifier, marker in group_markers.items(): - - if identifier not in consistent_markers.keys(): - - unconsistent_markers[identifier] = marker - - return consistent_markers, unconsistent_markers, unconsistencies - - def estimate_pose_from_single_marker(self, marker: ArUcoMarker.ArUcoMarker) -> Tuple[numpy.array, numpy.array]: - """Calculate rotation and translation that move a marker to its place.""" - - # Get the place related to the given marker - try: - - place = self.places[marker.identifier] - - # Rotation matrix that transform marker to related place - self._rotation = marker.rotation.dot(place.rotation.T) - - # Translation vector that transform marker to related place - self._translation = marker.translation - place.translation.dot(place.rotation).dot(marker.rotation.T) - - return self._translation, self._rotation - - except KeyError: - - raise ValueError(f'Marker {marker.identifier} doesn\'t belong to the group.') - - def estimate_pose_from_markers(self, markers: dict) -> Tuple[numpy.array, numpy.array]: - """Calculate average rotation and translation that move markers to their related places.""" - - rotations = [] - translations = [] + markers_corners_2d = [] + places_corners_3d = [] for identifier, marker in markers.items(): @@ -530,72 +360,23 @@ class ArUcoMarkersGroup(): place = self.places[identifier] - # Rotation matrix that transform marker to related place - R = marker.rotation.dot(place.rotation.T) - - # Translation vector that transform marker to related place - T = marker.translation - place.translation.dot(place.rotation).dot(marker.rotation.T) + for marker_corner in marker.corners: + markers_corners_2d.append(list(marker_corner)) - rotations.append(R) - translations.append(T) + for place_corner in place.corners: + places_corners_3d.append(list(place_corner)) except KeyError: raise ValueError(f'Marker 
{marker.identifier} doesn\'t belong to the group.') - # Consider ArUcoMarkersGroup rotation as the mean of all marker rotations - # !!! WARNING !!! This is a bad hack : processing rotations average is a very complex problem that needs to well define the distance calculation method before. - self._rotation = numpy.mean(numpy.array(rotations), axis=0) - - # Consider ArUcoMarkersGroup translation as the mean of all marker translations - self._translation = numpy.mean(numpy.array(translations), axis=0) - - return self._translation, self._rotation - - def estimate_pose_from_axis_markers(self, origin_marker: ArUcoMarker.ArUcoMarker, horizontal_axis_marker: ArUcoMarker.ArUcoMarker, vertical_axis_marker: ArUcoMarker.ArUcoMarker) -> Tuple[numpy.array, numpy.array]: - """Calculate rotation and translation from 3 markers defining an orthogonal axis.""" - - O_marker = origin_marker - A_marker = horizontal_axis_marker - B_marker = vertical_axis_marker - - O_place = self.places[O_marker.identifier] - A_place = self.places[A_marker.identifier] - B_place = self.places[B_marker.identifier] - - # Place axis - OA = A_place.translation - O_place.translation - OA = OA / numpy.linalg.norm(OA) - - OB = B_place.translation - O_place.translation - OB = OB / numpy.linalg.norm(OB) - - # Detect and correct bad place axis orientation - X_sign = numpy.sign(OA)[0] - Y_sign = numpy.sign(OB)[1] - - P = numpy.array([OA*X_sign, OB*Y_sign, numpy.cross(OA*X_sign, OB*Y_sign)]) - - # Marker axis - OA = A_marker.translation - O_marker.translation - OA = OA / numpy.linalg.norm(OA) - - OB = B_marker.translation - O_marker.translation - OB = OB / numpy.linalg.norm(OB) - - # Detect and correct bad place axis orientation - X_sign = numpy.sign(OA)[0] - Y_sign = -numpy.sign(OB)[1] - - M = numpy.array([OA*X_sign, OB*Y_sign, numpy.cross(OA*X_sign, OB*Y_sign)]) - - # Then estimate ArUcoMarkersGroup rotation - self._rotation = P.dot(M.T) + # Solve + success, rvec, tvec = cv2.solvePnP(numpy.array(places_corners_3d), 
numpy.array(markers_corners_2d), numpy.array(K), numpy.array(D), flags=0) - # Consider ArUcoMarkersGroup translation as the translation of the marker at axis origin - self._translation = O_marker.translation - O_place.translation.dot(O_place.rotation).dot(M.T) + self._translation = tvec.T + self._rotation = rvec.T - return self._translation, self._rotation + return success, self._translation, self._rotation @property def translation(self) -> numpy.array: @@ -624,15 +405,15 @@ class ArUcoMarkersGroup(): try: axisPoints = numpy.float32([[length, 0, 0], [0, length, 0], [0, 0, length], [0, 0, 0]]).reshape(-1, 3) - axisPoints, _ = cv.projectPoints(axisPoints, self._rotation, self._translation, numpy.array(K), numpy.array(D)) + axisPoints, _ = cv2.projectPoints(axisPoints, self._rotation, self._translation, numpy.array(K), numpy.array(D)) axisPoints = axisPoints.astype(int) - cv.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[0].ravel()), (0, 0, 255), thickness) # X (red) - cv.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[1].ravel()), (0, 255, 0), thickness) # Y (green) - cv.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[2].ravel()), (255, 0, 0), thickness) # Z (blue) + cv2.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[0].ravel()), (0, 0, 255), thickness) # X (red) + cv2.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[1].ravel()), (0, 255, 0), thickness) # Y (green) + cv2.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[2].ravel()), (255, 0, 0), thickness) # Z (blue) # Ignore errors due to out of field axis: their coordinate are larger than int32 limitations. 
- except cv.error: + except cv2.error: pass def draw_places(self, image: numpy.array, K, D, color: tuple = None, border_size: int = 0): @@ -648,16 +429,16 @@ class ArUcoMarkersGroup(): R = self.places[identifier].rotation placePoints = (T + numpy.float32([R.dot([-l, -l, 0]), R.dot([l, -l, 0]), R.dot([l, l, 0]), R.dot([-l, l, 0])])).reshape(-1, 3) - placePoints, _ = cv.projectPoints(placePoints, self._rotation, self._translation, numpy.array(K), numpy.array(D)) + placePoints, _ = cv2.projectPoints(placePoints, self._rotation, self._translation, numpy.array(K), numpy.array(D)) placePoints = placePoints.astype(int) - cv.line(image, tuple(placePoints[0].ravel()), tuple(placePoints[1].ravel()), color, border_size) - cv.line(image, tuple(placePoints[1].ravel()), tuple(placePoints[2].ravel()), color, border_size) - cv.line(image, tuple(placePoints[2].ravel()), tuple(placePoints[3].ravel()), color, border_size) - cv.line(image, tuple(placePoints[3].ravel()), tuple(placePoints[0].ravel()), color, border_size) + cv2.line(image, tuple(placePoints[0].ravel()), tuple(placePoints[1].ravel()), color, border_size) + cv2.line(image, tuple(placePoints[1].ravel()), tuple(placePoints[2].ravel()), color, border_size) + cv2.line(image, tuple(placePoints[2].ravel()), tuple(placePoints[3].ravel()), color, border_size) + cv2.line(image, tuple(placePoints[3].ravel()), tuple(placePoints[0].ravel()), color, border_size) # Ignore errors due to out of field places: their coordinate are larger than int32 limitations. 
- except cv.error: + except cv2.error: pass def draw_places_axes(self, image: numpy.array, K, D, thickness: int = 0, length: float = 0): @@ -671,15 +452,15 @@ class ArUcoMarkersGroup(): R = self.places[identifier].rotation axisPoints = (T + numpy.float32([R.dot([length, 0, 0]), R.dot([0, length, 0]), R.dot([0, 0, length]), R.dot([0, 0, 0])])).reshape(-1, 3) - axisPoints, _ = cv.projectPoints(axisPoints, self._rotation, self._translation, numpy.array(K), numpy.array(D)) + axisPoints, _ = cv2.projectPoints(axisPoints, self._rotation, self._translation, numpy.array(K), numpy.array(D)) axisPoints = axisPoints.astype(int) - cv.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[0].ravel()), (0, 0, 255), thickness) # X (red) - cv.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[1].ravel()), (0, 255, 0), thickness) # Y (green) - cv.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[2].ravel()), (255, 0, 0), thickness) # Z (blue) + cv2.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[0].ravel()), (0, 0, 255), thickness) # X (red) + cv2.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[1].ravel()), (0, 255, 0), thickness) # Y (green) + cv2.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[2].ravel()), (255, 0, 0), thickness) # Z (blue) # Ignore errors due to out of field places: their coordinate are larger than int32 limitations. 
- except cv.error: + except cv2.error: pass def draw(self, image: numpy.array, K, D, draw_axes: dict = None, draw_places: dict = None, draw_places_axes: dict = None): diff --git a/src/argaze/ArUcoMarkers/ArUcoScene.py b/src/argaze/ArUcoMarkers/ArUcoScene.py index f6b303a..b8b9cfd 100644 --- a/src/argaze/ArUcoMarkers/ArUcoScene.py +++ b/src/argaze/ArUcoMarkers/ArUcoScene.py @@ -96,14 +96,13 @@ class ArUcoScene(ArFeatures.ArScene): # Create new aruco scene using temporary ar scene values return ArUcoScene(aruco_markers_group=new_aruco_markers_group, **temp_ar_scene_values) - def estimate_pose(self, detected_markers) -> Tuple[numpy.array, numpy.array, str, dict]: + def estimate_pose(self, detected_markers) -> Tuple[numpy.array, numpy.array, dict]: """Estimate scene pose from detected ArUco markers. Returns: - scene translation vector - scene rotation matrix - pose estimation strategy - dict of markers used to estimate the pose + scene translation vector + scene rotation matrix + dict of markers used to estimate the pose """ # Pose estimation fails when no marker is detected @@ -118,26 +117,14 @@ class ArUcoScene(ArFeatures.ArScene): raise ArFeatures.PoseEstimationFailed('No marker belongs to the scene') - # Estimate scene pose from unique marker transformations - elif len(scene_markers) == 1: + # Estimate pose from a markers corners + success, tvec, rmat = self.aruco_markers_group.estimate_pose_from_markers_corners(scene_markers, self.parent.aruco_detector.optic_parameters.K, self.parent.aruco_detector.optic_parameters.D) - marker_id, marker = scene_markers.popitem() - tvec, rmat = self.aruco_markers_group.estimate_pose_from_single_marker(marker) - - return tvec, rmat, 'estimate_pose_from_single_marker', {marker_id: marker} + if not success: - # Otherwise, check markers consistency - consistent_markers, unconsistent_markers, unconsistencies = self.aruco_markers_group.check_markers_consistency(scene_markers, self.angle_tolerance, self.distance_tolerance) + raise 
ArFeatures.PoseEstimationFailed('Can\'t estimate pose from markers corners positions') - # Pose estimation fails when no marker passes consistency checking - if len(consistent_markers) == 0: - - raise ArFeatures.PoseEstimationFailed('Unconsistent marker poses', unconsistencies) - - # Otherwise, estimate scene pose from all consistent markers pose - tvec, rmat = self.aruco_markers_group.estimate_pose_from_markers(consistent_markers) - - return tvec, rmat, 'estimate_pose_from_markers', consistent_markers + return tvec, rmat, scene_markers def draw(self, image: numpy.array, draw_aruco_markers_group: dict = None): """ -- cgit v1.1 From a1ee2b893cce70ba03fbba1c12d9d0829e3e9632 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Thu, 12 Oct 2023 21:26:27 +0200 Subject: Adding estimate_markers_pose method to ArUcoDetector. --- src/argaze/ArUcoMarkers/ArUcoDetector.py | 36 ++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) (limited to 'src') diff --git a/src/argaze/ArUcoMarkers/ArUcoDetector.py b/src/argaze/ArUcoMarkers/ArUcoDetector.py index f178a20..01527a1 100644 --- a/src/argaze/ArUcoMarkers/ArUcoDetector.py +++ b/src/argaze/ArUcoMarkers/ArUcoDetector.py @@ -302,6 +302,42 @@ class ArUcoDetector(): return detection_time + def estimate_markers_pose(self, markers_ids: list = []): + """Estimate pose of current detected markers or of given markers id list.""" + + # Is there detected markers ? 
+ if len(self.__detected_markers) > 0: + + # Select all markers by default + if len(markers_ids) == 0: + + markers_ids = self.__detected_markers.keys() + + # Prepare data for aruco.estimatePoseSingleMarkers function + selected_markers_corners = tuple() + selected_markers_ids = [] + + for marker_id, marker in self.__detected_markers.items(): + + if marker_id in markers_ids: + + selected_markers_corners += (marker.corners,) + selected_markers_ids.append(marker_id) + + # Estimate pose of selected markers + if len(selected_markers_corners) > 0: + + markers_rvecs, markers_tvecs, markers_points = aruco.estimatePoseSingleMarkers(selected_markers_corners, self.marker_size, numpy.array(self.optic_parameters.K), numpy.array(self.optic_parameters.D)) + + for i, marker_id in enumerate(selected_markers_ids): + + marker = self.__detected_markers[marker_id] + + marker.translation = markers_tvecs[i][0] + marker.rotation, _ = cv.Rodrigues(markers_rvecs[i][0]) + + marker.points = markers_points.reshape(4, 3).dot(marker.rotation) + marker.translation + @property def detected_markers(self) -> dict[ArUcoMarkerType]: """Access to detected markers dictionary.""" -- cgit v1.1 From 673c21df7f231f5acd05311445686d0e521d5e7a Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Thu, 12 Oct 2023 21:26:59 +0200 Subject: minor annotation change. 
--- src/argaze/ArUcoMarkers/ArUcoMarker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src') diff --git a/src/argaze/ArUcoMarkers/ArUcoMarker.py b/src/argaze/ArUcoMarkers/ArUcoMarker.py index f088dae..0f368f6 100644 --- a/src/argaze/ArUcoMarkers/ArUcoMarker.py +++ b/src/argaze/ArUcoMarkers/ArUcoMarker.py @@ -29,7 +29,7 @@ class ArUcoMarker(): """Size of marker in centimeters.""" corners: numpy.array = field(init=False, repr=False) - """Estimated 2D corner positions in camera image referential.""" + """Estimated 2D corners position in camera image referential.""" translation: numpy.array = field(init=False, repr=False) """Estimated 3D center position in camera world referential.""" -- cgit v1.1 From f26058148061f80eb4bb3fe16d6a24c910bf8bd5 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Thu, 12 Oct 2023 21:27:31 +0200 Subject: Fixing ArUcoMarkersGroup.to_obj method. --- src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) (limited to 'src') diff --git a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py index df390b4..bedd408 100644 --- a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py +++ b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py @@ -497,26 +497,24 @@ class ArUcoMarkersGroup(): v_count = 0 - for identifier, place in self.places.items(): + for p, (identifier, place) in enumerate(self.places.items()): file.write(f'o {self.dictionary.name}#{identifier}_Marker\n') vertices = '' - T = place.translation - R = place.rotation - - points = (T + numpy.float32([R.dot(place.marker.points[0]), R.dot(place.marker.points[1]), R.dot(place.marker.points[2]), R.dot(place.marker.points[3])])).reshape(-1, 3) - - print(points) - # Write vertices in reverse order - for i in [3, 2, 1, 0]: + for v in [3, 2, 1, 0]: - file.write(f'v {" ".join(map(str, points[i]))}\n') + file.write(f'v {" ".join(map(str, place.corners[v]))}\n') v_count += 1 - vertices 
+= f' {v_count}' + vertices += f' {v_count}//{p+1}' + + # Write normal vector + nvec = numpy.cross(place.corners[-1] - place.corners[0], place.corners[1] - place.corners[0]) + nvec = nvec / numpy.linalg.norm(nvec) + file.write(f'vn {nvec[0]} {nvec[1]} {nvec[2]}\n') - file.write('s off\n') + #file.write('s off\n') file.write(f'f{vertices}\n') -- cgit v1.1 From a2960562b3a66f610d8d7d8e80faedc2fff024b8 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Mon, 16 Oct 2023 12:19:05 +0200 Subject: Improving DeviationcircleCoverage module. Outputting probabilities instead of coverages values. --- src/argaze/GazeAnalysis/DeviationCircleCoverage.py | 27 +++++++++++----------- 1 file changed, 14 insertions(+), 13 deletions(-) (limited to 'src') diff --git a/src/argaze/GazeAnalysis/DeviationCircleCoverage.py b/src/argaze/GazeAnalysis/DeviationCircleCoverage.py index f57d432..acc0665 100644 --- a/src/argaze/GazeAnalysis/DeviationCircleCoverage.py +++ b/src/argaze/GazeAnalysis/DeviationCircleCoverage.py @@ -33,8 +33,8 @@ class AOIMatcher(GazeFeatures.AOIMatcher): self.__look_count = 0 self.__looked_aoi_data = (None, None) + self.__looked_probabilities = {} self.__circle_ratio_sum = {} - self.__aoi_coverages = {} self.__matched_gaze_movement = None self.__matched_region = None @@ -54,7 +54,7 @@ class AOIMatcher(GazeFeatures.AOIMatcher): # BAD: we use deviation_max attribute which is an attribute of DispersionThresholdIdentification.Fixation class region, _, circle_ratio = aoi.circle_intersection(gaze_movement.focus, gaze_movement.deviation_max) - if name not in self.exclude and circle_ratio > 0: + if name not in self.exclude and circle_ratio > self.coverage_threshold: # Sum circle ratio to update aoi coverage try: @@ -78,15 +78,15 @@ class AOIMatcher(GazeFeatures.AOIMatcher): # Update looked aoi data self.__looked_aoi_data = most_likely_looked_aoi_data - # Calculate looked aoi circle ratio means - self.__aoi_coverages = {} + # Calculate circle ratio means as looked 
probabilities + self.__looked_probabilities = {} for aoi_name, circle_ratio_sum in self.__circle_ratio_sum.items(): circle_ratio_mean = circle_ratio_sum / self.__look_count - # filter circle ration mean greater than 1 - self.__aoi_coverages[aoi_name] = circle_ratio_mean if circle_ratio_mean < 1 else 1 + # Avoid probability greater than 1 + self.__looked_probabilities[aoi_name] = circle_ratio_mean if circle_ratio_mean < 1 else 1 # Update matched gaze movement self.__matched_gaze_movement = gaze_movement @@ -95,9 +95,7 @@ class AOIMatcher(GazeFeatures.AOIMatcher): self.__matched_region = matched_region # Return - if self.__aoi_coverages[most_likely_looked_aoi_data[0]] > self.coverage_threshold: - - return self.__looked_aoi_data + return self.__looked_aoi_data elif GazeFeatures.is_saccade(gaze_movement): @@ -173,8 +171,11 @@ class AOIMatcher(GazeFeatures.AOIMatcher): return self.__looked_aoi_data[0] @property - def aoi_coverages(self) -> dict: - """Get all aoi coverage means for current fixation. - It represents the ratio of fixation deviation circle surface that used to cover the aoi.""" + def looked_probabilities(self) -> dict: + """Get probabilities to be looked by current fixation for each aoi. + + !!! note + aoi where fixation deviation circle never passed the coverage threshold will be missing. + """ - return self.__aoi_coverages \ No newline at end of file + return self.__looked_probabilities \ No newline at end of file -- cgit v1.1 From f18c8dc95e1016f0783808fd5ab531fee62f4998 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Mon, 16 Oct 2023 16:49:58 +0200 Subject: Failing to estimate pose when only one marker belongs to the scene. 
--- src/argaze/ArUcoMarkers/ArUcoScene.py | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'src') diff --git a/src/argaze/ArUcoMarkers/ArUcoScene.py b/src/argaze/ArUcoMarkers/ArUcoScene.py index b8b9cfd..51dd88c 100644 --- a/src/argaze/ArUcoMarkers/ArUcoScene.py +++ b/src/argaze/ArUcoMarkers/ArUcoScene.py @@ -117,6 +117,11 @@ class ArUcoScene(ArFeatures.ArScene): raise ArFeatures.PoseEstimationFailed('No marker belongs to the scene') + # Pose estimation fails if only one marker belongs to the scene + if len(scene_markers) == 1: + + raise ArFeatures.PoseEstimationFailed('Only one marker belongs to the scene') + # Estimate pose from a markers corners success, tvec, rmat = self.aruco_markers_group.estimate_pose_from_markers_corners(scene_markers, self.parent.aruco_detector.optic_parameters.K, self.parent.aruco_detector.optic_parameters.D) -- cgit v1.1 From 0835382b4b12652e23dcebe2456431c3e625dc3a Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Mon, 16 Oct 2023 19:32:15 +0200 Subject: Fixing marker size guessing. 
--- src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'src') diff --git a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py index bedd408..d30a730 100644 --- a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py +++ b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py @@ -247,16 +247,16 @@ class ArUcoMarkersGroup(): cw_corners = numpy.array([ vertices[i-1] for i in reversed(face) ]) # Edit place axis from corners positions - place_x_axis = cw_corners[1:3].mean(axis=0) + place_x_axis = cw_corners[2] - cw_corners[3] place_x_axis_norm = numpy.linalg.norm(place_x_axis) - place_y_axis = cw_corners[2:4].mean(axis=0) + place_y_axis = cw_corners[0] - cw_corners[3] place_y_axis_norm = numpy.linalg.norm(place_y_axis) # Check axis size: they should be almost equal if math.isclose(place_x_axis_norm, place_y_axis_norm, rel_tol=1e-3): - current_marker_size = place_x_axis_norm*2 + current_marker_size = place_x_axis_norm # Check that all markers size are almost equal if new_marker_size > 0: -- cgit v1.1 From 4c9abeb88568d3725f47ecd1a47ddf74767157e2 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Mon, 16 Oct 2023 22:37:03 +0200 Subject: Using SolvePnP with flag 3. 
--- src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src') diff --git a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py index d30a730..dddead4 100644 --- a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py +++ b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py @@ -371,7 +371,7 @@ class ArUcoMarkersGroup(): raise ValueError(f'Marker {marker.identifier} doesn\'t belong to the group.') # Solve - success, rvec, tvec = cv2.solvePnP(numpy.array(places_corners_3d), numpy.array(markers_corners_2d), numpy.array(K), numpy.array(D), flags=0) + success, rvec, tvec = cv2.solvePnP(numpy.array(places_corners_3d), numpy.array(markers_corners_2d), numpy.array(K), numpy.array(D), flags=3) self._translation = tvec.T self._rotation = rvec.T -- cgit v1.1 From 9c6f43140bbe9a387e74a725843914439bd5c1fc Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 17 Oct 2023 08:47:41 +0200 Subject: Adding annotation. --- src/argaze/ArUcoMarkers/ArUcoDetector.py | 3 +++ 1 file changed, 3 insertions(+) (limited to 'src') diff --git a/src/argaze/ArUcoMarkers/ArUcoDetector.py b/src/argaze/ArUcoMarkers/ArUcoDetector.py index 01527a1..51f8366 100644 --- a/src/argaze/ArUcoMarkers/ArUcoDetector.py +++ b/src/argaze/ArUcoMarkers/ArUcoDetector.py @@ -260,6 +260,9 @@ class ArUcoDetector(): !!! danger "DON'T MIRROR IMAGE" It makes the markers detection to fail. + !!! danger "DON'T UNDISTORED IMAGE" + Camera intrisic parameters and distorsion coefficients are used later during pose estimation. + Returns: detection time: marker detection time in ms. """ -- cgit v1.1 From 3934f14dbab4206d091f07dd29bf3ad3dfb9b787 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 17 Oct 2023 09:14:11 +0200 Subject: Adding useAruco3Detection as possible parameters. 
--- src/argaze/ArUcoMarkers/ArUcoDetector.py | 3 ++- src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py | 11 +++++++++-- 2 files changed, 11 insertions(+), 3 deletions(-) (limited to 'src') diff --git a/src/argaze/ArUcoMarkers/ArUcoDetector.py b/src/argaze/ArUcoMarkers/ArUcoDetector.py index 51f8366..e62a42e 100644 --- a/src/argaze/ArUcoMarkers/ArUcoDetector.py +++ b/src/argaze/ArUcoMarkers/ArUcoDetector.py @@ -72,7 +72,8 @@ class DetectorParameters(): 'minOtsuStdDev', 'perspectiveRemoveIgnoredMarginPerCell', 'perspectiveRemovePixelPerCell', - 'polygonalApproxAccuracyRate' + 'polygonalApproxAccuracyRate', + 'useAruco3Detection' ] def __init__(self, **kwargs): diff --git a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py index dddead4..8600690 100644 --- a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py +++ b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py @@ -370,8 +370,15 @@ class ArUcoMarkersGroup(): raise ValueError(f'Marker {marker.identifier} doesn\'t belong to the group.') - # Solve - success, rvec, tvec = cv2.solvePnP(numpy.array(places_corners_3d), numpy.array(markers_corners_2d), numpy.array(K), numpy.array(D), flags=3) + # SolvPnP using cv2.SOLVEPNP_SQPNP flag + # TODO: it works also with cv2.SOLVEPNP_EPNP flag so we need to test which is the faster. 
+ # About SolvPnP flags: https://docs.opencv.org/4.x/d5/d1f/calib3d_solvePnP.html + success, rvec, tvec = cv2.solvePnP(numpy.array(places_corners_3d), numpy.array(markers_corners_2d), numpy.array(K), numpy.array(D), flags=cv2.SOLVEPNP_SQPNP) + + # Refine pose estimation using Gauss-Newton optimisation + #if success : + + #rvec, tvec = cv2.solvePnPRefineVVS(numpy.array(places_corners_3d), numpy.array(markers_corners_2d), numpy.array(K), numpy.array(D), rvec, tvec) self._translation = tvec.T self._rotation = rvec.T -- cgit v1.1 From 77914e2aa25623a237a58b7c80f712129cbb2b55 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 17 Oct 2023 09:15:51 +0200 Subject: Uncommenting pose estimation optimisation --- src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'src') diff --git a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py index 8600690..edae927 100644 --- a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py +++ b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py @@ -376,9 +376,9 @@ class ArUcoMarkersGroup(): success, rvec, tvec = cv2.solvePnP(numpy.array(places_corners_3d), numpy.array(markers_corners_2d), numpy.array(K), numpy.array(D), flags=cv2.SOLVEPNP_SQPNP) # Refine pose estimation using Gauss-Newton optimisation - #if success : + if success : - #rvec, tvec = cv2.solvePnPRefineVVS(numpy.array(places_corners_3d), numpy.array(markers_corners_2d), numpy.array(K), numpy.array(D), rvec, tvec) + rvec, tvec = cv2.solvePnPRefineVVS(numpy.array(places_corners_3d), numpy.array(markers_corners_2d), numpy.array(K), numpy.array(D), rvec, tvec) self._translation = tvec.T self._rotation = rvec.T -- cgit v1.1 From 1d46c5816ba603105dfaa1b5a79f3a167fdc99d8 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 17 Oct 2023 12:56:41 +0200 Subject: Adding GazePositionCalibrator class. Adding LinearRegression module. 
--- src/argaze/GazeAnalysis/LinearRegression.py | 80 +++++++++++++++++++++++++++++ src/argaze/GazeAnalysis/__init__.py | 2 +- src/argaze/GazeFeatures.py | 56 ++++++++++++++++++++ 3 files changed, 137 insertions(+), 1 deletion(-) create mode 100644 src/argaze/GazeAnalysis/LinearRegression.py (limited to 'src') diff --git a/src/argaze/GazeAnalysis/LinearRegression.py b/src/argaze/GazeAnalysis/LinearRegression.py new file mode 100644 index 0000000..5a92048 --- /dev/null +++ b/src/argaze/GazeAnalysis/LinearRegression.py @@ -0,0 +1,80 @@ +#!/usr/bin/env python + +"""Module for gaze position calibration based on linear regression. +""" + +__author__ = "Théo de la Hogue" +__credits__ = [] +__copyright__ = "Copyright 2023, Ecole Nationale de l'Aviation Civile (ENAC)" +__license__ = "BSD" + +from typing import TypeVar, Tuple +from dataclasses import dataclass, field + +from argaze import GazeFeatures + +from sklearn.linear_model import LinearRegression +import numpy +import cv2 + +GazePositionType = TypeVar('GazePositionType', bound="GazePositionType") +# Type definition for type annotation convenience + +@dataclass +class GazePositionCalibrator(GazeFeatures.GazePositionCalibrator): + """Calibration algorithm based on linear regression.""" + + coefficients: numpy.array = field(default_factory=lambda : numpy.array([[1., 0.], [0., 1.]])) + """Linear regression coefficients""" + + intercept: numpy.array = field(default_factory=lambda : numpy.array([0., 0.])) + """Linear regression intercept value""" + + def __post_init__(self): + """Init calibration data.""" + + self.reset() + + def store(self, timestamp: int|float, observed_gaze_position: GazeFeatures.GazePosition, expected_gaze_position: GazeFeatures.GazePosition): + """Store observed and expected gaze positions.""" + + self.__observed_positions.append(observed_gaze_position.value) + self.__expected_positions.append(expected_gaze_position.value) + + def reset(self): + """Reset observed and expected gaze positions.""" + + 
self.__observed_positions = [] + self.__expected_positions = [] + self.__linear_regression = None + + def calibrate(self) -> float: + """Process calibration from observed and expected gaze positions. + + Returns: + score: the score of linear regression + """ + + self.__linear_regression = LinearRegression().fit(self.__observed_positions, self.__expected_positions) + + return self.__linear_regression.score(self.__observed_positions, self.__expected_positions) + + def apply(self, gaze_position: GazeFeatures.GazePosition) -> GazePositionType: + """Apply calibration onto observed gaze position.""" + + return GazeFeatures.GazePosition(self.__linear_regression.predict(numpy.array([gaze_position.value]))[0], precision=gaze_position.precision) + + def draw(self, image: numpy.array): + """Draw calibration into image. + + Parameters: + image: where to draw + """ + + raise NotImplementedError('draw() method not implemented') + + @property + def ready(self) -> bool: + """Is the calibrator ready?""" + + return self.__linear_regression is not None \ No newline at end of file diff --git a/src/argaze/GazeAnalysis/__init__.py b/src/argaze/GazeAnalysis/__init__.py index 62e0823..c110eb1 100644 --- a/src/argaze/GazeAnalysis/__init__.py +++ b/src/argaze/GazeAnalysis/__init__.py @@ -1,4 +1,4 @@ """ Various gaze movement identification, AOI matching and scan path analysis algorithms. 
""" -__all__ = ['Basic', 'DispersionThresholdIdentification', 'VelocityThresholdIdentification', 'TransitionMatrix', 'KCoefficient', 'LempelZivComplexity', 'NGram', 'Entropy', 'NearestNeighborIndex', 'ExploreExploitRatio'] \ No newline at end of file +__all__ = ['Basic', 'DispersionThresholdIdentification', 'VelocityThresholdIdentification', 'TransitionMatrix', 'KCoefficient', 'LempelZivComplexity', 'NGram', 'Entropy', 'NearestNeighborIndex', 'ExploreExploitRatio', 'LinearRegression'] \ No newline at end of file diff --git a/src/argaze/GazeFeatures.py b/src/argaze/GazeFeatures.py index bd1a3da..b918256 100644 --- a/src/argaze/GazeFeatures.py +++ b/src/argaze/GazeFeatures.py @@ -201,6 +201,62 @@ class TimeStampedGazePositions(DataStructures.TimeStampedBuffer): return TimeStampedGazePositions(df.to_dict('index')) +@dataclass +class GazePositionCalibrator(): + """Abstract class to define what should provide a gaze position calibrator algorithm.""" + + def store(self, timestamp: int|float, observed_gaze_position: GazePosition, expected_gaze_position: GazePosition): + """Store observed and expected gaze positions. + + Parameters: + timestamp: time of observed gaze position + observed_gaze_position: where gaze position actually is + expected_gaze_position: where gaze position should be + """ + + raise NotImplementedError('calibrate() method not implemented') + + def reset(self): + """Reset observed and expected gaze positions.""" + + raise NotImplementedError('reset() method not implemented') + + def calibrate(self) -> Any: + """Process calibration from observed and expected gaze positions. + + Returns: + calibration outputs: any data returned to assess calibration + """ + + raise NotImplementedError('terminate() method not implemented') + + def apply(self, observed_gaze_position: GazePosition) -> GazePositionType: + """Apply calibration onto observed gaze position. 
+ + Parameters: + observed_gaze_position: where gaze position actually is + + Returns: + expected_gaze_position: where gaze position should be + """ + + raise NotImplementedError('process() method not implemented') + + def draw(self, image: numpy.array): + """Draw calibration into image. + + Parameters: + image: where to draw + """ + + raise NotImplementedError('draw() method not implemented') + + @property + def ready(self) -> bool: + """Is the calibrator ready?""" + + raise NotImplementedError('ready getter not implemented') + GazeMovementType = TypeVar('GazeMovement', bound="GazeMovement") # Type definition for type annotation convenience -- cgit v1.1 From 322fa8af22f8880d58506fc18f4205ac4d3f937a Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 17 Oct 2023 15:58:55 +0200 Subject: adding gaze_position_calibrator to ArFrame. --- src/argaze/ArFeatures.py | 40 +++++++++++++++++++++++++---- src/argaze/GazeAnalysis/LinearRegression.py | 27 ++++++++++++++----- src/argaze/GazeFeatures.py | 19 +++++++++----- 3 files changed, 69 insertions(+), 17 deletions(-) (limited to 'src') diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index a1c7349..cb1b2f6 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -523,7 +523,8 @@ class ArFrame(): Parameters: name: name of the frame - size: defines the dimension of the rectangular area where gaze positions are projected. 
+ size: defines the dimension of the rectangular area where gaze positions are projected + gaze_position_calibrator: gaze position calibration algoritm gaze_movement_identifier: gaze movement identification algorithm filter_in_progress_identification: ignore in progress gaze movement identification scan_path: scan path object @@ -537,6 +538,7 @@ class ArFrame(): name: str size: tuple[int] = field(default=(1, 1)) + gaze_position_calibrator: GazeFeatures.GazePositionCalibrator = field(default_factory=GazeFeatures.GazePositionCalibrator) gaze_movement_identifier: GazeFeatures.GazeMovementIdentifier = field(default_factory=GazeFeatures.GazeMovementIdentifier) filter_in_progress_identification: bool = field(default=True) scan_path: GazeFeatures.ScanPath = field(default_factory=GazeFeatures.ScanPath) @@ -600,6 +602,24 @@ class ArFrame(): new_frame_size = (0, 0) + # Load gaze position calibrator + try: + + gaze_position_calibrator_value = frame_data.pop('gaze_position_calibrator') + + gaze_position_calibrator_module_path, gaze_position_calibrator_parameters = gaze_position_calibrator_value.popitem() + + # Prepend argaze.GazeAnalysis path when a single name is provided + if len(gaze_position_calibrator_module_path.split('.')) == 1: + gaze_position_calibrator_module_path = f'argaze.GazeAnalysis.{gaze_position_calibrator_module_path}' + + gaze_position_calibrator_module = importlib.import_module(gaze_position_calibrator_module_path) + new_gaze_position_calibrator = gaze_position_calibrator_module.GazePositionCalibrator(**gaze_position_calibrator_parameters) + + except KeyError: + + new_gaze_position_calibrator = None + # Load gaze movement identifier try: @@ -756,6 +776,7 @@ class ArFrame(): # Create frame return ArFrame(new_frame_name, \ new_frame_size, \ + new_gaze_position_calibrator, \ new_gaze_movement_identifier, \ filter_in_progress_identification, \ new_scan_path, \ @@ -815,6 +836,7 @@ class ArFrame(): gaze_position: gaze position to project Returns: + 
current_gaze_position: calibrated gaze position if gaze_position_calibrator is instanciated else, given gaze position. identified_gaze_movement: identified gaze movement from incoming consecutive timestamped gaze positions if gaze_movement_identifier is instanciated. Current gaze movement if filter_in_progress_identification is False. scan_path_analysis: scan path analysis at each new scan step if scan_path is instanciated. layers_analysis: aoi scan path analysis at each new aoi scan step for each instanciated layers aoi scan path. @@ -828,9 +850,6 @@ class ArFrame(): # Store look execution start date look_start = time.perf_counter() - # Update current gaze position - self.__gaze_position = gaze_position - # No gaze movement identified by default identified_gaze_movement = GazeFeatures.UnvalidGazeMovement() @@ -853,6 +872,16 @@ class ArFrame(): try: + # Apply gaze position calibration + if self.gaze_position_calibrator is not None: + + self.__gaze_position = self.gaze_position_calibrator.apply(gaze_position) + + # Or update gaze position at least + else: + + self.__gaze_position = gaze_position + # Identify gaze movement if self.gaze_movement_identifier is not None: @@ -942,6 +971,7 @@ class ArFrame(): print('Warning: the following error occurs in ArFrame.look method:', e) + self.__gaze_position = GazeFeatures.UnvalidGazePosition() identified_gaze_movement = GazeFeatures.UnvalidGazeMovement() scan_step_analysis = {} layer_analysis = {} @@ -954,7 +984,7 @@ class ArFrame(): self.__look_lock.release() # Return look data - return identified_gaze_movement, scan_step_analysis, layer_analysis, execution_times, exception + return self.__gaze_position, identified_gaze_movement, scan_step_analysis, layer_analysis, execution_times, exception def __image(self, background_weight: float = None, heatmap_weight: float = None, draw_scan_path: dict = None, draw_layers: dict = None, draw_gaze_positions: dict = None, draw_fixations: dict = None, draw_saccades: dict = None) -> 
numpy.array: """ diff --git a/src/argaze/GazeAnalysis/LinearRegression.py b/src/argaze/GazeAnalysis/LinearRegression.py index 5a92048..de7725d 100644 --- a/src/argaze/GazeAnalysis/LinearRegression.py +++ b/src/argaze/GazeAnalysis/LinearRegression.py @@ -31,9 +31,11 @@ class GazePositionCalibrator(GazeFeatures.GazePositionCalibrator): """Linear regression intercept value""" def __post_init__(self): - """Init calibration data.""" + """Init calibration.""" - self.reset() + self.__linear_regression = LinearRegression() + self.__linear_regression.coef_ = numpy.array(self.coefficients) + self.__linear_regression.intercept_ = numpy.array(self.intercept) def store(self, timestamp: int|float, observed_gaze_position: GazeFeatures.GazePosition, expected_gaze_position: GazeFeatures.GazePosition): """Store observed and expected gaze positions.""" @@ -57,12 +59,25 @@ class GazePositionCalibrator(GazeFeatures.GazePositionCalibrator): self.__linear_regression = LinearRegression().fit(self.__observed_positions, self.__expected_positions) + # Update frozen coefficients attribute + object.__setattr__(self, 'coefficients', self.__linear_regression.coef_) + + # Update frozen intercept attribute + object.__setattr__(self, 'intercept', self.__linear_regression.intercept_) + + # Return calibrated gaze position return self.__linear_regression.score(self.__observed_positions, self.__expected_positions) def apply(self, gaze_position: GazeFeatures.GazePosition) -> GazePositionType: """Apply calibration onto observed gaze position.""" - return GazeFeatures.GazePosition(self.__linear_regression.predict(numpy.array([gaze_position.value]))[0], precision=gaze_position.precision) + if not self.calibrating: + + return GazeFeatures.GazePosition(self.__linear_regression.predict(numpy.array([gaze_position.value]))[0], precision=gaze_position.precision) + + else: + + return gaze_position def draw(self, image: numpy.array): """Draw calibration into image. 
@@ -74,7 +89,7 @@ class GazePositionCalibrator(GazeFeatures.GazePositionCalibrator): raise NotImplementedError('draw() method not implemented') @property - def ready(self) -> bool: - """Is the calibrator ready?""" + def calibrating(self) -> bool: + """Is the calibration running?""" - return self.__linear_regression is not None \ No newline at end of file + return self.__linear_regression is None \ No newline at end of file diff --git a/src/argaze/GazeFeatures.py b/src/argaze/GazeFeatures.py index b918256..eddd01d 100644 --- a/src/argaze/GazeFeatures.py +++ b/src/argaze/GazeFeatures.py @@ -201,6 +201,13 @@ class TimeStampedGazePositions(DataStructures.TimeStampedBuffer): return TimeStampedGazePositions(df.to_dict('index')) +class GazePositionCalibrationFailed(Exception): + """Exception raised by GazePositionCalibrator.""" + + def __init__(self, message): + + super().__init__(message) + @dataclass class GazePositionCalibrator(): """Abstract class to define what should provide a gaze position calibrator algorithm.""" @@ -237,10 +244,10 @@ class GazePositionCalibrator(): observed_gaze_position: where gaze position actually is Returns: - expected_gaze_position: where gaze position should be + expected_gaze_position: where gaze position should be if the calibrator is ready else, observed gaze position """ - raise NotImplementedError('process() method not implemented') + raise NotImplementedError('apply() method not implemented') def draw(self, image: numpy.array): """Draw calibration into image. 
@@ -252,8 +259,8 @@ class GazePositionCalibrator(): raise NotImplementedError('draw() method not implemented') @property - def ready(self) -> bool: - """Is the calibrator ready?""" + def calibrating(self) -> bool: + """Is the calibration running?""" raise NotImplementedError('ready getter not implemented') @@ -601,7 +608,7 @@ ScanStepType = TypeVar('ScanStep', bound="ScanStep") # Type definition for type annotation convenience class ScanStepError(Exception): - """Exception raised at ScanStepError creation if a aoi scan step doesn't start by a fixation or doesn't end by a saccade.""" + """Exception raised at ScanStep creation if a aoi scan step doesn't start by a fixation or doesn't end by a saccade.""" def __init__(self, message): @@ -811,7 +818,7 @@ AOIScanStepType = TypeVar('AOIScanStep', bound="AOIScanStep") # Type definition for type annotation convenience class AOIScanStepError(Exception): - """Exception raised at AOIScanStepError creation if a aoi scan step doesn't start by a fixation or doesn't end by a saccade.""" + """Exception raised at AOIScanStep creation if a aoi scan step doesn't start by a fixation or doesn't end by a saccade.""" def __init__(self, message, aoi=''): -- cgit v1.1 From 86264748b88700ae7a197bfee7004c5114b17225 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 17 Oct 2023 17:55:03 +0200 Subject: Fixing JsonEncoder to handle class with numpy attributes. --- src/argaze/DataStructures.py | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) (limited to 'src') diff --git a/src/argaze/DataStructures.py b/src/argaze/DataStructures.py index 08a7d2c..b5101b2 100644 --- a/src/argaze/DataStructures.py +++ b/src/argaze/DataStructures.py @@ -45,6 +45,15 @@ def as_dict(dataclass_object) -> dict: # Copy fields values return {name: vars(dataclass_object)[name] for name in fields_names} +def module_path(obj): + """ + Get object module path. 
+ + Returns: + module path + """ + return obj.__class__.__module__ + class JsonEncoder(json.JSONEncoder): """Specific ArGaze JSON Encoder.""" @@ -55,10 +64,10 @@ class JsonEncoder(json.JSONEncoder): if isinstance(obj, numpy.integer): return int(obj) - if isinstance(obj, numpy.floating): + elif isinstance(obj, numpy.floating): return float(obj) - if isinstance(obj, numpy.ndarray): + elif isinstance(obj, numpy.ndarray): return obj.tolist() # default case @@ -73,7 +82,19 @@ class JsonEncoder(json.JSONEncoder): public_dict = {} for k, v in vars(obj).items(): + if not k.startswith('_'): + + # numpy cases + if isinstance(v, numpy.integer): + v = int(v) + + elif isinstance(v, numpy.floating): + v = float(v) + + elif isinstance(v, numpy.ndarray): + v = v.tolist() + public_dict[k] = v return public_dict -- cgit v1.1 From 45af88fcb056ca0d5fd1be49972cef9b0f275fad Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 17 Oct 2023 17:57:08 +0200 Subject: Loading and saving GazePositionCalibrator instance from JSON file. 
--- src/argaze/ArFeatures.py | 19 +++++++++++++------ src/argaze/GazeFeatures.py | 45 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 58 insertions(+), 6 deletions(-) (limited to 'src') diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index cb1b2f6..2e278ea 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -607,14 +607,21 @@ class ArFrame(): gaze_position_calibrator_value = frame_data.pop('gaze_position_calibrator') - gaze_position_calibrator_module_path, gaze_position_calibrator_parameters = gaze_position_calibrator_value.popitem() + # str: relative path to file + if type(gaze_position_calibrator_value) == str: - # Prepend argaze.GazeAnalysis path when a single name is provided - if len(gaze_position_calibrator_module_path.split('.')) == 1: - gaze_position_calibrator_module_path = f'argaze.GazeAnalysis.{gaze_position_calibrator_module_path}' + filepath = os.path.join(working_directory, gaze_position_calibrator_value) + file_format = filepath.split('.')[-1] + + # JSON file format + if file_format == 'json': + + new_gaze_position_calibrator = GazeFeatures.GazePositionCalibrator.from_json(filepath) + + # dict: + else: - gaze_position_calibrator_module = importlib.import_module(gaze_position_calibrator_module_path) - new_gaze_position_calibrator = gaze_position_calibrator_module.GazePositionCalibrator(**gaze_position_calibrator_parameters) + new_gaze_position_calibrator = GazePositionCalibrator.from_dict(gaze_position_calibrator_value) except KeyError: diff --git a/src/argaze/GazeFeatures.py b/src/argaze/GazeFeatures.py index eddd01d..46e9f17 100644 --- a/src/argaze/GazeFeatures.py +++ b/src/argaze/GazeFeatures.py @@ -12,6 +12,7 @@ from dataclasses import dataclass, field import math import ast import json +import importlib from inspect import getmembers from argaze import DataStructures @@ -208,10 +209,54 @@ class GazePositionCalibrationFailed(Exception): super().__init__(message) +GazePositionCalibratorType = 
TypeVar('GazePositionCalibrator', bound="GazePositionCalibrator") +# Type definition for type annotation convenience + @dataclass class GazePositionCalibrator(): """Abstract class to define what should provide a gaze position calibrator algorithm.""" + @classmethod + def from_dict(self, calibrator_data: dict) -> GazePositionCalibratorType: + """Load gaze position calibrator from dictionary. + + Parameters: + calibrator_data: dictionary with class name and attributes to load + """ + gaze_position_calibrator_module_path, gaze_position_calibrator_parameters = calibrator_data.popitem() + + # Prepend argaze.GazeAnalysis path when a single name is provided + if len(gaze_position_calibrator_module_path.split('.')) == 1: + gaze_position_calibrator_module_path = f'argaze.GazeAnalysis.{gaze_position_calibrator_module_path}' + + gaze_position_calibrator_module = importlib.import_module(gaze_position_calibrator_module_path) + return gaze_position_calibrator_module.GazePositionCalibrator(**gaze_position_calibrator_parameters) + + @classmethod + def from_json(self, json_filepath: str) -> GazePositionCalibratorType: + """Load calibrator from .json file.""" + + # Remember file path to ease rewriting + self.__json_filepath = json_filepath + + # Open file + with open(self.__json_filepath) as calibration_file: + + return GazePositionCalibrator.from_dict(json.load(calibration_file)) + + def to_json(self, json_filepath: str = None): + """Save calibrator into .json file.""" + + # Remember file path to ease rewriting + if json_filepath is not None: + + self.__json_filepath = json_filepath + + # Open file + with open(self.__json_filepath, 'w', encoding='utf-8') as calibration_file: + + json.dump({DataStructures.module_path(self):DataStructures.JsonEncoder().default(self)}, calibration_file, ensure_ascii=False, indent=4) + def store(self, timestamp: int|float, observed_gaze_position: GazePosition, expected_gaze_position: GazePosition): """Store observed and expected gaze positions. 
-- cgit v1.1 From 07fb4ce51650e9b0edaf1b9ebc01c1b9589c9a54 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 17 Oct 2023 18:40:18 +0200 Subject: Implementing LinearRegression drawing function. --- src/argaze/GazeAnalysis/LinearRegression.py | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) (limited to 'src') diff --git a/src/argaze/GazeAnalysis/LinearRegression.py b/src/argaze/GazeAnalysis/LinearRegression.py index de7725d..0e10b87 100644 --- a/src/argaze/GazeAnalysis/LinearRegression.py +++ b/src/argaze/GazeAnalysis/LinearRegression.py @@ -79,14 +79,26 @@ class GazePositionCalibrator(GazeFeatures.GazePositionCalibrator): return gaze_position - def draw(self, image: numpy.array): - """Draw calibration into image. + def draw(self, image: numpy.array, size: tuple, resolution: tuple, line_color: tuple = (0, 0, 0), thickness: int = 1): + """Draw calibration field.""" + + width, height = size - Parameters: - image: where to draw - """ + if width * height > 0: + + rx, ry = resolution + lx = numpy.linspace(0, width, rx) + ly = numpy.linspace(0, height, ry) + xv, yv = numpy.meshgrid(lx, ly, indexing='ij') + + for i in range(rx): + + for j in range(ry): + + start = (xv[i][j], yv[i][j]) + end = self.apply(GazeFeatures.GazePosition(start)).value - raise NotImplementedError('draw() method not implemented') + cv2.line(image, (int(start[0]), int(start[1])), (int(end[0]), int(end[1])), line_color, thickness) @property def calibrating(self) -> bool: -- cgit v1.1 From 4575911ec8a04d7d228109acbcb58b448b72fb3b Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 17 Oct 2023 18:49:34 +0200 Subject: Adding draw_gaze_position_calibrator as image_parameters ArFrame option. 
--- src/argaze/ArFeatures.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'src') diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index 2e278ea..13b952d 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -993,13 +993,14 @@ class ArFrame(): # Return look data return self.__gaze_position, identified_gaze_movement, scan_step_analysis, layer_analysis, execution_times, exception - def __image(self, background_weight: float = None, heatmap_weight: float = None, draw_scan_path: dict = None, draw_layers: dict = None, draw_gaze_positions: dict = None, draw_fixations: dict = None, draw_saccades: dict = None) -> numpy.array: + def __image(self, background_weight: float = None, heatmap_weight: float = None, draw_gaze_position_calibrator: dict = None, draw_scan_path: dict = None, draw_layers: dict = None, draw_gaze_positions: dict = None, draw_fixations: dict = None, draw_saccades: dict = None) -> numpy.array: """ Get background image with overlaid visualisations. 
Parameters: background_weight: weight of background overlay heatmap_weight: weight of heatmap overlay + draw_gaze_position_calibrator: [GazeFeatures.GazePositionCalibrator.draw](argaze.md/#argaze.GazeFeatures.GazePositionCalibrator.draw) parameters (if None, nothing is drawn) draw_scan_path: [GazeFeatures.ScanPath.draw](argaze.md/#argaze.GazeFeatures.ScanPath.draw) parameters (if None, no scan path is drawn) draw_layers: dictionary of [ArLayer.draw](argaze.md/#argaze.ArFeatures.ArLayer.draw) parameters per layer (if None, no layer is drawn) draw_gaze_positions: [GazeFeatures.GazePosition.draw](argaze.md/#argaze.GazeFeatures.GazePosition.draw) parameters (if None, no gaze position is drawn) @@ -1032,6 +1033,11 @@ class ArFrame(): image = numpy.full((self.size[1], self.size[0], 3), 0).astype(numpy.uint8) + # Draw gaze position calibrator + if draw_gaze_position_calibrator is not None: + + self.gaze_position_calibrator.draw(image, size=self.size, **draw_gaze_position_calibrator) + # Draw scan path if required if draw_scan_path is not None and self.scan_path is not None: -- cgit v1.1 From 531775bd115c49fc15674fa9b53cb157b29ffaa8 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Wed, 18 Oct 2023 12:24:32 +0200 Subject: Fixing GazePositionCalibrator dict loading. 
--- src/argaze/ArFeatures.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src') diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index 13b952d..5b5d418 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -621,7 +621,7 @@ class ArFrame(): # dict: else: - new_gaze_position_calibrator = GazePositionCalibrator.from_dict(gaze_position_calibrator_value) + new_gaze_position_calibrator = GazeFeatures.GazePositionCalibrator.from_dict(gaze_position_calibrator_value) except KeyError: -- cgit v1.1 From d9dc8fc6f542c1ba46cba9d66a741890f946474a Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Mon, 23 Oct 2023 14:34:10 +0200 Subject: Removing draw_places_axes image parameter. Fixing aruco markers group drawing. --- src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py | 35 ++-------------------------- 1 file changed, 2 insertions(+), 33 deletions(-) (limited to 'src') diff --git a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py index edae927..b4aedbd 100644 --- a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py +++ b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py @@ -432,11 +432,7 @@ class ArUcoMarkersGroup(): try: - T = self.places[identifier].translation - R = self.places[identifier].rotation - - placePoints = (T + numpy.float32([R.dot([-l, -l, 0]), R.dot([l, -l, 0]), R.dot([l, l, 0]), R.dot([-l, l, 0])])).reshape(-1, 3) - placePoints, _ = cv2.projectPoints(placePoints, self._rotation, self._translation, numpy.array(K), numpy.array(D)) + placePoints, _ = cv2.projectPoints(place.corners, self._rotation, self._translation, numpy.array(K), numpy.array(D)) placePoints = placePoints.astype(int) cv2.line(image, tuple(placePoints[0].ravel()), tuple(placePoints[1].ravel()), color, border_size) @@ -448,29 +444,7 @@ class ArUcoMarkersGroup(): except cv2.error: pass - def draw_places_axes(self, image: numpy.array, K, D, thickness: int = 0, length: float = 0): - """Draw group place axes.""" - - for 
identifier, place in self.places.items(): - - try: - - T = self.places[identifier].translation - R = self.places[identifier].rotation - - axisPoints = (T + numpy.float32([R.dot([length, 0, 0]), R.dot([0, length, 0]), R.dot([0, 0, length]), R.dot([0, 0, 0])])).reshape(-1, 3) - axisPoints, _ = cv2.projectPoints(axisPoints, self._rotation, self._translation, numpy.array(K), numpy.array(D)) - axisPoints = axisPoints.astype(int) - - cv2.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[0].ravel()), (0, 0, 255), thickness) # X (red) - cv2.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[1].ravel()), (0, 255, 0), thickness) # Y (green) - cv2.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[2].ravel()), (255, 0, 0), thickness) # Z (blue) - - # Ignore errors due to out of field places: their coordinate are larger than int32 limitations. - except cv2.error: - pass - - def draw(self, image: numpy.array, K, D, draw_axes: dict = None, draw_places: dict = None, draw_places_axes: dict = None): + def draw(self, image: numpy.array, K, D, draw_axes: dict = None, draw_places: dict = None): """Draw group axes and places. Parameters: @@ -489,11 +463,6 @@ class ArUcoMarkersGroup(): self.draw_places(image, K, D, **draw_places) - # Draw places axes if required - if draw_places_axes is not None: - - self.draw_places_axes(image, K, D, **draw_places_axes) - def to_obj(self, obj_filepath): """Save group to .obj file.""" -- cgit v1.1 From ecfbc519e518948078b0831ccd0fa8bb18e439cb Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Mon, 23 Oct 2023 15:00:17 +0200 Subject: Removing parsing of normals for ArUcoMarkersGroup. 
--- src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) (limited to 'src') diff --git a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py index b4aedbd..5cacf09 100644 --- a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py +++ b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py @@ -152,9 +152,6 @@ class ArUcoMarkersGroup(): !!! note All markers have to belong to the same dictionary. - !!! note - Marker normal vectors (vn) expected. - """ new_marker_size = 0 @@ -165,8 +162,7 @@ class ArUcoMarkersGroup(): OBJ_RX_DICT = { 'object': re.compile(r'o (.*)#([0-9]+)_(.*)\n'), 'vertice': re.compile(r'v ([+-]?[0-9]*[.]?[0-9]+) ([+-]?[0-9]*[.]?[0-9]+) ([+-]?[0-9]*[.]?[0-9]+)\n'), - 'normal': re.compile(r'vn ([+-]?[0-9]*[.]?[0-9]+) ([+-]?[0-9]*[.]?[0-9]+) ([+-]?[0-9]*[.]?[0-9]+)\n'), - 'face': re.compile(r'f ([0-9]+)//([0-9]+) ([0-9]+)//([0-9]+) ([0-9]+)//([0-9]+) ([0-9]+)//([0-9]+)\n'), + 'face': re.compile(r'f ([0-9]+) ([0-9]+) ([0-9]+) ([0-9]+)\n'), 'comment': re.compile(r'#(.*)\n') # keep comment regex after object regex because the # is used in object string too } @@ -186,7 +182,6 @@ class ArUcoMarkersGroup(): identifier = None vertices = [] - normals = {} faces = {} # Open the file and read through it line by line @@ -225,15 +220,10 @@ class ArUcoMarkersGroup(): vertices.append(tuple([float(match.group(1)), float(match.group(2)), float(match.group(3))])) - # Extract normal to calculate rotation matrix - elif key == 'normal': - - normals[identifier] = tuple([float(match.group(1)), float(match.group(2)), float(match.group(3))]) - # Extract vertice ids elif key == 'face': - faces[identifier] = [int(match.group(1)), int(match.group(3)), int(match.group(5)), int(match.group(7))] + faces[identifier] = [int(match.group(1)), int(match.group(2)), int(match.group(3)), int(match.group(4))] # Go to next line line = file.readline() @@ -487,10 +477,5 @@ class ArUcoMarkersGroup(): 
vertices += f' {v_count}//{p+1}' - # Write normal vector - nvec = numpy.cross(place.corners[-1] - place.corners[0], place.corners[1] - place.corners[0]) - nvec = nvec / numpy.linalg.norm(nvec) - file.write(f'vn {nvec[0]} {nvec[1]} {nvec[2]}\n') - #file.write('s off\n') file.write(f'f{vertices}\n') -- cgit v1.1 From 3a83442c2da865b6307748a2f26c48fa1bb3fbc8 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Mon, 23 Oct 2023 15:06:20 +0200 Subject: Fixing documentation annotation. --- src/argaze/ArFeatures.py | 2 +- src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py | 1 - src/argaze/DataStructures.py | 2 +- 3 files changed, 2 insertions(+), 3 deletions(-) (limited to 'src') diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index 5b5d418..5ec6b7e 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -831,7 +831,7 @@ class ArFrame(): return self.__ts_logs - def look(self, timestamp: int|float, gaze_position: GazeFeatures.GazePosition = GazeFeatures.UnvalidGazePosition()) -> Tuple[GazeFeatures.GazeMovement, dict, dict, dict, Exception]: + def look(self, timestamp: int|float, gaze_position: GazeFeatures.GazePosition = GazeFeatures.UnvalidGazePosition()) -> Tuple[GazeFeatures.GazePosition, GazeFeatures.GazeMovement, dict, dict, dict, Exception]: """ Project gaze position into frame. 
diff --git a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py index 5cacf09..6ffdae2 100644 --- a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py +++ b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py @@ -440,7 +440,6 @@ class ArUcoMarkersGroup(): Parameters: draw_axes: draw_axes parameters (if None, no axes drawn) draw_places: draw_places parameters (if None, no places drawn) - draw_places_axes: draw_places_axes parameters (if None, no places axes drawn) """ # Draw axes if required diff --git a/src/argaze/DataStructures.py b/src/argaze/DataStructures.py index b5101b2..9e35dea 100644 --- a/src/argaze/DataStructures.py +++ b/src/argaze/DataStructures.py @@ -45,7 +45,7 @@ def as_dict(dataclass_object) -> dict: # Copy fields values return {name: vars(dataclass_object)[name] for name in fields_names} -def module_path(obj): +def module_path(obj) -> str: """ Get object module path. -- cgit v1.1 From f8b1a36c9e486ef19f62159475b9bf19a5b90a03 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Mon, 23 Oct 2023 22:43:24 +0200 Subject: Fixing aruco markers group exportation. Renaming ready made script. Updating documentation. 
--- src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py | 2 +- src/argaze/utils/aruco_markers_group_export.py | 160 ++++++++++++++++++++++ src/argaze/utils/aruco_markers_scene_export.py | 176 ------------------------- 3 files changed, 161 insertions(+), 177 deletions(-) create mode 100644 src/argaze/utils/aruco_markers_group_export.py delete mode 100644 src/argaze/utils/aruco_markers_scene_export.py (limited to 'src') diff --git a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py index 6ffdae2..37bceec 100644 --- a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py +++ b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py @@ -474,7 +474,7 @@ class ArUcoMarkersGroup(): file.write(f'v {" ".join(map(str, place.corners[v]))}\n') v_count += 1 - vertices += f' {v_count}//{p+1}' + vertices += f' {v_count}' #file.write('s off\n') file.write(f'f{vertices}\n') diff --git a/src/argaze/utils/aruco_markers_group_export.py b/src/argaze/utils/aruco_markers_group_export.py new file mode 100644 index 0000000..d948105 --- /dev/null +++ b/src/argaze/utils/aruco_markers_group_export.py @@ -0,0 +1,160 @@ +#!/usr/bin/env python + +""" """ + +__author__ = "Théo de la Hogue" +__credits__ = [] +__copyright__ = "Copyright 2023, Ecole Nationale de l'Aviation Civile (ENAC)" +__license__ = "BSD" + +import argparse +import time +import itertools + +from argaze.ArUcoMarkers import ArUcoCamera, ArUcoMarkersGroup +from argaze.utils import UtilsFeatures + +import cv2 +import numpy + +def main(): + """ + Load a MOVIE and an ArUcoCamera CONFIGURATION to detect ArUco markers inside a selected movie frame then, export detected ArUco markers group as .obj file into an OUTPUT folder. 
+ """ + + # Manage arguments + parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0]) + parser.add_argument('movie', metavar='MOVIE', type=str, default=None, help='movie path') + parser.add_argument('configuration', metavar='CONFIGURATION', type=str, default=None, help='ArUco camera configuration') + + parser.add_argument('-s','--start', metavar='START', type=float, default=0., help='start time in second') + parser.add_argument('-o', '--output', metavar='OUTPUT', type=str, default='.', help='export folder path') + args = parser.parse_args() + + # Load movie + video_capture = cv2.VideoCapture(args.movie) + + video_fps = video_capture.get(cv2.CAP_PROP_FPS) + image_width = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)) + image_height = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)) + + # Load ArUco camera + aruco_camera = ArUcoCamera.ArUcoCamera.from_json(args.configuration) + + # Create empty ArUco scene + aruco_markers_group = None + + # Create a window + cv2.namedWindow(aruco_camera.name, cv2.WINDOW_AUTOSIZE) + + # Enable exit signal handler + exit = UtilsFeatures.ExitSignalHandler() + + # Init image selection + current_image_index = -1 + _, current_image = video_capture.read() + next_image_index = int(args.start * video_fps) + refresh = False + + while not exit.status(): + + # Select a new image and detect markers once + if next_image_index != current_image_index or refresh: + + video_capture.set(cv2.CAP_PROP_POS_FRAMES, next_image_index) + + success, video_image = video_capture.read() + + if success: + + # Refresh once + refresh = False + + current_image_index = video_capture.get(cv2.CAP_PROP_POS_FRAMES) - 1 + current_image_time = video_capture.get(cv2.CAP_PROP_POS_MSEC) + + # Detect markers + detection_time, projection_time, exceptions = aruco_camera.watch(video_image) + + # Estimate each markers pose + aruco_camera.aruco_detector.estimate_markers_pose(aruco_camera.aruco_detector.detected_markers) + + # Build aruco scene from detected 
markers + aruco_markers_group = ArUcoMarkersGroup.ArUcoMarkersGroup(aruco_camera.aruco_detector.marker_size, aruco_camera.aruco_detector.dictionary, aruco_camera.aruco_detector.detected_markers) + + # Get camera image + camera_image = aruco_camera.image() + + # Write detected markers + cv2.putText(camera_image, f'Detecting markers {list(aruco_camera.aruco_detector.detected_markers.keys())}', (20, aruco_camera.size[1]-40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA) + + # Write timing + cv2.putText(camera_image, f'Frame at {int(current_image_time)}ms', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA) + cv2.putText(camera_image, f'Detection {int(detection_time)}ms', (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA) + cv2.putText(camera_image, f'Projection {int(projection_time)}ms', (20, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA) + + # Write documentation + cv2.putText(camera_image, f'<- previous image', (aruco_camera.size[0]-500, aruco_camera.size[1]-120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA) + cv2.putText(camera_image, f'-> next image', (aruco_camera.size[0]-500, aruco_camera.size[1]-80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA) + cv2.putText(camera_image, f'Ctrl+s: export ArUco markers', (aruco_camera.size[0]-500, aruco_camera.size[1]-40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA) + + # Copy image + current_image = camera_image.copy() + + # Keep last image + else: + + video_image = current_image.copy() + + key_pressed = cv2.waitKey(10) + + #if key_pressed != -1: + # print(key_pressed) + + # Select previous image with left arrow + if key_pressed == 2: + next_image_index -= 1 + + # Select next image with right arrow + if key_pressed == 3: + next_image_index += 1 + + # Clip image index + if next_image_index < 0: + next_image_index = 0 + + # r: reload configuration + if key_pressed == 114: + + aruco_camera = 
ArUcoCamera.ArUcoCamera.from_json(args.configuration) + refresh = True + print('Configuration reloaded') + + # Save selected marker edition using 'Ctrl + s' + if key_pressed == 19: + + if aruco_markers_group: + + aruco_markers_group.to_obj(f'{args.output}/{int(current_image_time)}-aruco_markers_group.obj') + print(f'ArUco markers saved into {args.output}') + + else: + + print(f'No ArUco markers to export') + + # Close window using 'Esc' key + if key_pressed == 27: + break + + # Display video + cv2.imshow(aruco_camera.name, video_image) + + # Close movie capture + video_capture.release() + + # Stop image display + cv2.destroyAllWindows() + +if __name__ == '__main__': + + main() \ No newline at end of file diff --git a/src/argaze/utils/aruco_markers_scene_export.py b/src/argaze/utils/aruco_markers_scene_export.py deleted file mode 100644 index f618342..0000000 --- a/src/argaze/utils/aruco_markers_scene_export.py +++ /dev/null @@ -1,176 +0,0 @@ -#!/usr/bin/env python - -""" """ - -__author__ = "Théo de la Hogue" -__credits__ = [] -__copyright__ = "Copyright 2023, Ecole Nationale de l'Aviation Civile (ENAC)" -__license__ = "BSD" - -import argparse -import time -import itertools - -from argaze.ArUcoMarkers import ArUcoMarkersDictionary, ArUcoOpticCalibrator, ArUcoDetector, ArUcoMarkersGroup -from argaze.utils import UtilsFeatures - -import cv2 -import numpy - -def main(): - """ - Load a movie with ArUco markers inside and select image into it, detect ArUco markers belonging to a given dictionary and size into the selected image thanks to given optic parameters and detector parameters then, export detected ArUco scene as .obj file. 
- """ - - # Manage arguments - parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0]) - parser.add_argument('movie', metavar='MOVIE', type=str, default=None, help='movie path') - parser.add_argument('dictionary', metavar='DICTIONARY', type=str, default=None, help='ArUco dictionary to detect') - parser.add_argument('marker_size', metavar='MARKER_SIZE', type=int, default=3, help='marker size in cm') - parser.add_argument('optic_parameters', metavar='OPTIC_PARAMETERS', type=str, default=None, help='Optic parameters from camera calibration process') - parser.add_argument('detector_parameters', metavar='DETECTOR_PARAMETERS', type=str, default=None, help='ArUco detector parameters') - - parser.add_argument('-s','--start', metavar='START', type=float, default=0., help='start time in second') - parser.add_argument('-o', '--output', metavar='OUT', type=str, default='.', help='export scene folder path') - args = parser.parse_args() - - # Load movie - video_capture = cv2.VideoCapture(args.movie) - - video_fps = video_capture.get(cv2.CAP_PROP_FPS) - image_width = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)) - image_height = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)) - - # Load ArUco dictionary - aruco_dictionary = ArUcoMarkersDictionary.ArUcoMarkersDictionary(args.dictionary) - - # Load optic parameters - optic_parameters = ArUcoOpticCalibrator.OpticParameters.from_json(args.optic_parameters) - - # Load detector parameters - detector_parameters = ArUcoDetector.DetectorParameters.from_json(args.detector_parameters) - - # Create ArUco detector - aruco_detector = ArUcoDetector.ArUcoDetector(dictionary=aruco_dictionary, marker_size=args.marker_size, optic_parameters=optic_parameters, parameters=detector_parameters) - - # Create empty ArUco scene - aruco_markers_group = None - - # Create a window to display AR environment - window_name = "Export ArUco scene" - cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE) - - # Enable exit signal handler - exit = 
UtilsFeatures.ExitSignalHandler() - - # Init image selection - current_image_index = -1 - _, current_image = video_capture.read() - next_image_index = int(args.start * video_fps) - refresh = False - - # Hide help - draw_help = False - - while not exit.status(): - - # Select a new image and detect markers once - if next_image_index != current_image_index or refresh: - - video_capture.set(cv2.CAP_PROP_POS_FRAMES, next_image_index) - - success, video_image = video_capture.read() - - if success: - - # Refresh once - refresh = False - - current_image_index = video_capture.get(cv2.CAP_PROP_POS_FRAMES) - 1 - current_image_time = video_capture.get(cv2.CAP_PROP_POS_MSEC) - - # Detect markers - aruco_detector.detect_markers(video_image) - - # Estimate markers pose - aruco_detector.estimate_markers_pose() - - # Build aruco scene from detected markers - aruco_markers_group = ArUcoMarkersGroup.ArUcoMarkersGroup(args.marker_size, aruco_dictionary, aruco_detector.detected_markers) - - # Write scene detected markers - cv2.putText(video_image, f'{list(aruco_detector.detected_markers.keys())}', (20, image_height-80), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA) - - # Write timing - cv2.putText(video_image, f'Time: {int(current_image_time)} ms', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA) - - # Copy image - current_image = video_image.copy() - - # Keep last image - else: - - video_image = current_image.copy() - - # Draw detected markers - aruco_detector.draw_detected_markers(video_image, {"color": [0, 255, 0], "draw_axes": {"thickness": 4}}) - - # Write documentation - cv2.putText(video_image, f'Press \'h\' for help', (950, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA) - - if draw_help: - - cv2.rectangle(video_image, (0, 50), (500, 300), (127, 127, 127), -1) - cv2.putText(video_image, f'> Left arrow: previous image', (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA) - cv2.putText(video_image, f'> 
Right arrow: next image', (20, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA) - cv2.putText(video_image, f'> Ctrl+s: export ArUco scene', (20, 160), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA) - - key_pressed = cv2.waitKey(10) - - #if key_pressed != -1: - # print(key_pressed) - - # Select previous image with left arrow - if key_pressed == 2: - next_image_index -= 1 - - # Select next image with right arrow - if key_pressed == 3: - next_image_index += 1 - - # Clip image index - if next_image_index < 0: - next_image_index = 0 - - # Switch help mode with h key - if key_pressed == 104: - draw_help = not draw_help - - # Save selected marker edition using 'Ctrl + s' - if key_pressed == 19: - - if aruco_markers_group: - - aruco_markers_group.to_obj(f'{args.output}/{int(current_image_time)}-aruco_markers_group.obj') - print(f'ArUco scene saved into {args.output}') - - else: - - print(f'No ArUco scene to export') - - # Close window using 'Esc' key - if key_pressed == 27: - break - - # Display video - cv2.imshow(window_name, video_image) - - # Close movie capture - video_capture.release() - - # Stop image display - cv2.destroyAllWindows() - -if __name__ == '__main__': - - main() \ No newline at end of file -- cgit v1.1