From b947573f7dbccb5b2b13b64677192145f2dbb864 Mon Sep 17 00:00:00 2001 From: Theo De La Hogue Date: Fri, 22 Sep 2023 22:06:20 +0200 Subject: Working on AOI frame feature: now 2D AOI in scene frame are merged into 3D AOI in scene layer. --- src/argaze.test/AreaOfInterest/AOIFeatures.py | 16 ++-- src/argaze/ArFeatures.py | 94 ++++++++++++---------- src/argaze/AreaOfInterest/AOI2DScene.py | 31 ++++++- src/argaze/AreaOfInterest/AOI3DScene.py | 9 ++- src/argaze/AreaOfInterest/AOIFeatures.py | 81 ++++++++++++++----- src/argaze/utils/demo_data/aoi_2d_scene.json | 5 ++ src/argaze/utils/demo_data/aoi_3d_scene.obj | 50 ------------ .../utils/demo_data/demo_aruco_markers_setup.json | 12 ++- .../utils/demo_data/demo_gaze_analysis_setup.json | 7 +- src/argaze/utils/demo_gaze_analysis_run.py | 20 ++--- 10 files changed, 175 insertions(+), 150 deletions(-) create mode 100644 src/argaze/utils/demo_data/aoi_2d_scene.json diff --git a/src/argaze.test/AreaOfInterest/AOIFeatures.py b/src/argaze.test/AreaOfInterest/AOIFeatures.py index bced0aa..6df33ca 100644 --- a/src/argaze.test/AreaOfInterest/AOIFeatures.py +++ b/src/argaze.test/AreaOfInterest/AOIFeatures.py @@ -118,13 +118,17 @@ class TestAreaOfInterestClass(unittest.TestCase): aoi_2D = AOIFeatures.AreaOfInterest([[0, 0], [0, 2], [2, 2], [2, 0]]) - self.assertEqual(aoi_2D.inner_axis((1, 1)), (0.5, 0.5)) + self.assertEqual(aoi_2D.inner_axis(1, 1), (0.5, 0.5)) def test_outter_axis(self): aoi_2D = AOIFeatures.AreaOfInterest([[0, 0], [0, 2], [2, 2], [2, 0]]) - self.assertEqual(aoi_2D.outter_axis((0.5, 0.5)), (1, 1)) + self.assertEqual(aoi_2D.outter_axis(0.5, 0.5), (1, 1)) + + aoi_3D = AOIFeatures.AreaOfInterest([[1, 0, 0], [1, 0, 2], [1, 2, 2], [1, 2, 0]]) + + self.assertEqual(aoi_3D.outter_axis(0.5, 0.5), (1, 1, 1)) def test_circle_intersection(self): @@ -181,21 +185,15 @@ class TestAOISceneClass(unittest.TestCase): aoi_2d_scene_AB["A"] = AOIFeatures.AreaOfInterest([[0, 0], [0, 1], [1, 1], [1, 0]]) aoi_2d_scene_AB["B"] = AOIFeatures.AreaOfInterest([[0, 0], [0, 2], [2, 2], [2, 0]]) - print('aoi_2d_scene_AB vars: ', vars(aoi_2d_scene_AB)) - # Create second scene with C and D aoi aoi_2d_scene_CD = AOIFeatures.AOIScene(2,) aoi_2d_scene_CD["C"] = AOIFeatures.AreaOfInterest([[0, 0], [0, 3], [3, 3], [3, 0]]) aoi_2d_scene_CD["D"] = AOIFeatures.AreaOfInterest([[0, 0], [0, 4], [4, 4], [4, 0]]) - print('aoi_2d_scene_CD vars: ', vars(aoi_2d_scene_CD)) - # Merge first scene and second scene into a third scene aoi_2d_scene_ABCD = aoi_2d_scene_AB | aoi_2d_scene_CD - print('aoi_2d_scene_ABCD vars: ', vars(aoi_2d_scene_ABCD)) - # Check third scene self.assertEqual(aoi_2d_scene_ABCD.dimension, 2) self.assertEqual(len(aoi_2d_scene_ABCD.items()), 4) @@ -205,8 +203,6 @@ class TestAOISceneClass(unittest.TestCase): # Merge second scene into first scene aoi_2d_scene_AB |= aoi_2d_scene_CD - print('aoi_2d_scene_AB vars: ', vars(aoi_2d_scene_AB)) - # Check first scene self.assertEqual(aoi_2d_scene_AB.dimension, 2) self.assertEqual(len(aoi_2d_scene_AB.items()), 4) diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index 96976c2..ad17df2 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -192,6 +192,8 @@ class ArLayer(): except KeyError: + pass + # Add AOI 2D Scene by default new_aoi_scene = AOI2DScene.AOI2DScene() @@ -484,7 +486,7 @@ class ArLayer(): # Draw aoi if required if draw_aoi_scene is not None: - + self.aoi_scene.draw(image, **draw_aoi_scene) # Draw aoi matching if required @@ -728,11 +730,6 @@ class ArFrame(): # Create layer new_layer = 
ArLayer.from_dict(layer_data, working_directory)
 
-                # Project 3D aoi scene layer to get only 2D aoi scene
-                if new_layer.aoi_scene.dimension == 3:
-
-                    new_layer.aoi_scene = new_layer.aoi_scene.orthogonal_projection * new_frame_size
-
                 # Append new layer
                 new_layers[layer_name] = new_layer
 
@@ -1099,13 +1096,6 @@ class ArScene():
 
             frame.parent = self
 
-        # Preprocess orthogonal projection to speed up further processings
-        self.__orthogonal_projection_cache = {}
-
-        for layer_name, layer in self.layers.items():
-
-            self.__orthogonal_projection_cache[layer_name] = layer.aoi_scene.orthogonal_projection
-
     def __str__(self) -> str:
         """
         Returns:
@@ -1184,54 +1174,70 @@ class ArScene():
 
         for frame_name, frame_data in scene_data.pop('frames').items():
 
-            # Append name
-            frame_data['name'] = frame_name
+            # str: relative path to file
+            if type(frame_data) == str:
+
+                filepath = os.path.join(working_directory, frame_data)
+                file_format = filepath.split('.')[-1]
+
+                # JSON file format for 2D or 3D dimension
+                if file_format == 'json':
+
+                    new_frame = ArFrame.from_json(filepath)
 
-            # Create frame
-            new_frame = ArFrame.from_dict(frame_data, working_directory)
+            # dict:
+            else:
 
-            # Look for AOI with same frame name
-            aoi_frame = None
-            aoi_frame_found = False
-            for layer_name, layer in new_layers.items():
+                # Append name
+                frame_data['name'] = frame_name
+
+                new_frame = ArFrame.from_dict(frame_data, working_directory)
+
+            # Look for a scene layer with an AOI named like the frame
+            for scene_layer_name, scene_layer in new_layers.items():
 
                 try:
 
-                    aoi_frame = layer.aoi_scene[frame_name]
-                    aoi_frame_found = True
+                    frame_3d = scene_layer.aoi_scene[frame_name]
 
-                except KeyError:
+                    # Check that the frame has a layer named like this scene layer
+                    aoi_2d_scene = new_frame.layers[scene_layer_name].aoi_scene
+
+                    # Transform 2D frame layer AOIs into 3D scene layer AOIs
+                    # Then, add them to the scene layer
+                    scene_layer.aoi_scene |= aoi_2d_scene.dimensionalize(frame_3d, new_frame.size)
 
-                    # AOI name should be unique
-                    break
+                    '''DEPRECATED: but maybe still useful?
+                    # Project and reframe each layer into corresponding frame layers
+                    for frame_layer_name, frame_layer in new_frame.layers.items():
 
-            if aoi_frame_found:
+                        try:
 
-                # Project and reframe each layers into corresponding frame layers
-                for frame_layer_name, frame_layer in new_frame.layers.items():
+                            layer = new_layers[frame_layer_name]
+
+                            layer_aoi_scene_projection = layer.aoi_scene.orthogonal_projection
+                            aoi_frame_projection = layer_aoi_scene_projection[frame_name]
 
-                    try:
+                            frame_layer.aoi_scene = layer_aoi_scene_projection.reframe(aoi_frame_projection, new_frame.size)
 
-                        layer = new_layers[frame_layer_name]
-                        
-                        layer_aoi_scene_projection = layer.aoi_scene.orthogonal_projection
-                        aoi_frame_projection = layer_aoi_scene_projection[frame_name]
+                            if frame_layer.aoi_scan_path is not None:
 
-                        frame_layer.aoi_scene = layer_aoi_scene_projection.reframe(aoi_frame_projection, new_frame.size)
+                                # Edit expected AOI list by removing AOI with name equals to frame layer name
+                                expected_aois = list(layer.aoi_scene.keys())
 
-                        if frame_layer.aoi_scan_path is not None:
+                                if frame_layer_name in expected_aois:
+                                    expected_aois.remove(frame_layer_name)
 
-                            # Edit expected AOI list by removing AOI with name equals to frame layer name
-                            expected_aois = list(layer.aoi_scene.keys())
+                                frame_layer.aoi_scan_path.expected_aois = expected_aois
 
-                            if frame_layer_name in expected_aois:
-                                expected_aois.remove(frame_layer_name)
+                            except KeyError:
 
-                            frame_layer.aoi_scan_path.expected_aois = expected_aois
+                                continue
+                    '''
 
-                        except KeyError:
+                except KeyError as e:
 
-                            continue
+                    print(e)
 
             # Append new frame
             new_frames[frame_name] = new_frame
@@ -1437,7 +1443,7 @@ class ArCamera(ArFrame):
                 # TODO?: Should we prefer to use camera frame AOIMatcher object?
                 if aoi_2d.contains_point(gaze_position.value):
 
-                    inner_x, inner_y = aoi_2d.clockwise().inner_axis(gaze_position.value)
+                    inner_x, inner_y = aoi_2d.clockwise().inner_axis(*gaze_position.value)
 
                     # QUESTION: How to project gaze precision?
                     inner_gaze_position = GazeFeatures.GazePosition((inner_x, inner_y))
diff --git a/src/argaze/AreaOfInterest/AOI2DScene.py b/src/argaze/AreaOfInterest/AOI2DScene.py
index 73c977f..f6b8dcb 100644
--- a/src/argaze/AreaOfInterest/AOI2DScene.py
+++ b/src/argaze/AreaOfInterest/AOI2DScene.py
@@ -10,7 +10,7 @@ __license__ = "BSD"
 from typing import TypeVar, Tuple
 
 from argaze import DataStructures
-from argaze.AreaOfInterest import AOIFeatures
+from argaze.AreaOfInterest import AOIFeatures, AOI3DScene
 from argaze import GazeFeatures
 
 import cv2
@@ -19,6 +19,9 @@ import numpy
 AOI2DSceneType = TypeVar('AOI2DScene', bound="AOI2DScene")
 # Type definition for type annotation convenience
 
+AOI3DSceneType = TypeVar('AOI3DScene', bound="AOI3DScene")
+# Type definition for type annotation convenience
+
 class AOI2DScene(AOIFeatures.AOIScene):
 	"""Define AOI 2D scene."""
@@ -89,6 +92,7 @@ class AOI2DScene(AOIFeatures.AOIScene):
 
 			yield name, aoi, matched_region, aoi_ratio, circle_ratio
 
+	'''DEPRECATED: but maybe still useful?
 	def reframe(self, aoi: AOIFeatures.AreaOfInterest, size: tuple) -> AOI2DSceneType:
 		"""
 		Reframe whole scene to a scene bounded by a 4 vertices 2D AOI.
@@ -120,3 +124,28 @@ class AOI2DScene(AOIFeatures.AOIScene):
 			aoi2D_scene[name] = numpy.matmul(aoi2D - Src_origin, M.T)
 
 		return aoi2D_scene
+	'''
+	def dimensionalize(self, frame_3d: AOIFeatures.AreaOfInterest, size: tuple) -> AOI3DSceneType:
+		"""
+		Convert the 2D scene into a 3D scene, considering that it lies inside a rectangular 3D frame.
+
+		Parameters:
+			frame_3d: rectangular 3D AOI to use as the reference plane
+			size: size of the frame in pixels
+
+		Returns:
+			AOI 3D scene
+		"""
+
+		# Vectorize outter_axis function
+		vfunc = numpy.vectorize(frame_3d.outter_axis)
+
+		# Prepare new AOI 3D scene
+		aoi3D_scene = AOI3DScene.AOI3DScene()
+
+		for name, aoi2D in self.items():
+
+			X, Y = (aoi2D / size).T
+			aoi3D_scene[name] = numpy.array(vfunc(X, Y)).T.view(AOIFeatures.AreaOfInterest)
+
+		return aoi3D_scene
diff --git a/src/argaze/AreaOfInterest/AOI3DScene.py b/src/argaze/AreaOfInterest/AOI3DScene.py
index 8ea6048..bfe189a 100644
--- a/src/argaze/AreaOfInterest/AOI3DScene.py
+++ b/src/argaze/AreaOfInterest/AOI3DScene.py
@@ -108,9 +108,9 @@ class AOI3DScene(AOIFeatures.AOIScene):
 
 		file.close()
 
-		# retreive all aoi3D vertices
+		# retrieve all aoi3D vertices and sort them in clockwise order
 		for name, face in faces.items():
-			aoi3D = AOIFeatures.AreaOfInterest([ vertices[i-1] for i in face ])
+			aoi3D = AOIFeatures.AreaOfInterest([ vertices[i-1] for i in reversed(face) ])
 			aois_3d[name] = aoi3D
 
 	except IOError:
@@ -149,8 +149,9 @@ class AOI3DScene(AOIFeatures.AOIScene):
 			file.write('s off\n')
 			file.write(vertices_ids + '\n')
 
+	'''DEPRECATED: but maybe still useful?
 	@property
-	def orthogonal_projection(self) -> AOI2DScene.AOI2DScene:
+	def orthogonal_projection(self) -> AOI2DSceneType:
 		"""
 		Orthogonal projection of whole scene.
 
@@ -169,7 +170,7 @@ class AOI3DScene(AOIFeatures.AOIScene):
 		K = numpy.array([[scene_size[1]/scene_size[0], 0.0, 0.5], [0.0, 1., 0.5], [0.0, 0.0, 1.0]])
 
 		return self.project(tvec, rvec, K)
-	
+	'''
 	def vision_cone(self, cone_radius, cone_height, cone_tip=[0., 0., 0.], cone_direction=[0., 0., 1.]) -> Tuple[AOI3DSceneType, AOI3DSceneType]:
 		"""Get AOI which are inside and out a given cone field.
diff --git a/src/argaze/AreaOfInterest/AOIFeatures.py b/src/argaze/AreaOfInterest/AOIFeatures.py
index e5585c5..ffaf882 100644
--- a/src/argaze/AreaOfInterest/AOIFeatures.py
+++ b/src/argaze/AreaOfInterest/AOIFeatures.py
@@ -127,8 +127,8 @@ class AreaOfInterest(numpy.ndarray):
 
 		return mpath.Path(self).contains_points([point])[0]
 
-	def inner_axis(self, point: tuple) -> tuple:
-		"""Transform the coordinates from the global axis to the AOI's axis.
+	def inner_axis(self, x: float, y: float) -> tuple:
+		"""Transform point coordinates from the global axis to the AOI axis.
 		!!! warning
 			Available for 2D AOI only.
 		!!! danger
@@ -143,35 +143,30 @@ class AreaOfInterest(numpy.ndarray):
 		Dst = numpy.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]]).astype(numpy.float32)
 
 		P = cv2.getPerspectiveTransform(Src, Dst)
-		X = numpy.append(numpy.array(numpy.array(point) - Src_origin), [1.0]).astype(numpy.float32)
+		X = numpy.append(numpy.array(numpy.array([x, y]) - Src_origin), [1.0]).astype(numpy.float32)
 		Y = numpy.dot(P, X)
 		La = (Y/Y[2])[:-1]
 
 		return tuple(numpy.around(La, 4))
 
-	def outter_axis(self, point: tuple) -> tuple:
-		"""Transform the coordinates from the AOI's axis to the global axis.
-		!!! warning
-			Available for 2D AOI only.
+	def outter_axis(self, x: float, y: float) -> tuple:
+		"""Transform point coordinates from the AOI axis to the global axis.
 		!!! danger
-			The AOI points must be sorted in clockwise order."""
-
-		assert(self.dimension == 2)
+			The AOI points must be sorted in clockwise order.
+		!!! 
danger + The AOI must be a rectangle.""" - Src = numpy.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]]).astype(numpy.float32) + # Origin point + O = self[0] - Dst = self.astype(numpy.float32) - Dst_origin = Dst[0] - Dst = (Dst - Dst_origin).reshape((len(Dst)), 2) + # Horizontal axis vector + H = self[1] - self[0] - P = cv2.getPerspectiveTransform(Src, Dst) - X = numpy.array([point[0], point[1], 1.0]).astype(numpy.float32) - Y = numpy.dot(P, X) + # Vertical axis vector + V = self[3] - self[0] - Lp = Dst_origin + (Y/Y[2])[:-1] - - return tuple(numpy.rint(Lp).astype(int)) + return tuple(O + x * H + y * V) def circle_intersection(self, center: tuple, radius: float) -> Tuple[numpy.array, float, float]: """Get intersection shape with a circle, intersection area / AOI area ratio and intersection area / circle area ratio. @@ -353,6 +348,42 @@ class AOIScene(): return output + def __add__(self, add_vector) -> AOISceneType: + """Add vector to scene.""" + + assert(len(add_vector) == self.__dimension) + + for name, area in self.__areas.items(): + + self.__areas[name] = self.__areas[name] + add_vector + + return self + + # Allow n + scene operation + __radd__ = __add__ + + def __sub__(self, sub_vector) -> AOISceneType: + """Sub vector to scene.""" + + assert(len(sub_vector) == self.__dimension) + + for name, area in self.__areas.items(): + + self.__areas[name] = self.__areas[name] - sub_vector + + return self + + def __rsub__(self, rsub_vector) -> AOISceneType: + """RSub vector to scene.""" + + assert(len(rsub_vector) == self.__dimension) + + for name, area in self.__areas.items(): + + self.__areas[name] = rsub_vector - self.__areas[name] + + return self + def __mul__(self, scale_vector) -> AOISceneType: """Scale scene by a vector.""" @@ -367,6 +398,16 @@ class AOIScene(): # Allow n * scene operation __rmul__ = __mul__ + def __truediv__(self, div_vector) -> AOISceneType: + + assert(len(div_vector) == self.__dimension) + + for name, area in self.__areas.items(): + + self.__areas[name] = self.__areas[name] / div_vector + + return self + def items(self) -> Tuple[str, AreaOfInterest]: """Iterate over areas.""" diff --git a/src/argaze/utils/demo_data/aoi_2d_scene.json b/src/argaze/utils/demo_data/aoi_2d_scene.json new file mode 100644 index 0000000..a0726e8 --- /dev/null +++ b/src/argaze/utils/demo_data/aoi_2d_scene.json @@ -0,0 +1,5 @@ +{ + "RedSquare": [[268, 203], [576, 203], [576, 510], [268, 510]], + "BlueTriangle":[[960, 664], [1113, 971], [806, 971]], + "GreenCircle":[[1497, 203], [1527, 206], [1556, 215], [1582, 229], [1605, 248], [1624, 271], [1639, 298], [1647, 327], [1650, 357], [1647, 387], [1639, 415], [1624, 442], [1605, 465], [1582, 484], [1556, 498], [1527, 507], [1497, 510], [1467, 507], [1438, 498], [1411, 484], [1388, 465], [1369, 442], [1355, 415], [1346, 387], [1343, 357], [1346, 327], [1355, 298], [1369, 271], [1388, 248], [1411, 229], [1438, 215], [1467, 206]] +} \ No newline at end of file diff --git a/src/argaze/utils/demo_data/aoi_3d_scene.obj b/src/argaze/utils/demo_data/aoi_3d_scene.obj index d32e235..0ce97de 100644 --- a/src/argaze/utils/demo_data/aoi_3d_scene.obj +++ b/src/argaze/utils/demo_data/aoi_3d_scene.obj @@ -1,5 +1,3 @@ -# Blender v3.0.1 OBJ File: 'ar_camera.blend' -# www.blender.org o GrayRectangle v 0.000000 0.000000 0.000000 v 25.000000 0.000000 0.000000 @@ -7,51 +5,3 @@ v 0.000000 14.960000 0.000000 v 25.000000 14.960000 0.000000 s off f 1 2 4 3 -o RedSquare -v 3.497026 8.309391 0.000000 -v 7.504756 8.309391 0.000000 -v 3.497026 12.314838 0.001030 -v 7.504756 
12.314838 0.001030 -s off -f 5 6 8 7 -o BlueTriangle -v 10.500295 2.307687 0.000000 -v 14.503224 2.306344 0.000000 -v 12.502419 6.312207 0.001030 -s off -f 9 10 11 -o GreenCircle -v 19.495552 12.311101 0.000000 -v 19.105371 12.272672 0.000000 -v 18.730185 12.158860 0.000000 -v 18.384411 11.974040 0.000000 -v 18.081339 11.725314 0.000000 -v 17.832613 11.422241 0.000000 -v 17.647793 11.076468 0.000000 -v 17.533981 10.701282 0.000000 -v 17.495552 10.311101 0.000000 -v 17.533981 9.920920 0.000000 -v 17.647793 9.545734 0.000000 -v 17.832613 9.199961 0.000000 -v 18.081339 8.896888 0.000000 -v 18.384411 8.648162 0.000000 -v 18.730185 8.463342 0.000000 -v 19.105371 8.349530 0.000000 -v 19.495552 8.311101 0.000000 -v 19.885733 8.349530 0.000000 -v 20.260920 8.463342 0.000000 -v 20.606693 8.648162 0.000000 -v 20.909765 8.896887 0.000000 -v 21.158491 9.199960 0.000000 -v 21.343311 9.545733 0.000000 -v 21.457123 9.920920 0.000000 -v 21.495552 10.311101 0.000000 -v 21.457123 10.701282 0.000000 -v 21.343311 11.076468 0.000000 -v 21.158491 11.422241 0.000000 -v 20.909765 11.725314 0.000000 -v 20.606693 11.974040 0.000000 -v 20.260920 12.158860 0.000000 -v 19.885733 12.272672 0.000000 -s off -f 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 12 diff --git a/src/argaze/utils/demo_data/demo_aruco_markers_setup.json b/src/argaze/utils/demo_data/demo_aruco_markers_setup.json index 9a3b79f..5168297 100644 --- a/src/argaze/utils/demo_data/demo_aruco_markers_setup.json +++ b/src/argaze/utils/demo_data/demo_aruco_markers_setup.json @@ -53,7 +53,7 @@ }, "frames": { "GrayRectangle": { - "size": [640, 383], + "size": [1920, 1149], "background": "frame_background.jpg", "gaze_movement_identifier": { "DispersionThresholdIdentification": { @@ -65,12 +65,10 @@ "duration_max": 10000 }, "layers": { - "GrayRectangle": { - "aoi_scene": "aoi_3d_scene.obj", + "main_layer": { + "aoi_scene": "aoi_2d_scene.json", "aoi_matcher": { - "FocusPointInside": { - "exclude": ["GrayRectangle"] - } + "FocusPointInside": {} } } }, @@ -91,7 +89,7 @@ } }, "draw_layers": { - "GrayRectangle": { + "main_layer": { "draw_aoi_scene": { "draw_aoi": { "color": [255, 255, 255], diff --git a/src/argaze/utils/demo_data/demo_gaze_analysis_setup.json b/src/argaze/utils/demo_data/demo_gaze_analysis_setup.json index 414a6fe..52945ae 100644 --- a/src/argaze/utils/demo_data/demo_gaze_analysis_setup.json +++ b/src/argaze/utils/demo_data/demo_gaze_analysis_setup.json @@ -25,11 +25,10 @@ "size": [320, 240] }, "layers": { - "GrayRectangle": { - "aoi_scene": "aoi_3d_scene.obj", + "main_layer": { + "aoi_scene": "aoi_2d_scene.json", "aoi_matcher": { "DeviationCircleCoverage": { - "exclude": ["GrayRectangle"], "coverage_threshold": 0.5 } }, @@ -64,7 +63,7 @@ "deepness": 0 }, "draw_layers": { - "GrayRectangle": { + "main_layer": { "draw_aoi_scene": { "draw_aoi": { "color": [255, 255, 255], diff --git a/src/argaze/utils/demo_gaze_analysis_run.py b/src/argaze/utils/demo_gaze_analysis_run.py index 465c5db..789657b 100644 --- a/src/argaze/utils/demo_gaze_analysis_run.py +++ b/src/argaze/utils/demo_gaze_analysis_run.py @@ -74,18 +74,18 @@ def main(): # Write last 5 steps of aoi scan path path = '' - for step in ar_frame.layers["GrayRectangle"].aoi_scan_path[-5:]: + for step in ar_frame.layers["main_layer"].aoi_scan_path[-5:]: path += f'> {step.aoi} ' - path += f'> {ar_frame.layers["GrayRectangle"].aoi_scan_path.current_aoi}' + path += f'> {ar_frame.layers["main_layer"].aoi_scan_path.current_aoi}' cv2.putText(frame_image, path, 
(20, ar_frame.size[1]-40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA) # Display Transition matrix analysis if loaded try: - transition_matrix_analyzer = ar_frame.layers['GrayRectangle'].aoi_scan_path_analyzers["argaze.GazeAnalysis.TransitionMatrix"] + transition_matrix_analyzer = ar_frame.layers["main_layer"].aoi_scan_path_analyzers["argaze.GazeAnalysis.TransitionMatrix"] cv2.putText(frame_image, f'Transition matrix density: {transition_matrix_analyzer.transition_matrix_density:.2f}', (20, ar_frame.size[1]-160), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA) @@ -97,8 +97,8 @@ def main(): if from_aoi != to_aoi and probability > 0.0: - from_center = ar_frame.layers['GrayRectangle'].aoi_scene[from_aoi].center.astype(int) - to_center = ar_frame.layers['GrayRectangle'].aoi_scene[to_aoi].center.astype(int) + from_center = ar_frame.layers["main_layer"].aoi_scene[from_aoi].center.astype(int) + to_center = ar_frame.layers["main_layer"].aoi_scene[to_aoi].center.astype(int) start_line = (0.5 * from_center + 0.5 * to_center).astype(int) color = [int(probability*200) + 55, int(probability*200) + 55, int(probability*200) + 55] @@ -112,7 +112,7 @@ def main(): # Display aoi scan path basic metrics analysis if loaded try: - basic_analyzer = ar_frame.layers['GrayRectangle'].aoi_scan_path_analyzers["argaze.GazeAnalysis.Basic"] + basic_analyzer = ar_frame.layers["main_layer"].aoi_scan_path_analyzers["argaze.GazeAnalysis.Basic"] # Write basic analysis cv2.putText(frame_image, f'Step number: {basic_analyzer.steps_number}', (20, ar_frame.size[1]-440), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA) @@ -141,7 +141,7 @@ def main(): # Display aoi scan path K-modified coefficient analysis if loaded try: - aoi_kc_analyzer = ar_frame.layers['GrayRectangle'].aoi_scan_path_analyzers["argaze.GazeAnalysis.KCoefficient"] + aoi_kc_analyzer = ar_frame.layers["main_layer"].aoi_scan_path_analyzers["argaze.GazeAnalysis.KCoefficient"] # Write aoi Kc analysis if aoi_kc_analyzer.K < 0.: @@ -158,7 +158,7 @@ def main(): # Display Lempel-Ziv complexity analysis if loaded try: - lzc_analyzer = ar_frame.layers['GrayRectangle'].aoi_scan_path_analyzers["argaze.GazeAnalysis.LempelZivComplexity"] + lzc_analyzer = ar_frame.layers["main_layer"].aoi_scan_path_analyzers["argaze.GazeAnalysis.LempelZivComplexity"] cv2.putText(frame_image, f'Lempel-Ziv complexity: {lzc_analyzer.lempel_ziv_complexity}', (20, ar_frame.size[1]-200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA) @@ -168,7 +168,7 @@ def main(): # Display N-Gram analysis if loaded try: - ngram_analyzer = ar_frame.layers['GrayRectangle'].aoi_scan_path_analyzers["argaze.GazeAnalysis.NGram"] + ngram_analyzer = ar_frame.layers["main_layer"].aoi_scan_path_analyzers["argaze.GazeAnalysis.NGram"] # Display only 3-gram analysis start = ar_frame.size[1] - ((len(ngram_analyzer.ngrams_count[3]) + 1) * 40) @@ -188,7 +188,7 @@ def main(): # Display Entropy analysis if loaded try: - entropy_analyzer = ar_frame.layers['GrayRectangle'].aoi_scan_path_analyzers["argaze.GazeAnalysis.Entropy"] + entropy_analyzer = ar_frame.layers["main_layer"].aoi_scan_path_analyzers["argaze.GazeAnalysis.Entropy"] cv2.putText(frame_image, f'Stationary entropy: {entropy_analyzer.stationary_entropy:.3f},', (20, ar_frame.size[1]-280), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA) cv2.putText(frame_image, f'Transition entropy: {entropy_analyzer.transition_entropy:.3f},', (20, ar_frame.size[1]-240), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, 
cv2.LINE_AA) -- cgit v1.1 From 23fa1a7835b3c7cfd976b1d160878289b1f0657c Mon Sep 17 00:00:00 2001 From: Theo De La Hogue Date: Sat, 23 Sep 2023 07:22:23 +0200 Subject: Fixing code annotation. Removing useless documentation section. Fixing documentation cross reference. --- .../ar_environment/environment_exploitation.md | 36 ----- .../user_guide/ar_environment/environment_setup.md | 77 ---------- docs/user_guide/ar_environment/introduction.md | 6 - .../areas_of_interest/aoi_scene_description.md | 83 ---------- .../areas_of_interest/aoi_scene_projection.md | 22 --- docs/user_guide/areas_of_interest/introduction.md | 2 +- .../aruco_markers/dictionary_selection.md | 17 --- docs/user_guide/aruco_markers/introduction.md | 15 -- docs/user_guide/aruco_markers/markers_creation.md | 17 --- docs/user_guide/aruco_markers/markers_detection.md | 47 ------ .../aruco_markers/markers_pose_estimation.md | 20 --- .../aruco_markers/markers_scene_description.md | 146 ------------------ .../optic_parameters_calibration.md | 8 +- .../configuration_and_execution.md | 6 +- .../aruco_markers_pipeline/introduction.md | 12 +- docs/user_guide/gaze_analysis/gaze_movement.md | 163 -------------------- docs/user_guide/gaze_analysis/gaze_position.md | 98 ------------ docs/user_guide/gaze_analysis/introduction.md | 7 - docs/user_guide/gaze_analysis/scan_path.md | 169 --------------------- .../advanced_topics/scripting.md | 2 +- .../gaze_analysis_pipeline/aoi_analysis.md | 4 +- .../gaze_analysis_pipeline/introduction.md | 2 +- .../pipeline_modules/aoi_matchers.md | 2 +- .../pipeline_modules/aoi_scan_path_analyzers.md | 2 +- docs/user_guide/gaze_features/gaze_movement.md | 163 ++++++++++++++++++++ docs/user_guide/gaze_features/gaze_position.md | 98 ++++++++++++ docs/user_guide/gaze_features/introduction.md | 7 + docs/user_guide/gaze_features/scan_path.md | 169 +++++++++++++++++++++ mkdocs.yml | 18 +-- src/argaze/ArFeatures.py | 8 +- src/argaze/ArUcoMarkers/ArUcoCamera.py | 6 +- src/argaze/AreaOfInterest/AOI2DScene.py | 9 +- 32 files changed, 475 insertions(+), 966 deletions(-) delete mode 100644 docs/user_guide/ar_environment/environment_exploitation.md delete mode 100644 docs/user_guide/ar_environment/environment_setup.md delete mode 100644 docs/user_guide/ar_environment/introduction.md delete mode 100644 docs/user_guide/areas_of_interest/aoi_scene_description.md delete mode 100644 docs/user_guide/areas_of_interest/aoi_scene_projection.md delete mode 100644 docs/user_guide/aruco_markers/dictionary_selection.md delete mode 100644 docs/user_guide/aruco_markers/introduction.md delete mode 100644 docs/user_guide/aruco_markers/markers_creation.md delete mode 100644 docs/user_guide/aruco_markers/markers_detection.md delete mode 100644 docs/user_guide/aruco_markers/markers_pose_estimation.md delete mode 100644 docs/user_guide/aruco_markers/markers_scene_description.md delete mode 100644 docs/user_guide/gaze_analysis/gaze_movement.md delete mode 100644 docs/user_guide/gaze_analysis/gaze_position.md delete mode 100644 docs/user_guide/gaze_analysis/introduction.md delete mode 100644 docs/user_guide/gaze_analysis/scan_path.md create mode 100644 docs/user_guide/gaze_features/gaze_movement.md create mode 100644 docs/user_guide/gaze_features/gaze_position.md create mode 100644 docs/user_guide/gaze_features/introduction.md create mode 100644 docs/user_guide/gaze_features/scan_path.md diff --git a/docs/user_guide/ar_environment/environment_exploitation.md b/docs/user_guide/ar_environment/environment_exploitation.md deleted file mode 100644 
index 9e4b236..0000000 --- a/docs/user_guide/ar_environment/environment_exploitation.md +++ /dev/null @@ -1,36 +0,0 @@ -Environment exploitation -======================== - -Once loaded, [ArCamera](../../argaze.md/#argaze.ArFeatures.ArCamera) assets can be exploited as illustrated below: - -```python -# Access to AR environment ArUco detector passing it a image where to detect ArUco markers -ar_camera.aruco_detector.detect_markers(image) - -# Access to an AR environment scene -my_first_scene = ar_camera.scenes['my first AR scene'] - -try: - - # Try to estimate AR scene pose from detected markers - tvec, rmat, consistent_markers = my_first_scene.estimate_pose(ar_camera.aruco_detector.detected_markers) - - # Project AR scene into camera image according estimated pose - # Optional visual_hfov argument is set to 160° to clip AOI scene according a cone vision - aoi2D_scene = my_first_scene.project(tvec, rmat, visual_hfov=160) - - # Draw estimated AR scene axis - my_first_scene.draw_axis(image) - - # Draw AOI2D scene projection - aoi2D_scene.draw(image) - - # Do something with AOI2D scene projection - ... - -# Catch exceptions raised by estimate_pose and project methods -except (ArFeatures.PoseEstimationFailed, ArFeatures.SceneProjectionFailed) as e: - - print(e) - -``` diff --git a/docs/user_guide/ar_environment/environment_setup.md b/docs/user_guide/ar_environment/environment_setup.md deleted file mode 100644 index 1f26d26..0000000 --- a/docs/user_guide/ar_environment/environment_setup.md +++ /dev/null @@ -1,77 +0,0 @@ -Environment Setup -================= - -[ArCamera](../../argaze.md/#argaze.ArFeatures.ArCamera) setup is loaded from JSON file format. - -Each [ArCamera](../../argaze.md/#argaze.ArFeatures.ArCamera) defines a unique [ArUcoDetector](../../argaze.md/#argaze.ArUcoMarkers.ArUcoDetector.ArUcoDetector) dedicated to detection of markers from a specific [ArUcoMarkersDictionary](../../argaze.md/#argaze.ArUcoMarkers.ArUcoMarkersDictionary) and with a given size. However, it is possible to load multiple [ArScene](../../argaze.md/#argaze.ArFeatures.ArScene) into a same [ArCamera](../../argaze.md/#argaze.ArFeatures.ArCamera). - -Here is JSON environment file example where it is assumed that mentioned .obj files are located relatively to the environment file on disk. 
- -``` -{ - "name": "my AR environment", - "aruco_detector": { - "dictionary": { - "name": "DICT_APRILTAG_16h5" - } - "marker_size": 5, - "optic_parameters": { - "rms": 0.6, - "dimensions": [ - 1920, - 1080 - ], - "K": [ - [ - 1135, - 0.0, - 956 - ], - [ - 0.0, - 1135, - 560 - ], - [ - 0.0, - 0.0, - 1.0 - ] - ], - "D": [ - 0.01655492265003404, - 0.1985524264972037, - 0.002129965902489484, - -0.0019528582922179365, - -0.5792910353639452 - ] - }, - "parameters": { - "cornerRefinementMethod": 3, - "aprilTagQuadSigma": 2, - "aprilTagDeglitch": 1 - } - }, - "scenes": { - "my first AR scene" : { - "aruco_markers_group": "./first_scene/markers.obj", - "aoi_scene": "./first_scene/aoi.obj", - "angle_tolerance": 15.0, - "distance_tolerance": 2.54 - }, - "my second AR scene" : { - "aruco_markers_group": "./second_scene/markers.obj", - "aoi_scene": "./second_scene/aoi.obj", - "angle_tolerance": 15.0, - "distance_tolerance": 2.54 - } - } -} -``` - -```python -from argaze import ArFeatures - -# Load AR environment -ar_camera = ArFeatures.ArCamera.from_json('./environment.json') -``` diff --git a/docs/user_guide/ar_environment/introduction.md b/docs/user_guide/ar_environment/introduction.md deleted file mode 100644 index b19383b..0000000 --- a/docs/user_guide/ar_environment/introduction.md +++ /dev/null @@ -1,6 +0,0 @@ -AR environment setup -==================== - -ArGaze toolkit eases ArUco and AOI management in a single AR environment setup. - -This section refers to [ArFeatures](../../argaze.md/#argaze.ArFeatures). diff --git a/docs/user_guide/areas_of_interest/aoi_scene_description.md b/docs/user_guide/areas_of_interest/aoi_scene_description.md deleted file mode 100644 index b96c1e0..0000000 --- a/docs/user_guide/areas_of_interest/aoi_scene_description.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -title: AOI scene description ---- - -AOI scene description -===================== - -## 2D description - -An AOI scene can be described in 2D dimension using an [AOI2DScene](../../argaze.md/#argaze.AreaOfInterest.AOI2DScene) from a dictionary description. - -``` dict -{ - "tracking": [[672.0, 54.0], [1632.0, 54.0], [1632.0, 540.0], [672.0, 540.0]], - "system": [[0.0, 54.0], [672.0, 54.0], [672.0, 540.0], [0.0, 540.0]], - "communications": [[0.0, 594.0], [576.0, 594.0], [576.0, 1080.0], [0.0, 1080.0]], - "resources": [[576.0, 594.0], [1632.0, 594.0], [1632.0, 1080.0], [576.0, 1080.0]] -} -... -``` - -Here is a sample of code to show the loading of an [AOI2DScene](../../argaze.md/#argaze.AreaOfInterest.AOI2DScene) from a dictionary description: - - -``` python -from argaze.AreaOfInterest import AOI2DScene - -# Load an AOI2D scene from dictionary -aoi_2d_scene = AOI2DScene.AOI2DScene(aoi_scene_dictionary) -``` - -## 3D description - -An AOI scene can be described in 3D dimension using an [AOI3DScene](../../argaze.md/#argaze.AreaOfInterest.AOI3DScene) built from a 3D model with all AOI as 3D planes and loaded through OBJ file format. -Notice that plane normals are not needed and planes are not necessary 4 vertices shapes. 
- -``` obj -o PIC_ND -v 6.513238 -27.113548 -25.163900 -v 22.994461 -27.310783 -24.552130 -v 6.718690 -6.467261 -26.482569 -v 23.252594 -6.592890 -25.873484 -f 1 2 4 3 -o PIC_ND_Aircraft -v 6.994747 -21.286463 -24.727146 -v 22.740919 -21.406120 -24.147078 -v 7.086208 -12.096219 -25.314123 -v 22.832380 -12.215876 -24.734055 -f 5 6 8 7 -o PIC_ND_Wind -v 7.086199 -11.769333 -25.335127 -v 12.081032 -11.807289 -25.151123 -v 7.115211 -8.854101 -25.521320 -v 12.110044 -8.892057 -25.337317 -f 9 10 12 11 -o PIC_ND_Waypoint -v 17.774197 -11.819057 -24.943428 -v 22.769030 -11.857013 -24.759424 -v 17.803209 -8.903825 -25.129622 -v 22.798042 -8.941781 -24.945618 -f 13 14 16 15 -... -o Thrust_Lever -v 19.046124 15.523837 4.774072 -v 18.997263 -0.967944 5.701000 -v 18.988382 15.923470 -13.243046 -v 18.921808 -0.417994 -17.869610 -v 19.032232 19.241346 -3.040264 -v 19.020988 6.392717 5.872663 -v 18.945322 6.876906 -17.699480 -s off -f 185 190 186 188 191 187 189 -... -``` - -Here is a sample of code to show the loading of an [AOI3DScene](../../argaze.md/#argaze.AreaOfInterest.AOI3DScene) from an OBJ file description: - -``` python -from argaze.AreaOfInterest import AOI3DScene - -# Load an AOI3D scene from OBJ file -aoi_3d_scene = AOI3DScene.AOI3DScene.from_obj('./aoi_scene.obj') -``` diff --git a/docs/user_guide/areas_of_interest/aoi_scene_projection.md b/docs/user_guide/areas_of_interest/aoi_scene_projection.md deleted file mode 100644 index f348c6c..0000000 --- a/docs/user_guide/areas_of_interest/aoi_scene_projection.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: AOI scene projection ---- - -AOI scene projection -==================== - -An [AOI3DScene](../../argaze.md/#argaze.AreaOfInterest.AOI3DScene) can be rotated and translated according to a pose estimation before to project it onto camera image as an [AOI2DScene](../../argaze.md/#argaze.AreaOfInterest.AOI2DScene). - -![AOI projection](../../img/aoi_projection.png) - -``` python -... - -# Assuming pose estimation is done (tvec and rmat) - -# Project AOI 3D scene according pose estimation and optic parameters -aoi2D_scene = aoi3D_scene.project(tvec, rmat, optic_parameters.K) - -# Draw AOI 2D scene -aoi2D_scene.draw(image) -``` diff --git a/docs/user_guide/areas_of_interest/introduction.md b/docs/user_guide/areas_of_interest/introduction.md index 6f74dd4..9467963 100644 --- a/docs/user_guide/areas_of_interest/introduction.md +++ b/docs/user_guide/areas_of_interest/introduction.md @@ -1,7 +1,7 @@ About Areas Of Interest (AOI) ============================= -The [AreaOfInterest submodule](../../argaze.md/#argaze.AreaOfInterest) allows to deal with AOI in a AR environment through a set of high level classes: +The [AreaOfInterest submodule](../../argaze.md/#argaze.AreaOfInterest) allows to deal with AOI through a set of high level classes: * [AOIFeatures](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures) * [AOI3DScene](../../argaze.md/#argaze.AreaOfInterest.AOI3DScene) diff --git a/docs/user_guide/aruco_markers/dictionary_selection.md b/docs/user_guide/aruco_markers/dictionary_selection.md deleted file mode 100644 index b9ba510..0000000 --- a/docs/user_guide/aruco_markers/dictionary_selection.md +++ /dev/null @@ -1,17 +0,0 @@ -Dictionary selection -==================== - -ArUco markers always belongs to a set of markers called ArUco markers dictionary. 
- -![ArUco dictionaries](../../img/aruco_dictionaries.png) - -Many ArUco dictionaries exist with properties concerning the format, the number of markers or the difference between each markers to avoid error in tracking. - -Here is the documention [about ArUco markers dictionaries](https://docs.opencv.org/3.4/d9/d6a/group__aruco.html#gac84398a9ed9dd01306592dd616c2c975). - -``` python -from argaze.ArUcoMarkers import ArUcoMarkersDictionary - -# Create a dictionary of specific April tags -aruco_dictionary = ArUcoMarkersDictionary.ArUcoMarkersDictionary('DICT_APRILTAG_16h5') -``` diff --git a/docs/user_guide/aruco_markers/introduction.md b/docs/user_guide/aruco_markers/introduction.md deleted file mode 100644 index 9d78de0..0000000 --- a/docs/user_guide/aruco_markers/introduction.md +++ /dev/null @@ -1,15 +0,0 @@ -About ArUco markers -=================== - -![OpenCV ArUco markers](https://pyimagesearch.com/wp-content/uploads/2020/12/aruco_generate_tags_header.png) - -The OpenCV library provides a module to detect fiducial markers into a picture and estimate its pose (cf [OpenCV ArUco tutorial page](https://docs.opencv.org/4.x/d5/dae/tutorial_aruco_detection.html)). - -The ArGaze [ArUcoMarkers submodule](../../argaze.md/#argaze.ArUcoMarkers) eases markers creation, camera calibration, markers detection and 3D scene pose estimation through a set of high level classes: - -* [ArUcoMarkersDictionary](../../argaze.md/#argaze.ArUcoMarkers.ArUcoMarkersDictionary) -* [ArUcoMarkers](../../argaze.md/#argaze.ArUcoMarkers.ArUcoMarker) -* [ArUcoBoard](../../argaze.md/#argaze.ArUcoMarkers.ArUcoBoard) -* [ArUcoOpticCalibrator](../../argaze.md/#argaze.ArUcoMarkers.ArUcoOpticCalibrator) -* [ArUcoDetector](../../argaze.md/#argaze.ArUcoMarkers.ArUcoDetector) -* [ArUcoMarkersGroup](../../argaze.md/#argaze.ArUcoMarkers.ArUcoMarkersGroup) \ No newline at end of file diff --git a/docs/user_guide/aruco_markers/markers_creation.md b/docs/user_guide/aruco_markers/markers_creation.md deleted file mode 100644 index eab9890..0000000 --- a/docs/user_guide/aruco_markers/markers_creation.md +++ /dev/null @@ -1,17 +0,0 @@ -Markers creation -================ - -The creation of [ArUcoMarkers](../../argaze.md/#argaze.ArUcoMarkers.ArUcoMarker) from a dictionary is illustrated in the code below: - -``` python -from argaze.ArUcoMarkers import ArUcoMarkersDictionary - -# Create a dictionary of specific April tags -aruco_dictionary = ArUcoMarkersDictionary.ArUcoMarkersDictionary('DICT_APRILTAG_16h5') - -# Export marker n°5 as 3.5 cm picture with 300 dpi resolution -aruco_dictionary.create_marker(5, 3.5).save('./markers/', 300) - -# Export all dictionary markers as 3.5 cm pictures with 300 dpi resolution -aruco_dictionary.save('./markers/', 3.5, 300) -``` \ No newline at end of file diff --git a/docs/user_guide/aruco_markers/markers_detection.md b/docs/user_guide/aruco_markers/markers_detection.md deleted file mode 100644 index af2fb4f..0000000 --- a/docs/user_guide/aruco_markers/markers_detection.md +++ /dev/null @@ -1,47 +0,0 @@ -Markers detection -================= - -![Detected markers](../../img/detected_markers.png) - -Firstly, the [ArUcoDetector](../../argaze.md/#argaze.ArUcoMarkers.ArUcoDetector.ArUcoDetector) needs to know the expected dictionary and size (in centimeter) of the [ArUcoMarkers](../../argaze.md/#argaze.ArUcoMarkers.ArUcoMarker) it have to detect. 
- -Notice that extra parameters are passed to detector: see [OpenCV ArUco markers detection parameters documentation](https://docs.opencv.org/4.x/d1/dcd/structcv_1_1aruco_1_1DetectorParameters.html) to know more. - -``` python -from argaze.ArUcoMarkers import ArUcoDetector, ArUcoOpticCalibrator - -# Assuming camera calibration data are loaded - -# Loading extra detector parameters -extra_parameters = ArUcoDetector.DetectorParameters.from_json('./detector_parameters.json') - -# Create ArUco detector to track DICT_APRILTAG_16h5 5cm length markers -aruco_detector = ArUcoDetector.ArUcoDetector(optic_parameters=optic_parameters, dictionary='DICT_APRILTAG_16h5', marker_size=5, parameters=extra_parameters) -``` - -Here is [DetectorParameters](../../argaze.md/#argaze.ArUcoMarkers.ArUcoDetector.DetectorParameters) JSON file example: - -``` -{ - "cornerRefinementMethod": 1, - "aprilTagQuadSigma": 2, - "aprilTagDeglitch": 1 -} -``` - -The [ArUcoDetector](../../argaze.md/#argaze.ArUcoMarkers.ArUcoDetector.ArUcoDetector) processes image to detect markers and allows to draw detection results onto it: - -``` python -# Detect markers into image and draw them -aruco_detector.detect_markers(image) -aruco_detector.draw_detected_markers(image) - -# Get corners position into image related to each detected markers -for marker_id, marker in aruco_detector.detected_markers.items(): - - print(f'marker {marker_id} corners: ', marker.corners) - - # Do something with detected marker i corners - ... - -``` diff --git a/docs/user_guide/aruco_markers/markers_pose_estimation.md b/docs/user_guide/aruco_markers/markers_pose_estimation.md deleted file mode 100644 index 487c220..0000000 --- a/docs/user_guide/aruco_markers/markers_pose_estimation.md +++ /dev/null @@ -1,20 +0,0 @@ -Markers pose estimation -======================= - -After [ArUcoMarkers](../../argaze.md/#argaze.ArUcoMarkers.ArUcoMarker) detection, it is possible to estimate [ArUcoMarkers](../../argaze.md/#argaze.ArUcoMarkers.ArUcoMarker) pose in camera axis. - -![Pose estimation](../../img/pose_estimation.png) - -``` python -# Estimate markers pose -aruco_detector.estimate_markers_pose() - -# Get pose estimation related to each detected markers -for marker_id, marker in aruco_detector.detected_markers.items(): - - print(f'marker {marker_id} translation: ', marker.translation) - print(f'marker {marker_id} rotation: ', marker.rotation) - - # Do something with each marker pose estimation - ... -``` \ No newline at end of file diff --git a/docs/user_guide/aruco_markers/markers_scene_description.md b/docs/user_guide/aruco_markers/markers_scene_description.md deleted file mode 100644 index c6dbf31..0000000 --- a/docs/user_guide/aruco_markers/markers_scene_description.md +++ /dev/null @@ -1,146 +0,0 @@ -Markers scene description -========================= - -The ArGaze toolkit provides [ArUcoMarkersGroup](../../argaze.md/#argaze.ArUcoMarkers.ArUcoMarkersGroup) class to describe where [ArUcoMarkers](../../argaze.md/#argaze.ArUcoMarkers.ArUcoMarker) are placed into a 3D model. - -![ArUco scene](../../img/aruco_markers_group.png) - -[ArUcoMarkersGroup](../../argaze.md/#argaze.ArUcoMarkers.ArUcoMarkersGroup) is useful to: - -* filter markers that belongs to this predefined scene, -* check the consistency of detected markers according the place where each marker is expected to be, -* estimate the pose of the scene from the pose of detected markers. 
- -## Scene creation - -### from OBJ - -ArUco scene description uses common OBJ file format that can be exported from most 3D editors. Notice that plane normals (vn) needs to be exported. - -``` obj -o DICT_APRILTAG_16h5#0_Marker -v -3.004536 0.022876 2.995370 -v 2.995335 -0.015498 3.004618 -v -2.995335 0.015498 -3.004618 -v 3.004536 -0.022876 -2.995370 -vn 0.0064 1.0000 -0.0012 -s off -f 1//1 2//1 4//1 3//1 -o DICT_APRILTAG_16h5#1_Marker -v -33.799068 46.450645 -32.200436 -v -27.852505 47.243549 -32.102116 -v -34.593925 52.396473 -32.076626 -v -28.647360 53.189377 -31.978306 -vn -0.0135 -0.0226 0.9997 -s off -f 5//2 6//2 8//2 7//2 -... -``` - -Here is a sample of code to show the loading of an [ArUcoMarkersGroup](../../argaze.md/#argaze.ArUcoMarkers.ArUcoMarkersGroup) OBJ file description: - -``` python -from argaze.ArUcoMarkers import ArUcoMarkersGroup - -# Create an ArUco scene from a OBJ file description -aruco_markers_group = ArUcoMarkersGroup.ArUcoMarkersGroup.from_obj('./markers.obj') - -# Print loaded marker places -for place_id, place in aruco_markers_group.places.items(): - - print(f'place {place_id} for marker: ', place.marker.identifier) - print(f'place {place_id} translation: ', place.translation) - print(f'place {place_id} rotation: ', place.rotation) -``` - -### from JSON - -[ArUcoMarkersGroup](../../argaze.md/#argaze.ArUcoMarkers.ArUcoMarkersGroup) description can also be written in a JSON file format. - -``` json -{ - "dictionary": "DICT_ARUCO_ORIGINAL", - "marker_size": 1, - "places": { - "0": { - "translation": [0, 0, 0], - "rotation": [0, 0, 0] - }, - "1": { - "translation": [10, 10, 0], - "rotation": [0, 0, 0] - }, - "2": { - "translation": [0, 10, 0], - "rotation": [0, 0, 0] - } - } -} -``` - -### from detected markers - -Here is a more advanced usage where ArUco scene is built from markers detected into an image: - -``` python -from argaze.ArUcoMarkers import ArUcoMarkersGroup - -# Assuming markers have been detected and their pose estimated thanks to ArUcoDetector -... - -# Build ArUco scene from detected markers -aruco_markers_group = ArUcoMarkersGroup.ArUcoMarkersGroup(aruco_detector.marker_size, aruco_detector.dictionary, aruco_detector.detected_markers) -``` - -## Markers filtering - -Considering markers are detected, here is how to filter them to consider only those which belongs to the scene: - -``` python -scene_markers, remaining_markers = aruco_markers_group.filter_markers(aruco_detector.detected_markers) -``` - -## Marker poses consistency - -Then, scene markers poses can be validated by verifying their spatial consistency considering angle and distance tolerance. This is particularly useful to discard ambiguous marker pose estimations when markers are parallel to camera plane (see [issue on OpenCV Contribution repository](https://github.com/opencv/opencv_contrib/issues/3190#issuecomment-1181970839)). - -``` python -# Check scene markers consistency with 10° angle tolerance and 1 cm distance tolerance -consistent_markers, unconsistent_markers, unconsistencies = aruco_markers_group.check_markers_consistency(scene_markers, 10, 1) -``` - -## Scene pose estimation - -Several approaches are available to perform [ArUcoMarkersGroup](../../argaze.md/#argaze.ArUcoMarkers.ArUcoMarkersGroup) pose estimation from markers belonging to the scene. 
- -The first approach considers that scene pose can be estimated **from a single marker pose**: - -``` python -# Let's select one consistent scene marker -marker_id, marker = consistent_markers.popitem() - -# Estimate scene pose from a single marker -tvec, rmat = self.aruco_markers_group.estimate_pose_from_single_marker(marker) -``` - -The second approach considers that scene pose can be estimated by **averaging several marker poses**: - -``` python -# Estimate scene pose from all consistent scene markers -tvec, rmat = self.aruco_markers_group.estimate_pose_from_markers(consistent_markers) -``` - -The third approach is only available when ArUco markers are placed in such a configuration that is possible to **define orthogonal axis**: - -``` python -tvec, rmat = self.aruco_markers_group.estimate_pose_from_axis_markers(origin_marker, horizontal_axis_marker, vertical_axis_marker) -``` - -## Scene exportation - -As ArUco scene can be exported to OBJ file description to import it into most 3D editors. - -``` python -# Export an ArUco scene as OBJ file description -aruco_markers_group.to_obj('markers.obj') -``` diff --git a/docs/user_guide/aruco_markers_pipeline/advanced_topics/optic_parameters_calibration.md b/docs/user_guide/aruco_markers_pipeline/advanced_topics/optic_parameters_calibration.md index 455d95a..fbe06d1 100644 --- a/docs/user_guide/aruco_markers_pipeline/advanced_topics/optic_parameters_calibration.md +++ b/docs/user_guide/aruco_markers_pipeline/advanced_topics/optic_parameters_calibration.md @@ -3,11 +3,11 @@ Calibrate optic parameters A camera device have to be calibrated to compensate its optical distorsion. -![Optic parameters calibration](../../img/optic_calibration.png) +![Optic parameters calibration](../../../img/optic_calibration.png) ## Print calibration board -The first step to calibrate a camera is to create an [ArUcoBoard](../../argaze.md/#argaze.ArUcoMarkers.ArUcoBoard) like in the code below: +The first step to calibrate a camera is to create an [ArUcoBoard](../../../argaze.md/#argaze.ArUcoMarkers.ArUcoBoard) like in the code below: ``` python from argaze.ArUcoMarkers import ArUcoMarkersDictionary, ArUcoBoard @@ -29,9 +29,9 @@ Let's print the calibration board before to go further. ## Capture board pictures -Then, the calibration process needs to make many different captures of an [ArUcoBoard](../../argaze.md/#argaze.ArUcoMarkers.ArUcoBoard) through the camera and then, pass them to an [ArUcoDetector](../../argaze.md/#argaze.ArUcoMarkers.ArUcoDetector.ArUcoDetector) instance to detect board corners and store them as calibration data into an [ArUcoOpticCalibrator](../../argaze.md/#argaze.ArUcoMarkers.ArUcoOpticCalibrator) for final calibration process. +Then, the calibration process needs to make many different captures of an [ArUcoBoard](../../../argaze.md/#argaze.ArUcoMarkers.ArUcoBoard) through the camera and then, pass them to an [ArUcoDetector](../../../argaze.md/#argaze.ArUcoMarkers.ArUcoDetector.ArUcoDetector) instance to detect board corners and store them as calibration data into an [ArUcoOpticCalibrator](../../../argaze.md/#argaze.ArUcoMarkers.ArUcoOpticCalibrator) for final calibration process. 
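For illustration, a minimal capture-and-calibrate loop could look like the sketch below. It is not part of this patch: the `detect_board`, `board_corners_number`, `board_corners`, `board_corners_identifier`, `store_calibration_data` and `calibrate` calls, as well as the board `markers_number` and `corners_number` attributes, are assumptions about the [ArUcoDetector](../../../argaze.md/#argaze.ArUcoMarkers.ArUcoDetector.ArUcoDetector) and [ArUcoOpticCalibrator](../../../argaze.md/#argaze.ArUcoMarkers.ArUcoOpticCalibrator) APIs referenced above, and the camera capture is plain OpenCV:

``` python
import cv2

from argaze.ArUcoMarkers import ArUcoMarkersDictionary, ArUcoBoard, ArUcoDetector, ArUcoOpticCalibrator

# Create a board of 7 columns and 5 rows with 5 cm squares and 3 cm markers, as printed above
aruco_dictionary = ArUcoMarkersDictionary.ArUcoMarkersDictionary('DICT_APRILTAG_16h5')
aruco_board = ArUcoBoard.ArUcoBoard(7, 5, 5, 3, aruco_dictionary)

# Create a detector for the same dictionary and marker size, and an optic calibrator
aruco_detector = ArUcoDetector.ArUcoDetector(dictionary='DICT_APRILTAG_16h5', marker_size=3)
aruco_optic_calibrator = ArUcoOpticCalibrator.ArUcoOpticCalibrator()

# Capture several different views of the board from the default camera
video_capture = cv2.VideoCapture(0)

for _ in range(20):

    success, image = video_capture.read()

    if not success:
        continue

    # Assumed API: detect board corners into the image
    aruco_detector.detect_board(image, aruco_board, aruco_board.markers_number)

    # Assumed API: store detected corners as calibration data when the whole board is visible
    if aruco_detector.board_corners_number == aruco_board.corners_number:

        aruco_optic_calibrator.store_calibration_data(aruco_detector.board_corners, aruco_detector.board_corners_identifier)

# Assumed API: process all stored calibration data to estimate optic parameters
optic_parameters = aruco_optic_calibrator.calibrate(aruco_board, dimensions=image.shape[:2])
```
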
-![Calibration step](../../img/optic_calibration_step.png) +![Calibration step](../../../img/optic_calibration_step.png) The sample of code below illustrates how to: diff --git a/docs/user_guide/aruco_markers_pipeline/configuration_and_execution.md b/docs/user_guide/aruco_markers_pipeline/configuration_and_execution.md index 81c577f..35b64f7 100644 --- a/docs/user_guide/aruco_markers_pipeline/configuration_and_execution.md +++ b/docs/user_guide/aruco_markers_pipeline/configuration_and_execution.md @@ -3,7 +3,7 @@ Load and execute pipeline Once [ArUco markers are placed into a scene](aruco_markers_description.md), they can be detected thanks to [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) class. -As [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) inherits from [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame), the [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) class also benefits from all the services described in [gaze analysis pipeline section](./user_guide/gaze_analysis_pipeline/introduction.md). +As [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) inherits from [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame), the [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) class also benefits from all the services described in [gaze analysis pipeline section](../gaze_analysis_pipeline/introduction.md). ![ArUco camera frame](../../img/aruco_camera_frame.png) @@ -89,7 +89,7 @@ The first [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) pipeline step de ### Image parameters - *inherited from [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame)* -The usual [ArFrame visualisation parameters](./user_guide/gaze_analysis_pipeline/visualisation.md) plus one additional *draw_detected_markers* field. +The usual [ArFrame visualisation parameters](../gaze_analysis_pipeline/visualisation.md) plus one additional *draw_detected_markers* field. ## Pipeline execution @@ -119,7 +119,7 @@ Pass each camera image to [ArUcoCamera.watch](../../argaze.md/#argaze.ArFeatures ### Analyse timestamped gaze positions into camera frame -As mentioned above, [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) inherits from [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) and so, benefits from all the services described in [gaze analysis pipeline section](./user_guide/gaze_analysis_pipeline/introduction.md). +As mentioned above, [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) inherits from [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) and so, benefits from all the services described in [gaze analysis pipeline section](../gaze_analysis_pipeline/introduction.md). Particularly, timestamped gaze positions can be passed one by one to [ArUcoCamera.look](../../argaze.md/#argaze.ArFeatures.ArFrame.look) method to execute the whole pipeline dedicated to gaze analysis. diff --git a/docs/user_guide/aruco_markers_pipeline/introduction.md b/docs/user_guide/aruco_markers_pipeline/introduction.md index 836569a..f781fe8 100644 --- a/docs/user_guide/aruco_markers_pipeline/introduction.md +++ b/docs/user_guide/aruco_markers_pipeline/introduction.md @@ -11,7 +11,7 @@ The ArGaze [ArUcoMarkers submodule](../../argaze.md/#argaze.ArUcoMarkers) eases First, let's look at the schema below: it gives an overview of the main notions involved in the following chapters. 
-![ArUco markers pipeline](../../img/aruco_markers_pipeline.png) + To build your own ArUco markers pipeline, you need to know: @@ -19,11 +19,11 @@ To build your own ArUco markers pipeline, you need to know: * [How to describe scene's AOI](aoi_description.md), * [How to load and execute ArUco markers pipeline](configuration_and_execution.md), * [How to estimate scene pose](pose_estimation.md), -* [How to project AOI into camera frame](aoi_projection.md), -* [How to visualize ArUcoCamera and ArUcoScenes](visualisation.md) +* [How to project AOI into camera frame](aoi_projection.md) + More advanced features are also explained like: -* [How to script ArUco markers pipeline](advanced_topics/scripting.md) -* [How to calibrate optic parameters](optic_parameters_calibration.md) -* [How to improve ArUco markers detection](advanced_topics/aruco_detector_configuration.md) + +* [How to calibrate optic parameters](advanced_topics/optic_parameters_calibration.md) + diff --git a/docs/user_guide/gaze_analysis/gaze_movement.md b/docs/user_guide/gaze_analysis/gaze_movement.md deleted file mode 100644 index 83f67e1..0000000 --- a/docs/user_guide/gaze_analysis/gaze_movement.md +++ /dev/null @@ -1,163 +0,0 @@ -Gaze movement -============= - -## Definition - -!!! note - - *"The act of classifying eye movements into distinct events is, on a general level, driven by a desire to isolate different intervals of the data stream strongly correlated with certain oculomotor or cognitive properties."* - - Citation from ["One algorithm to rule them all? An evaluation and discussion of ten eye movement event-detection algorithms"](https://link.springer.com/article/10.3758/s13428-016-0738-9) article. - -[GazeFeatures](../../argaze.md/#argaze.GazeFeatures) defines abstract [GazeMovement](../../argaze.md/#argaze.GazeFeatures.GazeMovement) class, then abstract [Fixation](../../argaze.md/#argaze.GazeFeatures.Fixation) and [Saccade](../../argaze.md/#argaze.GazeFeatures.Saccade) classes which inherit from [GazeMovement](../../argaze.md/#argaze.GazeFeatures.GazeMovement). - -The **positions** [GazeMovement](../../argaze.md/#argaze.GazeFeatures.GazeMovement) attribute contain all [GazePositions](../../argaze.md/#argaze.GazeFeatures.GazePosition) belonging to itself. - -![Fixation and Saccade](../../img/fixation_and_saccade.png) - -## Identification - -[GazeFeatures](../../argaze.md/#argaze.GazeFeatures) defines abstract [GazeMovementIdentifier](../../argaze.md/#argaze.GazeFeatures.GazeMovementIdentifier) classe to let add various identification algorithms. - -Some gaze movement identification algorithms are available thanks to [GazeAnalysis](../../argaze.md/#argaze.GazeAnalysis) submodule: - -* [Dispersion threshold identification (I-DT)](../../argaze.md/#argaze.GazeAnalysis.DispersionThresholdIdentification) -* [Velocity threshold identification (I-VT)](../../argaze.md/#argaze.GazeAnalysis.VelocityThresholdIdentification) - -### Identify method - -[GazeMovementIdentifier.identify](../../argaze.md/#argaze.GazeFeatures.GazeMovementIdentifier.identify) method allows to fed its identification algorithm with successive gaze positions to output Fixation, Saccade or any kind of GazeMovement instances. 
- -Here is a sample of code based on [I-DT](../../argaze.md/#argaze.GazeAnalysis.DispersionThresholdIdentification) algorithm to illustrate how to use it: - -``` python -from argaze import GazeFeatures -from argaze.GazeAnalysis import DispersionThresholdIdentification - -# Create a gaze movement identifier based on dispersion algorithm with 50px max deviation 200 ms max duration thresholds -gaze_movement_identifier = DispersionThresholdIdentification.GazeMovementIdentifier(50, 200) - -# Assuming that timestamped gaze positions are provided through live stream or later data reading -...: - - gaze_movement = gaze_movement_identifier.identify(timestamp, gaze_position) - - # Fixation identified - if GazeFeatures.is_fixation(gaze_movement): - - # Access to first gaze position of identified fixation - start_ts, start_position = gaze_movement.positions.first - - # Access to fixation duration - print('duration: {gaze_movement.duration}') - - # Iterate over all gaze positions of identified fixation - for ts, position in gaze_movement.positions.items(): - - # Do something with each fixation position - ... - - # Saccade identified - elif GazeFeatures.is_saccade(gaze_movement): - - # Access to first gaze position of identified saccade - start_ts, start_position = gaze_movement.positions.first - - # Access to saccade amplitude - print('amplitude: {gaze_movement.amplitude}') - - # Iterate over all gaze positions of identified saccade - for ts, position in gaze_movement.positions.items(): - - # Do something with each saccade position - ... - - # No gaze movement identified - else: - - continue - -``` - -### Browse method - -[GazeMovementIdentifier.browse](../../argaze.md/#argaze.GazeFeatures.GazeMovementIdentifier.browse) method allows to pass a [TimeStampedGazePositions](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazePositions) buffer to apply identification algorithm on all gaze positions inside. - -Identified gaze movements are returned through: - -* [TimeStampedGazeMovements](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazeMovements) instance where all fixations are stored by starting gaze position timestamp. -* [TimeStampedGazeMovements](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazeMovements) instance where all saccades are stored by starting gaze position timestamp. -* [TimeStampedGazeStatus](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazeStatus) instance where all gaze positions are linked to a fixation or saccade index. - -``` python -# Assuming that timestamped gaze positions are provided through data reading - -ts_fixations, ts_saccades, ts_status = gaze_movement_identifier.browse(ts_gaze_positions) - -``` - -* ts_fixations would look like: - -|timestamp|positions |duration|dispersion|focus | -|:--------|:-------------------------------------------------------------|:-------|:---------|:--------| -|60034 |{"60034":[846,620], "60044":[837,641], "60054":[835,649], ...}|450 |40 |(840,660)| -|60504 |{"60504":[838,667], "60514":[838,667], "60524":[837,669], ...}|100 |38 |(834,651)| -|... |... |... |.. |... | - -* ts_saccades would look like: - -|timestamp|positions |duration| -|:--------|:---------------------------------------|:-------| -|60484 |{"60484":[836, 669], "60494":[837, 669]}|10 | -|60594 |{"60594":[833, 613], "60614":[927, 601]}|20 | -|... |... |... | - -* ts_status would look like: - -|timestamp|position |type |index| -|:--------|:---------|:-------|:----| -|60034 |(846, 620)|Fixation|1 | -|60044 |(837, 641)|Fixation|1 | -|... |... |... |. 
| -|60464 |(836, 668)|Fixation|1 | -|60474 |(836, 668)|Fixation|1 | -|60484 |(836, 669)|Saccade |1 | -|60494 |(837, 669)|Saccade |1 | -|60504 |(838, 667)|Fixation|2 | -|60514 |(838, 667)|Fixation|2 | -|... |... |... |. | -|60574 |(825, 629)|Fixation|2 | -|60584 |(829, 615)|Fixation|2 | -|60594 |(833, 613)|Saccade |2 | -|60614 |(927, 601)|Saccade |2 | -|60624 |(933, 599)|Fixation|3 | -|60634 |(934, 603)|Fixation|3 | -|... |... |... |. | - - -!!! note - [TimeStampedGazeMovements](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazeMovements), [TimeStampedGazeMovements](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazeMovements) and [TimeStampedGazeStatus](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazeStatus) classes inherit from [TimeStampedBuffer](../../argaze.md/#argaze.DataStructures.TimeStampedBuffer) class. - - Read [Timestamped data](../timestamped_data/introduction.md) section to understand all features it provides. - -### Generator method - -[GazeMovementIdentifier](../../argaze.md/#argaze.GazeFeatures.GazeMovementIdentifier) can be called with a [TimeStampedGazePositions](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazePositions) buffer in argument to generate gaze movement each time one is identified. - -``` python -# Assuming that timestamped gaze positions are provided through data reading - -for ts, gaze_movement in gaze_movement_identifier(ts_gaze_positions): - - # Fixation identified - if GazeFeatures.is_fixation(gaze_movement): - - # Do something with each fixation - ... - - # Saccade identified - elif GazeFeatures.is_saccade(gaze_movement): - - # Do something with each saccade - ... -``` \ No newline at end of file diff --git a/docs/user_guide/gaze_analysis/gaze_position.md b/docs/user_guide/gaze_analysis/gaze_position.md deleted file mode 100644 index 48495b4..0000000 --- a/docs/user_guide/gaze_analysis/gaze_position.md +++ /dev/null @@ -1,98 +0,0 @@ -Gaze position -============= - -[GazeFeatures](../../argaze.md/#argaze.GazeFeatures) defines a [GazePosition](../../argaze.md/#argaze.GazeFeatures.GazePosition) class to handle point coordinates with a precision value. - -``` python -from argaze import GazeFeatures - -# Define a basic gaze position -gaze_position = GazeFeatures.GazePosition((123, 456)) - -# Define a gaze position with a precision value -gaze_position = GazeFeatures.GazePosition((789, 765), precision=10) - -# Access to gaze position value and precision -print(f'position: {gaze_position.value}') -print(f'precision: {gaze_position.precision}') - -``` - -## Validity - -[GazeFeatures](../../argaze.md/#argaze.GazeFeatures) defines also a [UnvalidGazePosition](../../argaze.md/#argaze.GazeFeatures.UnvalidGazePosition) class that inherits from [GazePosition](../../argaze.md/#argaze.GazeFeatures.GazePosition) to handle case where no gaze position exists because of any specific device reason. - -``` python -from argaze import GazeFeatures - -# Define a basic unvalid gaze position -gaze_position = GazeFeatures.UnvalidGazePosition() - -# Define a basic unvalid gaze position with a message value -gaze_position = GazeFeatures.UnvalidGazePosition("Something bad happened") - -# Access to gaze position validity -print(f'validity: {gaze_position.valid}') - -``` - -## Distance - -[GazePosition](../../argaze.md/#argaze.GazeFeatures.GazePosition) class provides a **distance** method to calculate the distance to another gaze position instance. 
- -![Distance](../../img/distance.png) - -``` python -# Distance between A and B positions -d = gaze_position_A.distance(gaze_position_B) -``` - -## Overlapping - -[GazePosition](../../argaze.md/#argaze.GazeFeatures.GazePosition) class provides an **overlap** method to test if a gaze position overlaps another one considering their precisions. - -![Gaze overlapping](../../img/overlapping.png) - -``` python -# Check that A overlaps B -if gaze_position_A.overlap(gaze_position_B): - - # Do something if A overlaps B - ... - -# Check that A overlaps B and B overlaps A -if gaze_position_A.overlap(gaze_position_B, both=True): - - # Do something if A overlaps B AND B overlaps A - ... -``` - -## Timestamped gaze positions - -[TimeStampedGazePositions](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazePositions) inherits from [TimeStampedBuffer](../../argaze.md/#argaze.DataStructures.TimeStampedBuffer) class to handle especially gaze positions. - -### Import from dataframe - -It is possible to load timestamped gaze positions from a [Pandas DataFrame](https://pandas.pydata.org/docs/getting_started/intro_tutorials/01_table_oriented.html#min-tut-01-tableoriented) object. - -```python -import pandas - -# Load gaze positions from a CSV file into Panda Dataframe -dataframe = pandas.read_csv('gaze_positions.csv', delimiter="\t", low_memory=False) - -# Convert Panda dataframe into TimestampedGazePositions buffer precising the use of each specific column labels -ts_gaze_positions = GazeFeatures.TimeStampedGazePositions.from_dataframe(dataframe, timestamp = 'Recording timestamp [ms]', x = 'Gaze point X [px]', y = 'Gaze point Y [px]') - -``` -### Iterator - -Like [TimeStampedBuffer](../../argaze.md/#argaze.DataStructures.TimeStampedBuffer), [TimeStampedGazePositions](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazePositions) class provides iterator feature: - -```python -for timestamp, gaze_position in ts_gaze_positions.items(): - - # Do something with each gaze position - ... - -``` diff --git a/docs/user_guide/gaze_analysis/introduction.md b/docs/user_guide/gaze_analysis/introduction.md deleted file mode 100644 index bf818ba..0000000 --- a/docs/user_guide/gaze_analysis/introduction.md +++ /dev/null @@ -1,7 +0,0 @@ -Gaze analysis -============= - -This section refers to: - -* [GazeFeatures](../../argaze.md/#argaze.GazeFeatures) -* [GazeAnalysis](../../argaze.md/#argaze.GazeAnalysis) \ No newline at end of file diff --git a/docs/user_guide/gaze_analysis/scan_path.md b/docs/user_guide/gaze_analysis/scan_path.md deleted file mode 100644 index 46af28b..0000000 --- a/docs/user_guide/gaze_analysis/scan_path.md +++ /dev/null @@ -1,169 +0,0 @@ -Scan path -========= - -[GazeFeatures](../../argaze.md/#argaze.GazeFeatures) defines classes to handle successive fixations/saccades and analyse their spatial or temporal properties. - -## Fixation based scan path - -### Definition - -The [ScanPath](../../argaze.md/#argaze.GazeFeatures.ScanPath) class is defined as a list of [ScanSteps](../../argaze.md/#argaze.GazeFeatures.ScanStep) which are defined as a fixation and a consecutive saccade. - -![Fixation based scan path](../../img/scan_path.png) - -As fixations and saccades are identified, the scan path is built by calling respectively [append_fixation](../../argaze.md/#argaze.GazeFeatures.ScanPath.append_fixation) and [append_saccade](../../argaze.md/#argaze.GazeFeatures.ScanPath.append_saccade) methods. 
- -### Analysis - -[GazeFeatures](../../argaze.md/#argaze.GazeFeatures) defines abstract [ScanPathAnalyzer](../../argaze.md/#argaze.GazeFeatures.ScanPathAnalyzer) classe to let add various analysis algorithms. - -Some scan path analysis are available thanks to [GazeAnalysis](../../argaze.md/#argaze.GazeAnalysis) submodule: - -* [K-Coefficient](../../argaze.md/#argaze.GazeAnalysis.KCoefficient) -* [Nearest Neighbor Index](../../argaze.md/#argaze.GazeAnalysis.NearestNeighborIndex) -* [Exploit Explore Ratio](../../argaze.md/#argaze.GazeAnalysis.ExploitExploreRatio) - -### Example - -Here is a sample of code to illustrate how to built a scan path and analyze it: - -``` python -from argaze import GazeFeatures -from argaze.GazeAnalysis import KCoefficient - -# Create a empty scan path -scan_path = GazeFeatures.ScanPath() - -# Create a K coefficient analyzer -kc_analyzer = KCoefficient.ScanPathAnalyzer() - -# Assuming a gaze movement is identified at ts time -...: - - # Fixation identified - if GazeFeatures.is_fixation(gaze_movement): - - # Append fixation to scan path : no step is created - scan_path.append_fixation(ts, gaze_movement) - - # Saccade identified - elif GazeFeatures.is_saccade(gaze_movement): - - # Append saccade to scan path : a new step should be created - new_step = scan_path.append_saccade(data_ts, gaze_movement) - - # Analyse scan path - if new_step: - - K = kc_analyzer.analyze(scan_path) - - # Do something with K metric - ... -``` - -## AOI based scan path - -### Definition - -The [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath) class is defined as a list of [AOIScanSteps](../../argaze.md/#argaze.GazeFeatures.AOIScanStep) which are defined as set of consecutives fixations looking at a same Area Of Interest (AOI) and a consecutive saccade. - -![AOI based scan path](../../img/aoi_scan_path.png) - -As fixations and saccades are identified, the scan path is built by calling respectively [append_fixation](../../argaze.md/#argaze.GazeFeatures.AOIScanPath.append_fixation) and [append_saccade](../../argaze.md/#argaze.GazeFeatures.AOIScanPath.append_saccade) methods. - -### Analysis - -[GazeFeatures](../../argaze.md/#argaze.GazeFeatures) defines abstract [AOIScanPathAnalyzer](../../argaze.md/#argaze.GazeFeatures.AOIScanPathAnalyzer) classe to let add various analysis algorithms. - -Some scan path analysis are available thanks to [GazeAnalysis](../../argaze.md/#argaze.GazeAnalysis) submodule: - -* [Transition matrix](../../argaze.md/#argaze.GazeAnalysis.TransitionMatrix) -* [Entropy](../../argaze.md/#argaze.GazeAnalysis.Entropy) -* [Lempel-Ziv complexity](../../argaze.md/#argaze.GazeAnalysis.LempelZivComplexity) -* [N-Gram](../../argaze.md/#argaze.GazeAnalysis.NGram) -* [K-modified coefficient](../../argaze.md/#argaze.GazeAnalysis.KCoefficient) - -### Example - -Here is a sample of code to illustrate how to built a AOI scan path and analyze it: - -``` python -from argaze import GazeFeatures -from argaze.GazeAnalysis import LempelZivComplexity - -# Assuming all AOI names are listed -... - -# Create a empty AOI scan path -aoi_scan_path = GazeFeatures.AOIScanPath(aoi_names) - -# Create a Lempel-Ziv complexity analyzer -lzc_analyzer = LempelZivComplexity.AOIScanPathAnalyzer() - -# Assuming a gaze movement is identified at ts time -...: - - # Fixation identified - if GazeFeatures.is_fixation(gaze_movement): - - # Assuming fixation is detected as inside an AOI - ... 
- - # Append fixation to AOI scan path : a new step should be created - new_step = aoi_scan_path.append_fixation(ts, gaze_movement, looked_aoi_name) - - # Analyse AOI scan path - if new_step: - - LZC = kc_analyzer.analyze(aoi_scan_path) - - # Do something with LZC metric - ... - - # Saccade identified - elif GazeFeatures.is_saccade(gaze_movement): - - # Append saccade to scan path : no step is created - aoi_scan_path.append_saccade(data_ts, gaze_movement) - -``` - -### Advanced - -The [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath) class provides some advanced features to analyse it. - -#### Letter sequence - -When a new [AOIScanStep](../../argaze.md/#argaze.GazeFeatures.AOIScanStep) is created, the [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath) internally affects a unique letter index related to its AOI to ease pattern analysis. -Then, the [AOIScanPath letter_sequence](../../argaze.md/#argaze.GazeFeatures.AOIScanPath.letter_sequence) property returns the concatenation of each [AOIScanStep](../../argaze.md/#argaze.GazeFeatures.AOIScanStep) letter. -The [AOIScanPath get_letter_aoi](../../argaze.md/#argaze.GazeFeatures.AOIScanPath.get_letter_aoi) method helps to get back the AOI related to a letter index. - -``` python -# Assuming the following AOI scan path is built: Foo > Bar > Shu > Foo -aoi_scan_path = ... - -# Letter sequence representation should be: 'ABCA' -print(aoi_scan_path.letter_sequence) - -# Output should be: 'Bar' -print(aoi_scan_path.get_letter_aoi('B')) - -``` - -#### Transition matrix - -When a new [AOIScanStep](../../argaze.md/#argaze.GazeFeatures.AOIScanStep) is created, the [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath) internally counts the number of transitions from an AOI to another AOI to ease Markov chain analysis. -Then, the [AOIScanPath transition_matrix](../../argaze.md/#argaze.GazeFeatures.AOIScanPath.transition_matrix) property returns a [Pandas DataFrame](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html) where indexes are transition departures and columns are transition destinations. - -Here is an exemple of transition matrix for the following [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath): Foo > Bar > Shu > Foo > Bar - -| |Foo|Bar|Shu| -|:--|:--|:--|:--| -|Foo|0 |2 |0 | -|Bar|0 |0 |1 | -|Shu|1 |0 |0 | - - -#### Fixations count - -The [AOIScanPath fixations_count](../../argaze.md/#argaze.GazeFeatures.AOIScanPath.fixations_count) method returns the total number of fixations in the whole scan path and a dictionary to get the fixations count per AOI. diff --git a/docs/user_guide/gaze_analysis_pipeline/advanced_topics/scripting.md b/docs/user_guide/gaze_analysis_pipeline/advanced_topics/scripting.md index 81efa40..637ba57 100644 --- a/docs/user_guide/gaze_analysis_pipeline/advanced_topics/scripting.md +++ b/docs/user_guide/gaze_analysis_pipeline/advanced_topics/scripting.md @@ -133,7 +133,7 @@ A [python Exception](https://docs.python.org/3/tutorial/errors.html#exceptions) ## Setup ArFrame image parameters -[ArFrame.image](../../argaze.md/#argaze.ArFeatures.ArFrame.image) method parameters can be configured thanks to a python dictionary. +[ArFrame.image](../../../argaze.md/#argaze.ArFeatures.ArFrame.image) method parameters can be configured thanks to a python dictionary. 
```python # Assuming ArFrame is loaded diff --git a/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md b/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md index ffc72c7..84730d4 100644 --- a/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md +++ b/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md @@ -1,5 +1,5 @@ -Add AOI analysis -================ +Enable AOI analysis +=================== The [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) class defines a space where to make matching of gaze movements with AOIs and inside which those matchings need to be analyzed. diff --git a/docs/user_guide/gaze_analysis_pipeline/introduction.md b/docs/user_guide/gaze_analysis_pipeline/introduction.md index 02aa82e..23b41a9 100644 --- a/docs/user_guide/gaze_analysis_pipeline/introduction.md +++ b/docs/user_guide/gaze_analysis_pipeline/introduction.md @@ -11,7 +11,7 @@ To build your own gaze analysis pipeline, you need to know: * [How to edit timestamped gaze positions](timestamped_gaze_positions_edition.md), * [How to load and execute gaze analysis pipeline](configuration_and_execution.md), -* [How to add AOI analysis](aoi_analysis.md), +* [How to enable AOI analysis](aoi_analysis.md), * [How to visualize ArFrame and ArLayers](visualisation.md), * [How to log resulted gaze analysis](logging.md), * [How to make heatmap image](heatmap.md). diff --git a/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/aoi_matchers.md b/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/aoi_matchers.md index c8fa63c..61338cc 100644 --- a/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/aoi_matchers.md +++ b/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/aoi_matchers.md @@ -3,7 +3,7 @@ AOI matchers ArGaze provides ready-to-use AOI matching algorithms. -Here are JSON samples to include the chosen module inside [ArLayer configuration](../ar_layer_configuration_and_execution.md) *aoi_matcher* entry. +Here are JSON samples to include the chosen module inside [ArLayer configuration](../aoi_analysis.md) *aoi_matcher* entry. ## Deviation circle coverage diff --git a/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/aoi_scan_path_analyzers.md b/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/aoi_scan_path_analyzers.md index 8d02967..ad1832d 100644 --- a/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/aoi_scan_path_analyzers.md +++ b/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/aoi_scan_path_analyzers.md @@ -3,7 +3,7 @@ AOI scan path analyzers ArGaze provides ready-to-use AOI scan path analysis algorithms. -Here are JSON samples to include a chosen module inside [ArLayer configuration](../ar_layer_configuration_and_execution.md) *aoi_scan_path_analyzers* entry. +Here are JSON samples to include a chosen module inside [ArLayer configuration](../aoi_analysis.md) *aoi_scan_path_analyzers* entry. ## Basic metrics diff --git a/docs/user_guide/gaze_features/gaze_movement.md b/docs/user_guide/gaze_features/gaze_movement.md new file mode 100644 index 0000000..83f67e1 --- /dev/null +++ b/docs/user_guide/gaze_features/gaze_movement.md @@ -0,0 +1,163 @@ +Gaze movement +============= + +## Definition + +!!! note + + *"The act of classifying eye movements into distinct events is, on a general level, driven by a desire to isolate different intervals of the data stream strongly correlated with certain oculomotor or cognitive properties."* + + Citation from ["One algorithm to rule them all? 
An evaluation and discussion of ten eye movement event-detection algorithms"](https://link.springer.com/article/10.3758/s13428-016-0738-9) article.
+
+[GazeFeatures](../../argaze.md/#argaze.GazeFeatures) defines an abstract [GazeMovement](../../argaze.md/#argaze.GazeFeatures.GazeMovement) class, then abstract [Fixation](../../argaze.md/#argaze.GazeFeatures.Fixation) and [Saccade](../../argaze.md/#argaze.GazeFeatures.Saccade) classes which inherit from [GazeMovement](../../argaze.md/#argaze.GazeFeatures.GazeMovement).
+
+The **positions** [GazeMovement](../../argaze.md/#argaze.GazeFeatures.GazeMovement) attribute contains all [GazePositions](../../argaze.md/#argaze.GazeFeatures.GazePosition) belonging to the movement.
+
+![Fixation and Saccade](../../img/fixation_and_saccade.png)
+
+## Identification
+
+[GazeFeatures](../../argaze.md/#argaze.GazeFeatures) defines an abstract [GazeMovementIdentifier](../../argaze.md/#argaze.GazeFeatures.GazeMovementIdentifier) class to allow adding various identification algorithms.
+
+Some gaze movement identification algorithms are available thanks to the [GazeAnalysis](../../argaze.md/#argaze.GazeAnalysis) submodule:
+
+* [Dispersion threshold identification (I-DT)](../../argaze.md/#argaze.GazeAnalysis.DispersionThresholdIdentification)
+* [Velocity threshold identification (I-VT)](../../argaze.md/#argaze.GazeAnalysis.VelocityThresholdIdentification)
+
+### Identify method
+
+The [GazeMovementIdentifier.identify](../../argaze.md/#argaze.GazeFeatures.GazeMovementIdentifier.identify) method allows feeding the identification algorithm with successive gaze positions to output Fixation, Saccade or any other kind of GazeMovement instances.
+
+Here is a sample of code based on the [I-DT](../../argaze.md/#argaze.GazeAnalysis.DispersionThresholdIdentification) algorithm to illustrate how to use it:
+
+``` python
+from argaze import GazeFeatures
+from argaze.GazeAnalysis import DispersionThresholdIdentification
+
+# Create a gaze movement identifier based on dispersion algorithm with 50px max deviation and 200 ms max duration thresholds
+gaze_movement_identifier = DispersionThresholdIdentification.GazeMovementIdentifier(50, 200)
+
+# Assuming that timestamped gaze positions are provided through live stream or later data reading
+...:
+
+    gaze_movement = gaze_movement_identifier.identify(timestamp, gaze_position)
+
+    # Fixation identified
+    if GazeFeatures.is_fixation(gaze_movement):
+
+        # Access to first gaze position of identified fixation
+        start_ts, start_position = gaze_movement.positions.first
+
+        # Access to fixation duration
+        print(f'duration: {gaze_movement.duration}')
+
+        # Iterate over all gaze positions of identified fixation
+        for ts, position in gaze_movement.positions.items():
+
+            # Do something with each fixation position
+            ...
+
+    # Saccade identified
+    elif GazeFeatures.is_saccade(gaze_movement):
+
+        # Access to first gaze position of identified saccade
+        start_ts, start_position = gaze_movement.positions.first
+
+        # Access to saccade amplitude
+        print(f'amplitude: {gaze_movement.amplitude}')
+
+        # Iterate over all gaze positions of identified saccade
+        for ts, position in gaze_movement.positions.items():
+
+            # Do something with each saccade position
+            ...
+
+    # No gaze movement identified
+    else:
+
+        continue
+
+```
+
+### Browse method
+
+The [GazeMovementIdentifier.browse](../../argaze.md/#argaze.GazeFeatures.GazeMovementIdentifier.browse) method allows passing a [TimeStampedGazePositions](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazePositions) buffer to apply the identification algorithm on all gaze positions inside.
+
+Identified gaze movements are returned through:
+
+* [TimeStampedGazeMovements](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazeMovements) instance where all fixations are stored by starting gaze position timestamp.
+* [TimeStampedGazeMovements](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazeMovements) instance where all saccades are stored by starting gaze position timestamp.
+* [TimeStampedGazeStatus](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazeStatus) instance where all gaze positions are linked to a fixation or saccade index.
+
+``` python
+# Assuming that timestamped gaze positions are provided through data reading
+
+ts_fixations, ts_saccades, ts_status = gaze_movement_identifier.browse(ts_gaze_positions)
+
+```
+
+* ts_fixations would look like:
+
+|timestamp|positions                                                      |duration|dispersion|focus    |
+|:--------|:--------------------------------------------------------------|:-------|:---------|:--------|
+|60034    |{"60034":[846,620], "60044":[837,641], "60054":[835,649], ...} |450     |40        |(840,660)|
+|60504    |{"60504":[838,667], "60514":[838,667], "60524":[837,669], ...} |100     |38        |(834,651)|
+|...      |...                                                            |...     |..        |...      |
+
+* ts_saccades would look like:
+
+|timestamp|positions                                |duration|
+|:--------|:----------------------------------------|:-------|
+|60484    |{"60484":[836, 669], "60494":[837, 669]} |10      |
+|60594    |{"60594":[833, 613], "60614":[927, 601]} |20      |
+|...      |...                                      |...     |
+
+* ts_status would look like:
+
+|timestamp|position  |type    |index|
+|:--------|:---------|:-------|:----|
+|60034    |(846, 620)|Fixation|1    |
+|60044    |(837, 641)|Fixation|1    |
+|...      |...       |...     |.    |
+|60464    |(836, 668)|Fixation|1    |
+|60474    |(836, 668)|Fixation|1    |
+|60484    |(836, 669)|Saccade |1    |
+|60494    |(837, 669)|Saccade |1    |
+|60504    |(838, 667)|Fixation|2    |
+|60514    |(838, 667)|Fixation|2    |
+|...      |...       |...     |.    |
+|60574    |(825, 629)|Fixation|2    |
+|60584    |(829, 615)|Fixation|2    |
+|60594    |(833, 613)|Saccade |2    |
+|60614    |(927, 601)|Saccade |2    |
+|60624    |(933, 599)|Fixation|3    |
+|60634    |(934, 603)|Fixation|3    |
+|...      |...       |...     |.    |
+
+
+!!! note
+    [TimeStampedGazeMovements](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazeMovements) and [TimeStampedGazeStatus](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazeStatus) classes inherit from the [TimeStampedBuffer](../../argaze.md/#argaze.DataStructures.TimeStampedBuffer) class.
+
+    Read the [Timestamped data](../timestamped_data/introduction.md) section to understand all the features it provides.
+
+### Generator method
+
+[GazeMovementIdentifier](../../argaze.md/#argaze.GazeFeatures.GazeMovementIdentifier) can be called with a [TimeStampedGazePositions](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazePositions) buffer as argument to generate a gaze movement each time one is identified.
+
+``` python
+# Assuming that timestamped gaze positions are provided through data reading
+
+for ts, gaze_movement in gaze_movement_identifier(ts_gaze_positions):
+
+    # Fixation identified
+    if GazeFeatures.is_fixation(gaze_movement):
+
+        # Do something with each fixation
+        ...
+
+    # Saccade identified
+    elif GazeFeatures.is_saccade(gaze_movement):
+
+        # Do something with each saccade
+        ...
+```
\ No newline at end of file
diff --git a/docs/user_guide/gaze_features/gaze_position.md b/docs/user_guide/gaze_features/gaze_position.md
new file mode 100644
index 0000000..48495b4
--- /dev/null
+++ b/docs/user_guide/gaze_features/gaze_position.md
@@ -0,0 +1,98 @@
+Gaze position
+=============
+
+[GazeFeatures](../../argaze.md/#argaze.GazeFeatures) defines a [GazePosition](../../argaze.md/#argaze.GazeFeatures.GazePosition) class to handle point coordinates with a precision value.
+
+``` python
+from argaze import GazeFeatures
+
+# Define a basic gaze position
+gaze_position = GazeFeatures.GazePosition((123, 456))
+
+# Define a gaze position with a precision value
+gaze_position = GazeFeatures.GazePosition((789, 765), precision=10)
+
+# Access to gaze position value and precision
+print(f'position: {gaze_position.value}')
+print(f'precision: {gaze_position.precision}')
+
+```
+
+## Validity
+
+[GazeFeatures](../../argaze.md/#argaze.GazeFeatures) also defines an [UnvalidGazePosition](../../argaze.md/#argaze.GazeFeatures.UnvalidGazePosition) class that inherits from [GazePosition](../../argaze.md/#argaze.GazeFeatures.GazePosition) to handle cases where no gaze position exists for some device-specific reason.
+
+``` python
+from argaze import GazeFeatures
+
+# Define a basic unvalid gaze position
+gaze_position = GazeFeatures.UnvalidGazePosition()
+
+# Define a basic unvalid gaze position with a message value
+gaze_position = GazeFeatures.UnvalidGazePosition("Something bad happened")
+
+# Access to gaze position validity
+print(f'validity: {gaze_position.valid}')
+
+```
+
+## Distance
+
+The [GazePosition](../../argaze.md/#argaze.GazeFeatures.GazePosition) class provides a **distance** method to calculate the distance to another gaze position instance.
+
+![Distance](../../img/distance.png)
+
+``` python
+# Distance between A and B positions
+d = gaze_position_A.distance(gaze_position_B)
+```
+
+## Overlapping
+
+The [GazePosition](../../argaze.md/#argaze.GazeFeatures.GazePosition) class provides an **overlap** method to test if a gaze position overlaps another one considering their precisions.
+
+![Gaze overlapping](../../img/overlapping.png)
+
+``` python
+# Check that A overlaps B
+if gaze_position_A.overlap(gaze_position_B):
+
+    # Do something if A overlaps B
+    ...
+
+# Check that A overlaps B and B overlaps A
+if gaze_position_A.overlap(gaze_position_B, both=True):
+
+    # Do something if A overlaps B AND B overlaps A
+    ...
+```
+
+## Timestamped gaze positions
+
+[TimeStampedGazePositions](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazePositions) inherits from the [TimeStampedBuffer](../../argaze.md/#argaze.DataStructures.TimeStampedBuffer) class to specifically handle gaze positions.
+
+### Import from dataframe
+
+It is possible to load timestamped gaze positions from a [Pandas DataFrame](https://pandas.pydata.org/docs/getting_started/intro_tutorials/01_table_oriented.html#min-tut-01-tableoriented) object.
+
+```python
+import pandas
+
+# Load gaze positions from a CSV file into a Pandas DataFrame
+dataframe = pandas.read_csv('gaze_positions.csv', delimiter="\t", low_memory=False)
+
+# Convert the Pandas dataframe into a TimeStampedGazePositions buffer, specifying which column labels to use
+ts_gaze_positions = GazeFeatures.TimeStampedGazePositions.from_dataframe(dataframe, timestamp = 'Recording timestamp [ms]', x = 'Gaze point X [px]', y = 'Gaze point Y [px]')
+
+```
+### Iterator
+
+Like [TimeStampedBuffer](../../argaze.md/#argaze.DataStructures.TimeStampedBuffer), the [TimeStampedGazePositions](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazePositions) class provides an iterator feature:
+
+```python
+for timestamp, gaze_position in ts_gaze_positions.items():
+
+    # Do something with each gaze position
+    ...
+
+```
diff --git a/docs/user_guide/gaze_features/introduction.md b/docs/user_guide/gaze_features/introduction.md
new file mode 100644
index 0000000..bf818ba
--- /dev/null
+++ b/docs/user_guide/gaze_features/introduction.md
@@ -0,0 +1,7 @@
+Gaze features
+=============
+
+This section refers to:
+
+* [GazeFeatures](../../argaze.md/#argaze.GazeFeatures)
+* [GazeAnalysis](../../argaze.md/#argaze.GazeAnalysis)
\ No newline at end of file
diff --git a/docs/user_guide/gaze_features/scan_path.md b/docs/user_guide/gaze_features/scan_path.md
new file mode 100644
index 0000000..46af28b
--- /dev/null
+++ b/docs/user_guide/gaze_features/scan_path.md
@@ -0,0 +1,169 @@
+Scan path
+=========
+
+[GazeFeatures](../../argaze.md/#argaze.GazeFeatures) defines classes to handle successive fixations/saccades and analyse their spatial or temporal properties.
+
+## Fixation based scan path
+
+### Definition
+
+The [ScanPath](../../argaze.md/#argaze.GazeFeatures.ScanPath) class is defined as a list of [ScanSteps](../../argaze.md/#argaze.GazeFeatures.ScanStep) which are defined as a fixation followed by a saccade.
+
+![Fixation based scan path](../../img/scan_path.png)
+
+As fixations and saccades are identified, the scan path is built by calling the [append_fixation](../../argaze.md/#argaze.GazeFeatures.ScanPath.append_fixation) and [append_saccade](../../argaze.md/#argaze.GazeFeatures.ScanPath.append_saccade) methods respectively.
+
+### Analysis
+
+[GazeFeatures](../../argaze.md/#argaze.GazeFeatures) defines an abstract [ScanPathAnalyzer](../../argaze.md/#argaze.GazeFeatures.ScanPathAnalyzer) class to allow adding various analysis algorithms.
+
+Some scan path analyses are available thanks to the [GazeAnalysis](../../argaze.md/#argaze.GazeAnalysis) submodule:
+
+* [K-Coefficient](../../argaze.md/#argaze.GazeAnalysis.KCoefficient)
+* [Nearest Neighbor Index](../../argaze.md/#argaze.GazeAnalysis.NearestNeighborIndex)
+* [Exploit Explore Ratio](../../argaze.md/#argaze.GazeAnalysis.ExploitExploreRatio)
+
+### Example
+
+Here is a sample of code to illustrate how to build a scan path and analyze it:
+
+``` python
+from argaze import GazeFeatures
+from argaze.GazeAnalysis import KCoefficient
+
+# Create an empty scan path
+scan_path = GazeFeatures.ScanPath()
+
+# Create a K coefficient analyzer
+kc_analyzer = KCoefficient.ScanPathAnalyzer()
+
+# Assuming a gaze movement is identified at ts time
+...:
+
+    # Fixation identified
+    if GazeFeatures.is_fixation(gaze_movement):
+
+        # Append fixation to scan path: no step is created
+        scan_path.append_fixation(ts, gaze_movement)
+
+    # Saccade identified
+    elif GazeFeatures.is_saccade(gaze_movement):
+
+        # Append saccade to scan path: a new step should be created
+        new_step = scan_path.append_saccade(ts, gaze_movement)
+
+        # Analyse scan path
+        if new_step:
+
+            K = kc_analyzer.analyze(scan_path)
+
+            # Do something with K metric
+            ...
+```
+
+## AOI based scan path
+
+### Definition
+
+The [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath) class is defined as a list of [AOIScanSteps](../../argaze.md/#argaze.GazeFeatures.AOIScanStep) which are defined as a set of consecutive fixations looking at the same Area Of Interest (AOI) followed by a saccade.
+
+![AOI based scan path](../../img/aoi_scan_path.png)
+
+As fixations and saccades are identified, the scan path is built by calling the [append_fixation](../../argaze.md/#argaze.GazeFeatures.AOIScanPath.append_fixation) and [append_saccade](../../argaze.md/#argaze.GazeFeatures.AOIScanPath.append_saccade) methods respectively.
+
+### Analysis
+
+[GazeFeatures](../../argaze.md/#argaze.GazeFeatures) defines an abstract [AOIScanPathAnalyzer](../../argaze.md/#argaze.GazeFeatures.AOIScanPathAnalyzer) class to allow adding various analysis algorithms.
+
+Some scan path analyses are available thanks to the [GazeAnalysis](../../argaze.md/#argaze.GazeAnalysis) submodule:
+
+* [Transition matrix](../../argaze.md/#argaze.GazeAnalysis.TransitionMatrix)
+* [Entropy](../../argaze.md/#argaze.GazeAnalysis.Entropy)
+* [Lempel-Ziv complexity](../../argaze.md/#argaze.GazeAnalysis.LempelZivComplexity)
+* [N-Gram](../../argaze.md/#argaze.GazeAnalysis.NGram)
+* [K-modified coefficient](../../argaze.md/#argaze.GazeAnalysis.KCoefficient)
+
+### Example
+
+Here is a sample of code to illustrate how to build an AOI scan path and analyze it:
+
+``` python
+from argaze import GazeFeatures
+from argaze.GazeAnalysis import LempelZivComplexity
+
+# Assuming all AOI names are listed
+...
+
+# Create an empty AOI scan path
+aoi_scan_path = GazeFeatures.AOIScanPath(aoi_names)
+
+# Create a Lempel-Ziv complexity analyzer
+lzc_analyzer = LempelZivComplexity.AOIScanPathAnalyzer()
+
+# Assuming a gaze movement is identified at ts time
+...:
+
+    # Fixation identified
+    if GazeFeatures.is_fixation(gaze_movement):
+
+        # Assuming fixation is detected as inside an AOI
+        ...
+
+        # Append fixation to AOI scan path: a new step should be created
+        new_step = aoi_scan_path.append_fixation(ts, gaze_movement, looked_aoi_name)
+
+        # Analyse AOI scan path
+        if new_step:
+
+            LZC = lzc_analyzer.analyze(aoi_scan_path)
+
+            # Do something with LZC metric
+            ...
+
+    # Saccade identified
+    elif GazeFeatures.is_saccade(gaze_movement):
+
+        # Append saccade to scan path: no step is created
+        aoi_scan_path.append_saccade(ts, gaze_movement)
+
+```
+
+### Advanced
+
+The [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath) class provides some advanced analysis features.
+
+#### Letter sequence
+
+When a new [AOIScanStep](../../argaze.md/#argaze.GazeFeatures.AOIScanStep) is created, the [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath) internally assigns a unique letter index to its AOI to ease pattern analysis.
+Then, the [AOIScanPath letter_sequence](../../argaze.md/#argaze.GazeFeatures.AOIScanPath.letter_sequence) property returns the concatenation of each [AOIScanStep](../../argaze.md/#argaze.GazeFeatures.AOIScanStep) letter.
+The [AOIScanPath get_letter_aoi](../../argaze.md/#argaze.GazeFeatures.AOIScanPath.get_letter_aoi) method returns the AOI related to a letter index.
+
+``` python
+# Assuming the following AOI scan path is built: Foo > Bar > Shu > Foo
+aoi_scan_path = ...
+
+# Letter sequence representation should be: 'ABCA'
+print(aoi_scan_path.letter_sequence)
+
+# Output should be: 'Bar'
+print(aoi_scan_path.get_letter_aoi('B'))
+
+```
+
+#### Transition matrix
+
+When a new [AOIScanStep](../../argaze.md/#argaze.GazeFeatures.AOIScanStep) is created, the [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath) internally counts the number of transitions from an AOI to another AOI to ease Markov chain analysis.
+Then, the [AOIScanPath transition_matrix](../../argaze.md/#argaze.GazeFeatures.AOIScanPath.transition_matrix) property returns a [Pandas DataFrame](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html) where indexes are transition departures and columns are transition destinations.
+
+Here is an example of a transition matrix for the following [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath): Foo > Bar > Shu > Foo > Bar
+
+|   |Foo|Bar|Shu|
+|:--|:--|:--|:--|
+|Foo|0  |2  |0  |
+|Bar|0  |0  |1  |
+|Shu|1  |0  |0  |
+
+
+#### Fixations count
+
+The [AOIScanPath fixations_count](../../argaze.md/#argaze.GazeFeatures.AOIScanPath.fixations_count) method returns the total number of fixations in the whole scan path and a dictionary to get the fixations count per AOI.
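+
+Here is a minimal sketch of how to use these counts, assuming the method returns the total count and the per-AOI dictionary as a pair, as described above:
+
+``` python
+# Assuming an AOI scan path is built as above
+scan_fixations_count, aoi_fixations_count = aoi_scan_path.fixations_count()
+
+# Output the total fixations count
+print(f'{scan_fixations_count} fixations in the whole scan path')
+
+# Output the fixations count per AOI
+for aoi_name, count in aoi_fixations_count.items():
+
+    # Do something with each AOI fixations count
+    print(f'{count} fixations on {aoi_name}')
+```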
diff --git a/mkdocs.yml b/mkdocs.yml index c2f4c53..c1c2af6 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -31,14 +31,6 @@ nav: - Advanced Topics: - user_guide/aruco_markers_pipeline/advanced_topics/optic_parameters_calibration.md -# - ArUco Markers: -# - user_guide/aruco_markers/introduction.md -# - user_guide/aruco_markers/dictionary_selection.md -# - user_guide/aruco_markers/markers_creation.md -# - user_guide/aruco_markers/camera_calibration.md -# - user_guide/aruco_markers/markers_detection.md -# - user_guide/aruco_markers/markers_pose_estimation.md -# - user_guide/aruco_markers/markers_scene_description.md # - Areas Of Interest: # - user_guide/areas_of_interest/introduction.md # - user_guide/areas_of_interest/aoi_scene_description.md @@ -46,11 +38,11 @@ nav: # - user_guide/areas_of_interest/vision_cone_filtering.md # - user_guide/areas_of_interest/aoi_matching.md # - user_guide/areas_of_interest/heatmap.md -# - Gaze Analysis: -# - user_guide/gaze_analysis/introduction.md -# - user_guide/gaze_analysis/gaze_position.md -# - user_guide/gaze_analysis/gaze_movement.md -# - user_guide/gaze_analysis/scan_path.md +# - Gaze Features: +# - user_guide/gaze_features/introduction.md +# - user_guide/gaze_features/gaze_position.md +# - user_guide/gaze_features/gaze_movement.md +# - user_guide/gaze_features/scan_path.md # - Timestamped data: # - user_guide/timestamped_data/introduction.md # - user_guide/timestamped_data/ordered_dictionary.md diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index ad17df2..54ef918 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -1044,7 +1044,7 @@ class ArFrame(): return image - def image(self, **kwargs) -> numpy.array: + def image(self, **kwargs: dict) -> numpy.array: """ Get frame image. @@ -1248,7 +1248,7 @@ class ArScene(): return ArScene(new_scene_name, new_layers, new_frames, **scene_data) - def estimate_pose(self, detected_features) -> Tuple[numpy.array, numpy.array]: + def estimate_pose(self, detected_features: Any) -> Tuple[numpy.array, numpy.array]: """Define abstract estimate scene pose method. Parameters: @@ -1298,7 +1298,7 @@ class ArScene(): # Project layer aoi scene yield name, aoi_scene_copy.project(tvec, rvec, self.parent.aruco_detector.optic_parameters.K) - def draw(self, image: numpy.array, **kwargs): + def draw(self, image: numpy.array, **kwargs: dict): """ Draw scene into image. @@ -1495,7 +1495,7 @@ class ArCamera(ArFrame): # Unlock camera frame exploitation self._frame_lock.release() - def image(self, **kwargs) -> numpy.array: + def image(self, **kwargs: dict) -> numpy.array: """ Get frame image. diff --git a/src/argaze/ArUcoMarkers/ArUcoCamera.py b/src/argaze/ArUcoMarkers/ArUcoCamera.py index 4f555fb..4c3f042 100644 --- a/src/argaze/ArUcoMarkers/ArUcoCamera.py +++ b/src/argaze/ArUcoMarkers/ArUcoCamera.py @@ -73,7 +73,7 @@ class ArUcoCamera(ArFeatures.ArCamera): return output @classmethod - def from_dict(self, aruco_camera_data, working_directory: str = None) -> ArUcoCameraType: + def from_dict(self, aruco_camera_data: dict, working_directory: str = None) -> ArUcoCameraType: """ Load ArUcoCamera from dictionary. 
@@ -211,7 +211,7 @@ class ArUcoCamera(ArFeatures.ArCamera): # Return dection time and exceptions return detection_time, exceptions - def __image(self, draw_detected_markers: dict = None, draw_scenes: dict = None, draw_optic_parameters_grid: dict = None, **kwargs) -> numpy.array: + def __image(self, draw_detected_markers: dict = None, draw_scenes: dict = None, draw_optic_parameters_grid: dict = None, **kwargs: dict) -> numpy.array: """Get frame image with ArUco detection visualisation. Parameters: @@ -253,7 +253,7 @@ class ArUcoCamera(ArFeatures.ArCamera): return image - def image(self, **kwargs) -> numpy.array: + def image(self, **kwargs: dict) -> numpy.array: """ Get frame image. diff --git a/src/argaze/AreaOfInterest/AOI2DScene.py b/src/argaze/AreaOfInterest/AOI2DScene.py index f6b8dcb..564f65c 100644 --- a/src/argaze/AreaOfInterest/AOI2DScene.py +++ b/src/argaze/AreaOfInterest/AOI2DScene.py @@ -125,20 +125,23 @@ class AOI2DScene(AOIFeatures.AOIScene): return aoi2D_scene ''' - def dimensionalize(self, frame_3d: AOIFeatures.AreaOfInterest, size: tuple) -> AOI3DSceneType: + def dimensionalize(self, rectangle_3d: AOIFeatures.AreaOfInterest, size: tuple) -> AOI3DSceneType: """ Convert to 3D scene considering it is inside of 3D rectangular frame. Parameters: - aoi_frame_3d: rectangle 3D AOI to use as referential plane + rectangle_3d: rectangle 3D AOI to use as referential plane size: size of the frame in pixel Returns: AOI 3D scene """ + assert(rectangle_3d.dimension == 3) + assert(rectangle_3d.points_number == 4) + # Vectorize outter_axis function - vfunc = numpy.vectorize(frame_3d.outter_axis) + vfunc = numpy.vectorize(rectangle_3d.outter_axis) # Prepare new AOI 3D scene aoi3D_scene = AOI3DScene.AOI3DScene() -- cgit v1.1 From 0410c9980f2c243caa2f2f4f72b814efb72ac654 Mon Sep 17 00:00:00 2001 From: Theo De La Hogue Date: Sat, 23 Sep 2023 07:59:35 +0200 Subject: Fixing missing import. --- src/argaze/ArFeatures.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index 54ef918..10e9687 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -7,7 +7,7 @@ __credits__ = [] __copyright__ = "Copyright 2023, Ecole Nationale de l'Aviation Civile (ENAC)" __license__ = "BSD" -from typing import TypeVar, Tuple +from typing import TypeVar, Tuple, Any from dataclasses import dataclass, field import json import os -- cgit v1.1 From 998d6c1b6c4216d887f74375e262147a6fbeff67 Mon Sep 17 00:00:00 2001 From: Theo De La Hogue Date: Sat, 23 Sep 2023 08:11:40 +0200 Subject: Fixing map method to use clockwise order. --- src/argaze/ArFeatures.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index 10e9687..a419d93 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -1483,7 +1483,7 @@ class ArCamera(ArFrame): # Apply perspective transform algorithm to fill aoi frame background width, height = frame.size - destination = numpy.float32([[0, height],[width, height],[width, 0],[0, 0]]) + destination = numpy.float32([[0, 0], [width, 0], [width, height], [0, height]]) mapping = cv2.getPerspectiveTransform(aoi_2d.astype(numpy.float32), destination) frame.background = cv2.warpPerspective(self.background, mapping, (width, height)) -- cgit v1.1 From 61df29ded4a1ffef3f9677f0d13f07821f07fcb7 Mon Sep 17 00:00:00 2001 From: Theo De La Hogue Date: Mon, 25 Sep 2023 10:31:22 +0200 Subject: Changing layer name policy warning. 
---
 docs/user_guide/aruco_markers_pipeline/aoi_projection.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/user_guide/aruco_markers_pipeline/aoi_projection.md b/docs/user_guide/aruco_markers_pipeline/aoi_projection.md
index 027f805..2f764f8 100644
--- a/docs/user_guide/aruco_markers_pipeline/aoi_projection.md
+++ b/docs/user_guide/aruco_markers_pipeline/aoi_projection.md
@@ -97,7 +97,7 @@ The name of the [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer). Basically
 
 !!! warning "Layer name policy"
 
-    An [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) layer is projected into [an ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) layer, **provided they have the same name**.
+    An [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) layer is projected into an [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) layer, **provided they have the same name**.
 
 !!! note
 
--
cgit v1.1


From a6561584fa398b2626389f558ce59ce8d73e9dab Mon Sep 17 00:00:00 2001
From: Theo De La Hogue
Date: Mon, 25 Sep 2023 10:32:10 +0200
Subject: Adding AOI frame chapter link.

---
 docs/user_guide/aruco_markers_pipeline/introduction.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/docs/user_guide/aruco_markers_pipeline/introduction.md b/docs/user_guide/aruco_markers_pipeline/introduction.md
index f781fe8..dd82900 100644
--- a/docs/user_guide/aruco_markers_pipeline/introduction.md
+++ b/docs/user_guide/aruco_markers_pipeline/introduction.md
@@ -7,7 +7,7 @@ The OpenCV library provides a module to detect fiducial markers into a picture a
 
 ![OpenCV ArUco markers](https://pyimagesearch.com/wp-content/uploads/2020/12/aruco_generate_tags_header.png)
 
-The ArGaze [ArUcoMarkers submodule](../../argaze.md/#argaze.ArUcoMarkers) eases markers creation, optic calibration, markers detection and 3D scene pose estimation through a set of high level classes.
+The ArGaze [ArUcoMarkers submodule](../../argaze.md/#argaze.ArUcoMarkers) eases markers creation, markers detection and 3D scene pose estimation through a set of high level classes.
 
 First, let's look at the schema below: it gives an overview of the main notions involved in the following chapters.
 
@@ -19,8 +19,8 @@ To build your own ArUco markers pipeline, you need to know:
 * [How to describe scene's AOI](aoi_description.md),
 * [How to load and execute ArUco markers pipeline](configuration_and_execution.md),
 * [How to estimate scene pose](pose_estimation.md),
-* [How to project AOI into camera frame](aoi_projection.md)
-
+* [How to project AOI into camera frame](aoi_projection.md),
+* [How to define an AOI as a frame](aoi_frame.md)
 
 More advanced features are also explained like:
 
--
cgit v1.1


From f26f79113febf8a8dcccf18dabfcdd4bef04ce86 Mon Sep 17 00:00:00 2001
From: Theo De La Hogue
Date: Mon, 25 Sep 2023 10:32:43 +0200
Subject: Adding a missing comma.

---
 docs/user_guide/aruco_markers_pipeline/pose_estimation.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/user_guide/aruco_markers_pipeline/pose_estimation.md b/docs/user_guide/aruco_markers_pipeline/pose_estimation.md
index 6acafee..d7da336 100644
--- a/docs/user_guide/aruco_markers_pipeline/pose_estimation.md
+++ b/docs/user_guide/aruco_markers_pipeline/pose_estimation.md
@@ -37,7 +37,7 @@ Here is an extract from the JSON [ArUcoCamera](../../argaze.md/#argaze.ArUcoMark
             }
         }
     }
-    }
+    },
     ...
     "image_parameters": {
     ...
--
cgit v1.1


From 8f2b87bfec622dd32e90d9bfa17dfcda42add4fe Mon Sep 17 00:00:00 2001
From: Theo De La Hogue
Date: Mon, 25 Sep 2023 14:45:52 +0200
Subject: Improving AOI description documentation.

---
 docs/img/aoi_3d_description.png                    | Bin 0 -> 14538 bytes
 docs/img/aoi_description.png                       | Bin 14538 -> 0 bytes
 .../aruco_markers_pipeline/aoi_3d_description.md   |  62 ++++++++
 .../aruco_markers_pipeline/aoi_3d_projection.md    | 159 +++++++++++++++++++++
 .../aruco_markers_pipeline/aoi_description.md      |  62 --------
 .../user_guide/aruco_markers_pipeline/aoi_frame.md |  64 +++++++++
 .../aruco_markers_pipeline/aoi_projection.md       | 159 ---------------------
 .../configuration_and_execution.md                 |   2 +-
 .../aruco_markers_pipeline/introduction.md         |   4 +-
 .../gaze_analysis_pipeline/aoi_2d_analysis.md      |  96 +++++++++++++
 .../gaze_analysis_pipeline/aoi_2d_description.md   |  70 +++++++++
 .../gaze_analysis_pipeline/aoi_analysis.md         |  96 -------------
 .../configuration_and_execution.md                 |   2 +-
 .../gaze_analysis_pipeline/introduction.md         |   3 +-
 .../pipeline_modules/aoi_matchers.md               |   2 +-
 .../pipeline_modules/aoi_scan_path_analyzers.md    |   2 +-
 mkdocs.yml                                         |   7 +-
 17 files changed, 463 insertions(+), 327 deletions(-)
 create mode 100644 docs/img/aoi_3d_description.png
 delete mode 100644 docs/img/aoi_description.png
 create mode 100644 docs/user_guide/aruco_markers_pipeline/aoi_3d_description.md
 create mode 100644 docs/user_guide/aruco_markers_pipeline/aoi_3d_projection.md
 delete mode 100644 docs/user_guide/aruco_markers_pipeline/aoi_description.md
 create mode 100644 docs/user_guide/aruco_markers_pipeline/aoi_frame.md
 delete mode 100644 docs/user_guide/aruco_markers_pipeline/aoi_projection.md
 create mode 100644 docs/user_guide/gaze_analysis_pipeline/aoi_2d_analysis.md
 create mode 100644 docs/user_guide/gaze_analysis_pipeline/aoi_2d_description.md
 delete mode 100644 docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md

diff --git a/docs/img/aoi_3d_description.png b/docs/img/aoi_3d_description.png
new file mode 100644
index 0000000..794ef68
Binary files /dev/null and b/docs/img/aoi_3d_description.png differ
diff --git a/docs/img/aoi_description.png b/docs/img/aoi_description.png
deleted file mode 100644
index 794ef68..0000000
Binary files a/docs/img/aoi_description.png and /dev/null differ
diff --git a/docs/user_guide/aruco_markers_pipeline/aoi_3d_description.md b/docs/user_guide/aruco_markers_pipeline/aoi_3d_description.md
new file mode 100644
index 0000000..13f9c86
--- /dev/null
+++ b/docs/user_guide/aruco_markers_pipeline/aoi_3d_description.md
@@ -0,0 +1,62 @@
+Describe 3D AOI
+===============
+
+Once [ArUco markers are placed into a scene](aruco_markers_description.md), areas of interest need to be described in the same 3D referential.
+
+In the example scene, each screen is considered an area of interest, plus the blue triangle area inside the top screen.
+
+![3D AOI description](../../img/aoi_3d_description.png)
+
+All AOIs need to be described from the same origin as the markers in a [right-handed 3D axis](https://robotacademy.net.au/lesson/right-handed-3d-coordinate-frame/) where:
+
+* +X is pointing to the right,
+* +Y is pointing to the top,
+* +Z is pointing backward.
+
+!!! warning
+    All AOI spatial values must be given in **centimeters**.
+
+### Edit OBJ file description
+
+The OBJ file format can be exported from most 3D editors.
+
+``` obj
+o YellowSquare
+v 6.200003 -7.275252 25.246159
+v 31.200003 -7.275252 25.246159
+v 6.200003 1.275252 1.753843
+v 31.200003 1.275252 1.753843
+s off
+f 1 2 4 3
+o GrayRectangle
+v 2.500000 2.500000 -0.500000
+v 37.500000 2.500000 -0.500000
+v 2.500000 27.500000 -0.500000
+v 37.500000 27.500000 -0.500000
+s off
+f 5 6 8 7
+o BlueTriangle
+v 12.500002 7.500000 -0.500000
+v 27.500002 7.500000 -0.500000
+v 20.000002 22.500000 -0.500000
+s off
+f 9 10 11
+```
+
+Here are the common OBJ file features needed to describe AOIs:
+
+* Object lines (starting with the *o* key) give the AOI name.
+* Vertex lines (starting with the *v* key) give the AOI vertices.
+* Face lines (starting with the *f* key) link vertices together.
+
+### Edit JSON file description
+
+The JSON file format allows describing AOI vertices.
+
+``` json
+{
+    "YellowSquare": [[6.2, -7.275252, 25.246159], [31.2, -7.275252, 25.246159], [31.2, 1.275252, 1.753843], [6.2, 1.275252, 1.753843]],
+    "GrayRectangle": [[2.5, 2.5, -0.5], [37.5, 2.5, -0.5], [37.5, 27.5, -0.5], [2.5, 27.5, -0.5]],
+    "BlueTriangle": [[12.5, 7.5, -0.5], [27.5, 7.5, -0.5], [20, 22.5, -0.5]]
+}
+```
diff --git a/docs/user_guide/aruco_markers_pipeline/aoi_3d_projection.md b/docs/user_guide/aruco_markers_pipeline/aoi_3d_projection.md
new file mode 100644
index 0000000..bdebd6c
--- /dev/null
+++ b/docs/user_guide/aruco_markers_pipeline/aoi_3d_projection.md
@@ -0,0 +1,159 @@
+Project AOI into camera frame
+=============================
+
+Once [ArUcoScene pose is estimated](pose_estimation.md) and [3D AOIs are described](aoi_3d_description.md), AOIs can be projected into the [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) frame.
+
+![3D AOI projection](../../img/aruco_camera_aoi_projection.png)
+
+## Add ArLayer to ArUcoScene to load AOI
+
+The [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) class allows loading areas of interest descriptions. An [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) instance can contain multiple [ArLayers](../../argaze.md/#argaze.ArFeatures.ArLayer).
+
+Here is the previous extract where one layer is added to the [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) configuration:
+
+```json
+{
+    "name": "My FullHD camera",
+    "size": [1920, 1080],
+    ...
+    "scenes": {
+        "MyScene" : {
+            "aruco_markers_group": {
+                ...
+            },
+            "layers": {
+                "MyLayer": {
+                    "aoi_scene": {
+                        "YellowSquare": [[6.2, -7.275252, 25.246159], [31.2, -7.275252, 25.246159], [31.2, 1.275252, 1.753843], [6.2, 1.275252, 1.753843]],
+                        "GrayRectangle": [[2.5, 2.5, -0.5], [37.5, 2.5, -0.5], [37.5, 27.5, -0.5], [2.5, 27.5, -0.5]],
+                        "BlueTriangle": [[12.5, 7.5, -0.5], [27.5, 7.5, -0.5], [20, 22.5, -0.5]]
+                    }
+                }
+            }
+        }
+    },
+    ...
+}
+```
+
+Now, let's understand the meaning of each JSON entry.
+
+### "MyLayer"
+
+The name of the [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer). Mainly useful for visualisation purposes.
+
+### AOI Scene
+
+The [AOIScene](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.AOIScene) defines a set of 3D [AreaOfInterest](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.AreaOfInterest) registered by name.
+
+## Add ArLayer to ArUcoCamera to project 3D AOIs
+
+Here is the previous extract where one layer is added to the [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) configuration and displayed:
+
+```json
+{
+    "name": "My FullHD camera",
+    "size": [1920, 1080],
+    ...
+    "scenes": {
+        "MyScene" : {
+            "aruco_markers_group": {
+                ...
+            },
+            "layers": {
+                "MyLayer": {
+                    "aoi_scene": {
+                        ...
+                    }
+                }
+            }
+        }
+    },
+    "layers": {
+        "MyLayer": {}
+    },
+    ...
+    "image_parameters": {
+        ...
+        "draw_layers": {
+            "MyLayer": {
+                "draw_aoi_scene": {
+                    "draw_aoi": {
+                        "color": [255, 255, 255],
+                        "border_size": 1
+                    }
+                }
+            }
+        }
+    }
+}
```
+
+Now, let's understand the meaning of each JSON entry.
+
+### "MyLayer"
+
+The name of the [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer). Mainly useful for visualisation purposes.
+
+!!! warning "Layer name policy"
+
+    An [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) layer is projected into an [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) layer, **provided they have the same name**.
+
+!!! note
+
+    [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) layers are projected into their dedicated [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) layers when calling the [ArUcoCamera.watch](../../argaze.md/#argaze.ArFeatures.ArCamera.watch) method.
+
+## Add 2D AOI analysis
+
+When a scene layer is projected into a camera layer, the 3D [ArLayer.aoi_scene](../../argaze.md/#argaze.ArFeatures.ArLayer.aoi_scene) description of the scene becomes the camera's 2D [ArLayer.aoi_scene](../../argaze.md/#argaze.ArFeatures.ArLayer.aoi_scene) description.
+
+Therefore, [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) benefits from all the services described in the [2D AOI analysis pipeline section](../gaze_analysis_pipeline/aoi_2d_analysis.md).
+
+Here is the previous extract where an AOI matcher, an AOI scan path and AOI scan path analyzers are added to the [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) layer:
+
+```json
+{
+    "name": "My FullHD camera",
+    "size": [1920, 1080],
+    ...
+    "scenes": {
+        "MyScene" : {
+            "aruco_markers_group": {
+                ...
+            },
+            "layers": {
+                "MyLayer": {
+                    "aoi_scene": {
+                        ...
+                    }
+                }
+            }
+        }
+    },
+    "layers": {
+        "MyLayer": {
+            "aoi_matcher": {
+                "DeviationCircleCoverage": {
+                    "coverage_threshold": 0.5
+                }
+            },
+            "aoi_scan_path": {
+                "duration_max": 30000
+            },
+            "aoi_scan_path_analyzers": {
+                "Basic": {},
+                "TransitionMatrix": {},
+                "NGram": {
+                    "n_min": 3,
+                    "n_max": 5
+                }
+            }
+        }
+    },
+    ...
+}
+```
+
+!!! warning
+
+    Adding a scan path and scan path analyzers to an [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) layer doesn't make sense if the camera is moving.
diff --git a/docs/user_guide/aruco_markers_pipeline/aoi_description.md b/docs/user_guide/aruco_markers_pipeline/aoi_description.md
deleted file mode 100644
index 101ec9f..0000000
--- a/docs/user_guide/aruco_markers_pipeline/aoi_description.md
+++ /dev/null
@@ -1,62 +0,0 @@
-Describe AOI
-============
-
-Once [ArUco markers are placed into a scene](aruco_markers_description.md), areas of interest need to be described into the same 3D referential.
-
-In the example scene, each screen is considered as an area of interest more the blue triangle area inside the top screen.
-
-![AOI description](../../img/aoi_description.png)
-
-All AOIs need to be described from same origin than markers in a [right-handed 3D axis](https://robotacademy.net.au/lesson/right-handed-3d-coordinate-frame/) where:
-
-* +X is pointing to the right,
-* +Y is pointing to the top,
-* +Z is pointing to the backward.
-
-!!! warning
-    All AOIs spatial values must be given in **centimeters**.
-
-### Edit OBJ file description
-
-OBJ file format could be exported from most 3D editors.
-
-``` obj
-o YellowSquare
-v 6.200003 -7.275252 25.246159
-v 31.200003 -7.275252 25.246159
-v 6.200003 1.275252 1.753843
-v 31.200003 1.275252 1.753843
-s off
-f 1 2 4 3
-o GrayRectangle
-v 2.500000 2.500000 -0.500000
-v 37.500000 2.500000 -0.500000
-v 2.500000 27.500000 -0.500000
-v 37.500000 27.500000 -0.500000
-s off
-f 5 6 8 7
-o BlueTriangle
-v 12.500002 7.500000 -0.500000
-v 27.500002 7.500000 -0.500000
-v 20.000002 22.500000 -0.500000
-s off
-f 9 10 11
-```
-
-Here are common OBJ file features needed to describe AOIs:
-
-* Object lines (starting with *o* key) indicate AOI name.
-* Vertice lines (starting with *v* key) indicate AOI vertices.
-* Face (starting with *f* key) link vertices together.
-
-### Edit JSON file description
-
-JSON file format allows to describe AOIs vertices.
-
-``` json
-{
-    "YellowSquare": [[6.2, -7.275252, 25.246159], [31.2, -7.275252, 25.246159], [31.2, 1.275252, 1.753843], [6.2, 1.275252, 1.753843]],
-    "GrayRectangle": [[2.5, 2.5, -0.5], [37.5, 2.5, -0.5], [37.5, 27.5, -0.5], [2.5, 27.5, -0.5]],
-    "BlueTriangle": [[12.5, 7.5, -0.5], [27.5, 7.5, -0.5], [20, 22.5, -0.5]]
-}
-```
diff --git a/docs/user_guide/aruco_markers_pipeline/aoi_frame.md b/docs/user_guide/aruco_markers_pipeline/aoi_frame.md
new file mode 100644
index 0000000..6b87d52
--- /dev/null
+++ b/docs/user_guide/aruco_markers_pipeline/aoi_frame.md
@@ -0,0 +1,64 @@
+Define an AOI as a frame
+========================
+
+## Add ArFrame to ArUcoScene
+
+The [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) class defines a rectangular area into which timestamped gaze positions are projected and inside which they are analyzed. An [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) instance can contain multiple [ArFrames](../../argaze.md/#argaze.ArFeatures.ArFrame).
+
+Here is the previous extract where the "GrayRectangle" AOI is defined as a frame in the [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) configuration:
+
+```json
+{
+    "name": "My FullHD camera",
+    "size": [1920, 1080],
+    ...
+    "scenes": {
+        "MyScene" : {
+            "aruco_markers_group": {
+                ...
+            },
+            "layers": {
+                "MyLayer": {
+                    "aoi_scene": {
+                        "YellowSquare": [[6.2, -7.275252, 25.246159], [31.2, -7.275252, 25.246159], [31.2, 1.275252, 1.753843], [6.2, 1.275252, 1.753843]],
+                        "GrayRectangle": [[2.5, 2.5, -0.5], [37.5, 2.5, -0.5], [37.5, 27.5, -0.5], [2.5, 27.5, -0.5]]
+                    }
+                }
+            },
+            "frames": {
+                "GrayRectangle": {
+                    "size": [350, 250],
+                    "layers": {
+                        "MyLayer": {
+                            "aoi_scene": {
+                                "BlueTriangle": [[100, 50], [250, 50], [175, 200]]
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    },
+    ...
+}
+```
+
+Now, let's understand the meaning of each JSON entry.
+
+### "GrayRectangle"
+
+The name of the AOI and of the [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame). Mainly useful for visualisation purposes.
+
+!!! warning "Frame name policy"
+
+    An [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) layer AOI is defined as an [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) frame, **provided they have the same name**.
+
+!!! warning "Layer name policy"
+
+    An [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) frame layer is projected into an [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) layer, **provided they have the same name**.
+
+!!! note
+
+    [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) frame layers are projected into their dedicated [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) layers when the JSON configuration file is loaded.
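+
+Once the configuration above is loaded, the scene frame can be reached like any other [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame). Here is a minimal sketch of such an access; the file name is illustrative, and the JSON loading and dictionary-style access to scenes and frames are assumed from previous chapters:
+
+``` python
+from argaze.ArUcoMarkers import ArUcoCamera
+
+# Assuming the configuration above is saved into a JSON file
+aruco_camera = ArUcoCamera.ArUcoCamera.from_json('./configuration.json')
+
+# Access to the frame defined from the "GrayRectangle" AOI (assuming scenes and frames are exposed as dictionaries)
+gray_rectangle_frame = aruco_camera.scenes["MyScene"].frames["GrayRectangle"]
+
+# Get a picture of what is gazed inside the frame
+image = gray_rectangle_frame.image()
+```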
\ No newline at end of file diff --git a/docs/user_guide/aruco_markers_pipeline/aoi_projection.md b/docs/user_guide/aruco_markers_pipeline/aoi_projection.md deleted file mode 100644 index 2f764f8..0000000 --- a/docs/user_guide/aruco_markers_pipeline/aoi_projection.md +++ /dev/null @@ -1,159 +0,0 @@ -Project AOI into camera frame -============================= - -Once [ArUcoScene pose is estimated](pose_estimation.md) and [AOI are described](aoi_description.md), AOI can be projected into [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) frame. - -![AOI projection](../../img/aruco_camera_aoi_projection.png) - -## Add ArLayer to ArUcoScene to load AOI - -The [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) class allows to load areas of interest description. An [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) instance can contains multiples [ArLayers](../../argaze.md/#argaze.ArFeatures.ArLayer). - -Here is the previous extract where one layer is added to the [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) configuration: - -```json -{ - "name": "My FullHD camera", - "size": [1920, 1080], - ... - "scenes": { - "MyScene" : { - "aruco_markers_group": { - ... - }, - "layers": { - "MyLayer": { - "aoi_scene": { - "YellowSquare": [[6.2, -7.275252, 25.246159], [31.2, -7.275252, 25.246159], [31.2, 1.275252, 1.753843], [6.2, 1.275252, 1.753843]], - "GrayRectangle": [[2.5, 2.5, -0.5], [37.5, 2.5, -0.5], [37.5, 27.5, -0.5], [2.5, 27.5, -0.5]], - "BlueTriangle": [[12.5, 7.5, -0.5], [27.5, 7.5, -0.5], [20, 22.5, -0.5]] - } - } - } - } - } - ... -} -``` - -Now, let's understand the meaning of each JSON entry. - -### "MyLayer" - -The name of the [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer). Basically useful for visualisation purpose. - -### AOI Scene - -The [AOIScene](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.AOIScene) defines a set of 3D [AreaOfInterest](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.AreaOfInterest) registered by name. - -## Add ArLayer to ArUcoCamera to project AOI - -Here is the previous extract where one layer is added to the [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) and displayed: - -```json -{ - "name": "My FullHD camera", - "size": [1920, 1080], - ... - "scenes": { - "MyScene" : { - "aruco_markers_group": { - ... - }, - "layers": { - "MyLayer": { - "aoi_scene": { - ... - } - } - } - } - }, - "layers": { - "MyLayer": {} - } - ... - "image_parameters": { - ... - "draw_layers": { - "MyLayer": { - "draw_aoi_scene": { - "draw_aoi": { - "color": [255, 255, 255], - "border_size": 1 - } - } - } - } - } -} -``` - -Now, let's understand the meaning of each JSON entry. - -### "MyLayer" - -The name of the [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer). Basically useful for visualisation purpose. - -!!! warning "Layer name policy" - - An [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) layer is projected into an [ ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) layer, **provided they have the same name**. - -!!! note - - [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) layers are projected into their dedicated [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) layers when calling the [ArUcoCamera.watch](../../argaze.md/#argaze.ArFeatures.ArCamera.watch) method. 
- -## Add AOI analysis - -When a scene layer is projected into a camera layer, it means that the 3D [ArLayer.aoi_scene](../../argaze.md/#argaze.ArFeatures.ArLayer.aoi_scene) description of the scene becomes the 2D camera's [ArLayer.aoi_scene](../../argaze.md/#argaze.ArFeatures.ArLayer.aoi_scene) description of the camera. - -Therefore, it means that [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) benefits from all the services described in [AOI analysis pipeline section](../gaze_analysis_pipeline/aoi_analysis.md). - -Here is the previous extract where AOI matcher, AOI scan path and AOI scan path analyzers are added to the [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) layer: - -```json -{ - "name": "My FullHD camera", - "size": [1920, 1080], - ... - "scenes": { - "MyScene" : { - "aruco_markers_group": { - ... - }, - "layers": { - "MyLayer": { - "aoi_scene": { - ... - } - } - } - } - }, - "layers": { - "MyLayer": { - "aoi_matcher": { - "DeviationCircleCoverage": { - "coverage_threshold": 0.5 - } - }, - "aoi_scan_path": { - "duration_max": 30000 - }, - "aoi_scan_path_analyzers": { - "Basic": {}, - "TransitionMatrix": {}, - "NGram": { - "n_min": 3, - "n_max": 5 - } - } - } - } - ... -} -``` - -!!! warning - - Adding scan path and scan path analyzers to an [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) layer doesn't make sense if the camera is moving. diff --git a/docs/user_guide/aruco_markers_pipeline/configuration_and_execution.md b/docs/user_guide/aruco_markers_pipeline/configuration_and_execution.md index 35b64f7..6bf84a9 100644 --- a/docs/user_guide/aruco_markers_pipeline/configuration_and_execution.md +++ b/docs/user_guide/aruco_markers_pipeline/configuration_and_execution.md @@ -135,4 +135,4 @@ Particularly, timestamped gaze positions can be passed one by one to [ArUcoCamer At this point, the [ArUcoCamera.watch](../../argaze.md/#argaze.ArFeatures.ArCamera.watch) method only detects ArUco markers and the [ArUcoCamera.look](../../argaze.md/#argaze.ArFeatures.ArCamera.look) method only process gaze movement identification without any AOI support as no scene description is provided into the JSON configuration file. - Read the next chapters to learn [how to estimate scene pose](pose_estimation.md) and [how to project AOI](aoi_projection.md). \ No newline at end of file + Read the next chapters to learn [how to estimate scene pose](pose_estimation.md) and [how to project AOI](aoi_3d_projection.md). 
\ No newline at end of file diff --git a/docs/user_guide/aruco_markers_pipeline/introduction.md b/docs/user_guide/aruco_markers_pipeline/introduction.md index dd82900..917245d 100644 --- a/docs/user_guide/aruco_markers_pipeline/introduction.md +++ b/docs/user_guide/aruco_markers_pipeline/introduction.md @@ -16,10 +16,10 @@ First, let's look at the schema below: it gives an overview of the main notions To build your own ArUco markers pipeline, you need to know: * [How to setup ArUco markers into a scene](aruco_markers_description.md), -* [How to describe scene's AOI](aoi_description.md), +* [How to describe scene's AOI](aoi_3d_description.md), * [How to load and execute ArUco markers pipeline](configuration_and_execution.md), * [How to estimate scene pose](pose_estimation.md), -* [How to project AOI into camera frame](aoi_projection.md), +* [How to project AOIs into camera frame](aoi_3d_projection.md), * [How to define an AOI as a frame](aoi_frame.md) More advanced features are also explained like: diff --git a/docs/user_guide/gaze_analysis_pipeline/aoi_2d_analysis.md b/docs/user_guide/gaze_analysis_pipeline/aoi_2d_analysis.md new file mode 100644 index 0000000..66763ad --- /dev/null +++ b/docs/user_guide/gaze_analysis_pipeline/aoi_2d_analysis.md @@ -0,0 +1,96 @@ +Enable 2D AOIs analysis +=================== + +The [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) class defines a space where to make matching of gaze movements with AOIs and inside which those matchings need to be analyzed. + +![Layer](../../img/ar_layer.png) + +## Add ArLayer to ArFrame JSON configuration file + +An [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) instance can contains multiples [ArLayers](../../argaze.md/#argaze.ArFeatures.ArLayer). + +Here is an extract from the JSON [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) configuration file with a sample where one layer is added: + +```json +{ + "name": "My FullHD screen", + "size": [1920, 1080], + ... + "layers": { + "MyLayer": { + "aoi_scene" : { + "upper_left_area": [[0, 0], [960, 0], [960, 540], [0, 540]], + "upper_right_area": [[960, 0], [1920, 0], [1920, 540], [960, 540]], + "lower_left_area": [[0, 540], [960, 540], [960, 1080], [0, 1080]], + "lower_right_area": [[960, 540], [1920, 540], [1920, 1080], [960, 1080]] + }, + "aoi_matcher": { + "DeviationCircleCoverage": { + "coverage_threshold": 0.5 + } + }, + "aoi_scan_path": { + "duration_max": 30000 + }, + "aoi_scan_path_analyzers": { + "Basic": {}, + "TransitionMatrix": {}, + "NGram": { + "n_min": 3, + "n_max": 5 + } + } + } + } +} +``` + +!!! note + + Timestamped gaze movements identified by parent [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) are passed one by one to each [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer). So, the execution of all [ArLayers](../../argaze.md/#argaze.ArFeatures.ArLayer) is done during parent [ArFrame.look](../../argaze.md/#argaze.ArFeatures.ArFrame.look) method call as explained in [previous chapter](configuration_and_execution.md). + +Now, let's understand the meaning of each JSON entry. + +### "MyLayer" + +The name of the [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer). Basically useful for visualisation purpose. + +### AOI Scene + +The [AOIScene](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.AOIScene) defines a set of 2D [AreaOfInterest](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.AreaOfInterest) registered by name. 
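+
+For instance, a scene equivalent to the JSON *aoi_scene* entry above could also be built programmatically. Here is a minimal sketch (import paths and class names taken from the AreaOfInterest submodule used elsewhere in this repository):
+
+```python
+from argaze.AreaOfInterest import AOI2DScene, AOIFeatures
+
+# Register two of the example 2D AOI by name (vertices in pixels)
+aoi_scene = AOI2DScene.AOI2DScene({
+    "upper_left_area": AOIFeatures.AreaOfInterest([[0, 0], [960, 0], [960, 540], [0, 540]]),
+    "upper_right_area": AOIFeatures.AreaOfInterest([[960, 0], [1920, 0], [1920, 540], [960, 540]])
+})
+```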
+ +![AOI Scene](../../img/ar_layer_aoi_scene.png) + +### AOI Matcher + +The first [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) pipeline step aims to make match identified gaze movement with an AOI of the scene. + +![AOI Matcher](../../img/ar_layer_aoi_matcher.png) + +The matching algorithm can be selected by instantiating a particular AOIMatcher [from GazeAnalysis submodule](pipeline_modules/aoi_matchers.md) or [from another python package](advanced_topics/module_loading.md). + +In the example file, the choosen matching algorithm is the [Deviation Circle Coverage](../../argaze.md/#argaze.GazeAnalysis.DeviationCircleCoverage) which has one specific *coverage_threshold* attribute. + +!!! warning "Mandatory" + JSON *aoi_matcher* entry is mandatory. Otherwise, the AOIScanPath and AOIScanPathAnalyzers steps are disabled. + +### AOI Scan Path + +The second [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) pipeline step aims to build a [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath) defined as a list of [AOIScanSteps](../../argaze.md/#argaze.GazeFeatures.AOIScanStep) made by a set of successive fixations/saccades onto a same AOI. + +![AOI Scan Path](../../img/ar_layer_aoi_scan_path.png) + +Once identified gaze movements are matched to AOI, they are automatically appended to the AOIScanPath if required. + +The [AOIScanPath.duration_max](../../argaze.md/#argaze.GazeFeatures.AOIScanPath.duration_max) attribute is the duration from which older AOI scan steps are removed each time new AOI scan steps are added. + +!!! note "Optional" + JSON *aoi_scan_path* entry is not mandatory. If aoi_scan_path_analyzers entry is not empty, the AOIScanPath step is automatically enabled. + +### AOI Scan Path Analyzers + +Finally, the last [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) pipeline step consists in passing the previously built [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath) to each loaded [AOIScanPathAnalyzer](../../argaze.md/#argaze.GazeFeatures.AOIScanPathAnalyzer). + +Each analysis algorithm can be selected by instantiating a particular AOIScanPathAnalyzer [from GazeAnalysis submodule](pipeline_modules/aoi_scan_path_analyzers.md) or [from another python package](advanced_topics/module_loading.md). + +In the example file, the choosen analysis algorithms are the [Basic](../../argaze.md/#argaze.GazeAnalysis.Basic) module, the [TransitionMatrix](../../argaze.md/#argaze.GazeAnalysis.TransitionMatrix) module and the [NGram](../../argaze.md/#argaze.GazeAnalysis.NGram) module which has two specific *n_min* and *n_max* attributes. diff --git a/docs/user_guide/gaze_analysis_pipeline/aoi_2d_description.md b/docs/user_guide/gaze_analysis_pipeline/aoi_2d_description.md new file mode 100644 index 0000000..b2f0b90 --- /dev/null +++ b/docs/user_guide/gaze_analysis_pipeline/aoi_2d_description.md @@ -0,0 +1,70 @@ +Describe 2D AOI +=============== + +Once [frame is configured](configuration_and_execution.md), areas of interest need to be described into the same 2D referential. + +![2D AOI description](../../img/aoi_2d_description.png) + +According a common computer graphics coordinates convention, all AOIs need to be described from a top left frame corner origin in a coordinate system where: + +* +X is pointing to the right, +* +Y is pointing to the downward. + +!!! warning + All AOIs spatial values must be given in **pixels**. + +### Edit SVG file description + +SVG file format could be exported from most vector graphics editors. 
+ +``` xml + + + + + + + +``` + +Here are common SVG file features needed to describe AOIs: + +* *id* attribute indicates AOI name. +* *path* element describes any polygon using only [M, L and Z path intructions](https://www.w3.org/TR/SVG2/paths.html#PathData) +* *rect* and *circle* allow respectively to describe rectangular and circle AOI. + +### Edit JSON file description + +JSON file format allows to describe AOIs. + +``` json +{ + "Triangle" : [[1288.1, 189.466], [1991.24, 3399.34], [584.958, 3399.34]], + "BlueRectangle": { + "shape": "rectangle", + "x": 1257, + "y": 1905.18, + "width": 604.169, + "height": 988.564 + }, + "RedSquare": { + "shape": "rectangle", + "x": 623.609, + "y": 658.357, + "width": 803.15, + "height": 803.15 + }, + "GreenCircle": { + "shape": "circle", + "cx": 675.77, + "cy": 2163.5, + "radius": 393.109 + }, + "PinkCircle": { + "shape": "circle", + "cx": 1902.02, + "cy": 879.316, + "radius": 195.313 + } +} +``` diff --git a/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md b/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md deleted file mode 100644 index 84730d4..0000000 --- a/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md +++ /dev/null @@ -1,96 +0,0 @@ -Enable AOI analysis -=================== - -The [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) class defines a space where to make matching of gaze movements with AOIs and inside which those matchings need to be analyzed. - -![Layer](../../img/ar_layer.png) - -## Add ArLayer to ArFrame JSON configuration file - -An [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) instance can contains multiples [ArLayers](../../argaze.md/#argaze.ArFeatures.ArLayer). - -Here is an extract from the JSON [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) configuration file with a sample where one layer is added: - -```json -{ - "name": "My FullHD screen", - "size": [1920, 1080], - ... - "layers": { - "MyLayer": { - "aoi_scene" : { - "upper_left_area": [[0, 0], [960, 0], [960, 540], [0, 540]], - "upper_right_area": [[960, 0], [1920, 0], [1920, 540], [960, 540]], - "lower_left_area": [[0, 540], [960, 540], [960, 1080], [0, 1080]], - "lower_right_area": [[960, 540], [1920, 540], [1920, 1080], [960, 1080]] - }, - "aoi_matcher": { - "DeviationCircleCoverage": { - "coverage_threshold": 0.5 - } - }, - "aoi_scan_path": { - "duration_max": 30000 - }, - "aoi_scan_path_analyzers": { - "Basic": {}, - "TransitionMatrix": {}, - "NGram": { - "n_min": 3, - "n_max": 5 - } - } - } - } -} -``` - -!!! note - - Timestamped gaze movements identified by parent [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) are passed one by one to each [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer). So, the execution of all [ArLayers](../../argaze.md/#argaze.ArFeatures.ArLayer) is done during parent [ArFrame.look](../../argaze.md/#argaze.ArFeatures.ArFrame.look) method call as explained in [previous chapter](configuration_and_execution.md). - -Now, let's understand the meaning of each JSON entry. - -### "MyLayer" - -The name of the [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer). Basically useful for visualisation purpose. - -### AOI Scene - -The [AOIScene](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.AOIScene) defines a set of 2D [AreaOfInterest](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.AreaOfInterest) registered by name. 
- -![AOI Scene](../../img/ar_layer_aoi_scene.png) - -### AOI Matcher - -The first [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) pipeline step aims to make match identified gaze movement with an AOI of the scene. - -![AOI Matcher](../../img/ar_layer_aoi_matcher.png) - -The matching algorithm can be selected by instantiating a particular AOIMatcher [from GazeAnalysis submodule](pipeline_modules/aoi_matchers.md) or [from another python package](advanced_topics/module_loading.md). - -In the example file, the choosen matching algorithm is the [Deviation Circle Coverage](../../argaze.md/#argaze.GazeAnalysis.DeviationCircleCoverage) which has one specific *coverage_threshold* attribute. - -!!! warning "Mandatory" - JSON *aoi_matcher* entry is mandatory. Otherwise, the AOIScanPath and AOIScanPathAnalyzers steps are disabled. - -### AOI Scan Path - -The second [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) pipeline step aims to build a [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath) defined as a list of [AOIScanSteps](../../argaze.md/#argaze.GazeFeatures.AOIScanStep) made by a set of successive fixations/saccades onto a same AOI. - -![AOI Scan Path](../../img/ar_layer_aoi_scan_path.png) - -Once identified gaze movements are matched to AOI, they are automatically appended to the AOIScanPath if required. - -The [AOIScanPath.duration_max](../../argaze.md/#argaze.GazeFeatures.AOIScanPath.duration_max) attribute is the duration from which older AOI scan steps are removed each time new AOI scan steps are added. - -!!! note "Optional" - JSON *aoi_scan_path* entry is not mandatory. If aoi_scan_path_analyzers entry is not empty, the AOIScanPath step is automatically enabled. - -### AOI Scan Path Analyzers - -Finally, the last [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) pipeline step consists in passing the previously built [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath) to each loaded [AOIScanPathAnalyzer](../../argaze.md/#argaze.GazeFeatures.AOIScanPathAnalyzer). - -Each analysis algorithm can be selected by instantiating a particular AOIScanPathAnalyzer [from GazeAnalysis submodule](pipeline_modules/aoi_scan_path_analyzers.md) or [from another python package](advanced_topics/module_loading.md). - -In the example file, the choosen analysis algorithms are the [Basic](../../argaze.md/#argaze.GazeAnalysis.Basic) module, the [TransitionMatrix](../../argaze.md/#argaze.GazeAnalysis.TransitionMatrix) module and the [NGram](../../argaze.md/#argaze.GazeAnalysis.NGram) module which has two specific *n_min* and *n_max* attributes. diff --git a/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md b/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md index 5aca8f3..7657935 100644 --- a/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md +++ b/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md @@ -107,4 +107,4 @@ Timestamped gaze positions have to be passed one by one to [ArFrame.look](../../ At this point, the [ArFrame.look](../../argaze.md/#argaze.ArFeatures.ArFrame.look) method only process gaze movement identification and scan path analysis without any AOI neither any logging or visualisation supports. - Read the next chapters to learn how to [add AOI analysis](aoi_analysis.md), [log gaze analysis](logging.md) and [visualize pipeline steps](visualisation.md). 
\ No newline at end of file + Read the next chapters to learn how to [add AOI analysis](aoi_2d_analysis.md), [log gaze analysis](logging.md) and [visualize pipeline steps](visualisation.md). \ No newline at end of file diff --git a/docs/user_guide/gaze_analysis_pipeline/introduction.md b/docs/user_guide/gaze_analysis_pipeline/introduction.md index 23b41a9..d33d308 100644 --- a/docs/user_guide/gaze_analysis_pipeline/introduction.md +++ b/docs/user_guide/gaze_analysis_pipeline/introduction.md @@ -11,7 +11,8 @@ To build your own gaze analysis pipeline, you need to know: * [How to edit timestamped gaze positions](timestamped_gaze_positions_edition.md), * [How to load and execute gaze analysis pipeline](configuration_and_execution.md), -* [How to enable AOI analysis](aoi_analysis.md), +* [How to describe frame's AOI](aoi_2d_description.md), +* [How to enable AOIs analysis](aoi_2d_analysis.md), * [How to visualize ArFrame and ArLayers](visualisation.md), * [How to log resulted gaze analysis](logging.md), * [How to make heatmap image](heatmap.md). diff --git a/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/aoi_matchers.md b/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/aoi_matchers.md index 61338cc..8ba751f 100644 --- a/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/aoi_matchers.md +++ b/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/aoi_matchers.md @@ -3,7 +3,7 @@ AOI matchers ArGaze provides ready-to-use AOI matching algorithms. -Here are JSON samples to include the chosen module inside [ArLayer configuration](../aoi_analysis.md) *aoi_matcher* entry. +Here are JSON samples to include the chosen module inside [ArLayer configuration](../aoi_2d_analysis.md) *aoi_matcher* entry. ## Deviation circle coverage diff --git a/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/aoi_scan_path_analyzers.md b/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/aoi_scan_path_analyzers.md index ad1832d..e395750 100644 --- a/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/aoi_scan_path_analyzers.md +++ b/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/aoi_scan_path_analyzers.md @@ -3,7 +3,7 @@ AOI scan path analyzers ArGaze provides ready-to-use AOI scan path analysis algorithms. -Here are JSON samples to include a chosen module inside [ArLayer configuration](../aoi_analysis.md) *aoi_scan_path_analyzers* entry. +Here are JSON samples to include a chosen module inside [ArLayer configuration](../aoi_2d_analysis.md) *aoi_scan_path_analyzers* entry. 
## Basic metrics

diff --git a/mkdocs.yml b/mkdocs.yml
index c1c2af6..385ebef 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -8,7 +8,8 @@ nav:
     - user_guide/gaze_analysis_pipeline/introduction.md
     - user_guide/gaze_analysis_pipeline/timestamped_gaze_positions_edition.md
     - user_guide/gaze_analysis_pipeline/configuration_and_execution.md
-    - user_guide/gaze_analysis_pipeline/aoi_analysis.md
+    - user_guide/gaze_analysis_pipeline/aoi_2d_description.md
+    - user_guide/gaze_analysis_pipeline/aoi_2d_analysis.md
     - user_guide/gaze_analysis_pipeline/visualisation.md
     - user_guide/gaze_analysis_pipeline/logging.md
     - user_guide/gaze_analysis_pipeline/heatmap.md
@@ -24,10 +25,10 @@ nav:
     - ArUco markers pipeline:
       - user_guide/aruco_markers_pipeline/introduction.md
       - user_guide/aruco_markers_pipeline/aruco_markers_description.md
-      - user_guide/aruco_markers_pipeline/aoi_description.md
+      - user_guide/aruco_markers_pipeline/aoi_3d_description.md
       - user_guide/aruco_markers_pipeline/configuration_and_execution.md
       - user_guide/aruco_markers_pipeline/pose_estimation.md
-      - user_guide/aruco_markers_pipeline/aoi_projection.md
+      - user_guide/aruco_markers_pipeline/aoi_3d_projection.md
       - Advanced Topics:
         - user_guide/aruco_markers_pipeline/advanced_topics/optic_parameters_calibration.md
-- 
cgit v1.1


From 217d7ffb68ea4ebbc22cd914cf37d24ce3bcc566 Mon Sep 17 00:00:00 2001
From: Theo De La Hogue
Date: Mon, 25 Sep 2023 14:46:46 +0200
Subject: Adding a way to load SVG AOI description. Allowing to use shape to describe rectangular or circular 2D AOI in JSON.

---
 src/argaze/ArFeatures.py                 |  5 +++
 src/argaze/AreaOfInterest/AOI2DScene.py  | 62 ++++++++++++++++++++++++++++++++
 src/argaze/AreaOfInterest/AOIFeatures.py | 48 +++++++++++++++++++++++--
 3 files changed, 112 insertions(+), 3 deletions(-)

diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py
index a419d93..0750cb5 100644
--- a/src/argaze/ArFeatures.py
+++ b/src/argaze/ArFeatures.py
@@ -180,6 +180,11 @@ class ArLayer():
 
             new_aoi_scene = AOIFeatures.AOIScene.from_json(filepath)
 
+            # SVG file format for 2D dimension only
+            elif file_format == 'svg':
+
+                new_aoi_scene = AOI2DScene.AOI2DScene.from_svg(filepath)
+
             # OBJ file format for 3D dimension only
             elif file_format == 'obj':
 
diff --git a/src/argaze/AreaOfInterest/AOI2DScene.py b/src/argaze/AreaOfInterest/AOI2DScene.py
index 564f65c..4dc47f4 100644
--- a/src/argaze/AreaOfInterest/AOI2DScene.py
+++ b/src/argaze/AreaOfInterest/AOI2DScene.py
@@ -15,6 +15,7 @@ from argaze import GazeFeatures
 
 import cv2
 import numpy
+from xml.dom import minidom
 
 AOI2DSceneType = TypeVar('AOI2DScene', bound="AOI2DScene")
 # Type definition for type annotation convenience
 
@@ -29,6 +30,67 @@ class AOI2DScene(AOIFeatures.AOIScene):
 
         super().__init__(2, aois_2d)
 
+    @classmethod
+    def from_svg(self, svg_filepath: str) -> AOI2DSceneType:
+        """
+        Load areas from .svg file.
+
+        Parameters:
+            svg_filepath: path to svg file
+
+        !!! note
+            Available SVG elements are: path, rect and circle.
+
+        !!! warning
+            Available SVG path d-string commands are: MoveTo (M), LineTo (L) and ClosePath (Z).
+ """ + + with minidom.parse(svg_filepath) as description_file: + + new_areas = {} + + # Load SVG path + for path in description_file.getElementsByTagName('path'): + + # Convert d-string into array + d_string = path.getAttribute('d') + + assert(d_string[0] == 'M') + assert(d_string[-1] == 'Z') + + points = [(float(x), float(y)) for x, y in [p.split(',') for p in d_string[1:-1].split('L')]] + + new_areas[path.getAttribute('id')] = AOIFeatures.AreaOfInterest(points) + + # Load SVG rect + for rect in description_file.getElementsByTagName('rect'): + + # Convert rect element into dict + rect_dict = { + 'shape': 'rectangle', + 'x': float(rect.getAttribute('x')), + 'y': float(rect.getAttribute('y')), + 'width': float(rect.getAttribute('width')), + 'height': float(rect.getAttribute('height')) + } + + new_areas[rect.getAttribute('id')] = AOIFeatures.AreaOfInterest.from_dict(rect_dict) + + # Load SVG circle + for circle in description_file.getElementsByTagName('circle'): + + # Convert circle element into dict + circle_dict = { + 'shape': 'circle', + 'cx': float(circle.getAttribute('cx')), + 'cy': float(circle.getAttribute('cy')), + 'radius': float(circle.getAttribute('r')) + } + + new_areas[circle.getAttribute('id')] = AOIFeatures.AreaOfInterest.from_dict(circle_dict) + + return AOI2DScene(new_areas) + def draw(self, image: numpy.array, draw_aoi: dict = None, exclude=[]): """Draw AOI polygons on image. diff --git a/src/argaze/AreaOfInterest/AOIFeatures.py b/src/argaze/AreaOfInterest/AOIFeatures.py index ffaf882..debf1fa 100644 --- a/src/argaze/AreaOfInterest/AOIFeatures.py +++ b/src/argaze/AreaOfInterest/AOIFeatures.py @@ -11,6 +11,7 @@ from typing import TypeVar, Tuple from dataclasses import dataclass, field import json import os +import math from argaze import DataStructures @@ -41,6 +42,40 @@ class AreaOfInterest(numpy.ndarray): return repr(self.tolist()) + @classmethod + def from_dict(self, aoi_data: dict, working_directory: str = None) -> AreaOfInterestType: + """Load attributes from dictionary. + + Parameters: + aoi_data: dictionary with attributes to load + working_directory: folder path where to load files when a dictionary value is a relative filepath. 
+ """ + + shape = aoi_data.pop('shape') + + if shape == 'rectangle': + + x = aoi_data.pop('x') + y = aoi_data.pop('y') + width = aoi_data.pop('width') + height = aoi_data.pop('height') + + points = [[x, y], [x+width, y], [x+width, y+height], [x, y+height]] + + return AreaOfInterest(points) + + elif shape == 'circle': + + cx = aoi_data.pop('cx') + cy = aoi_data.pop('cy') + radius = aoi_data.pop('radius') + + # TODO: Use pygeos + N = 32 + points = [(math.cos(2*math.pi / N*x) * radius + cx, math.sin(2*math.pi / N*x) * radius + cy) for x in range(0, N+1)] + + return AreaOfInterest(points) + @property def dimension(self) -> int: """Number of axis coding area points positions.""" @@ -249,8 +284,15 @@ class AOIScene(): # Load areas areas = {} - for name, area in aoi_scene_data.items(): - areas[name] = AreaOfInterest(area) + for area_name, area_data in aoi_scene_data.items(): + + if type(area_data) == list: + + areas[area_name] = AreaOfInterest(area_data) + + elif type(area_data) == dict: + + areas[area_name] = AreaOfInterest.from_dict(area_data) # Default dimension is 0 dimension = 0 @@ -276,7 +318,7 @@ class AOIScene(): aoi_scene_data = json.load(configuration_file) working_directory = os.path.dirname(json_filepath) - return AOIScene.from_dict(aoi_scene_data, working_directory) + return AOIScene.from_dict(aoi_scene_data, working_directory) def __getitem__(self, name) -> AreaOfInterest: """Get an AOI from the scene.""" -- cgit v1.1 From 08791230814241baf2283c07cc6b16b00dcccca4 Mon Sep 17 00:00:00 2001 From: Theo De La Hogue Date: Tue, 26 Sep 2023 10:42:41 +0200 Subject: Changing JSON description schema. --- src/argaze/AreaOfInterest/AOIFeatures.py | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/src/argaze/AreaOfInterest/AOIFeatures.py b/src/argaze/AreaOfInterest/AOIFeatures.py index debf1fa..dfbb165 100644 --- a/src/argaze/AreaOfInterest/AOIFeatures.py +++ b/src/argaze/AreaOfInterest/AOIFeatures.py @@ -51,24 +51,26 @@ class AreaOfInterest(numpy.ndarray): working_directory: folder path where to load files when a dictionary value is a relative filepath. """ - shape = aoi_data.pop('shape') + # Get first and unique shape + # TODO: allow multiple shapes to describe more complex AOI + shape, shape_data = aoi_data.popitem() - if shape == 'rectangle': + if shape == 'Rectangle': - x = aoi_data.pop('x') - y = aoi_data.pop('y') - width = aoi_data.pop('width') - height = aoi_data.pop('height') + x = shape_data.pop('x') + y = shape_data.pop('y') + width = shape_data.pop('width') + height = shape_data.pop('height') points = [[x, y], [x+width, y], [x+width, y+height], [x, y+height]] return AreaOfInterest(points) - elif shape == 'circle': + elif shape == 'Circle': - cx = aoi_data.pop('cx') - cy = aoi_data.pop('cy') - radius = aoi_data.pop('radius') + cx = shape_data.pop('cx') + cy = shape_data.pop('cy') + radius = shape_data.pop('radius') # TODO: Use pygeos N = 32 @@ -462,7 +464,7 @@ class AOIScene(): @property def dimension(self) -> int: - """Dimension of the AOIs in scene.""" + """Dimension of the AOI in scene.""" return self.__dimension -- cgit v1.1 From 12ae7e20aba323624d360567ea424ac2d315fbc7 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 26 Sep 2023 10:47:43 +0200 Subject: Harmonizing AOI/aoi without s at the end. 
--- docs/index.md | 2 +- .../aruco_markers_pipeline/aoi_3d_description.md | 8 +- .../aruco_markers_pipeline/aoi_3d_projection.md | 8 +- .../aruco_markers_pipeline/introduction.md | 2 +- .../gaze_analysis_pipeline/aoi_2d_analysis.md | 96 ---------------------- .../gaze_analysis_pipeline/aoi_2d_description.md | 52 ++++++------ .../gaze_analysis_pipeline/aoi_analysis.md | 96 ++++++++++++++++++++++ .../configuration_and_execution.md | 2 +- .../gaze_analysis_pipeline/introduction.md | 2 +- .../pipeline_modules/aoi_matchers.md | 2 +- .../pipeline_modules/aoi_scan_path_analyzers.md | 2 +- mkdocs.yml | 2 +- src/argaze.test/AreaOfInterest/AOI2DScene.py | 6 +- src/argaze.test/AreaOfInterest/AOI3DScene.py | 6 +- src/argaze.test/GazeFeatures.py | 4 +- src/argaze/ArFeatures.py | 24 +++--- src/argaze/AreaOfInterest/AOI2DScene.py | 6 +- src/argaze/AreaOfInterest/AOI3DScene.py | 10 +-- src/argaze/GazeAnalysis/DeviationCircleCoverage.py | 14 ++-- src/argaze/GazeAnalysis/TransitionMatrix.py | 2 +- src/argaze/GazeFeatures.py | 22 ++--- 21 files changed, 186 insertions(+), 182 deletions(-) delete mode 100644 docs/user_guide/gaze_analysis_pipeline/aoi_2d_analysis.md create mode 100644 docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md diff --git a/docs/index.md b/docs/index.md index 2306490..f234a94 100644 --- a/docs/index.md +++ b/docs/index.md @@ -18,7 +18,7 @@ First of all, **ArGaze** provides extensible modules library allowing to select * **Area Of Interest (AOI) matching**: focus point inside, deviation circle coverage, ... * **Scan path analysis**: transition matrix, entropy, exploit/explore ratio, ... -Once incoming data formatted as required, all those gaze analysis features can be used with any screen-based eye tracker devices. +Once incoming data are formatted as required, all those gaze analysis features can be used with any screen-based eye tracker devices. [Learn how to build gaze analysis pipelines for various use cases by reading user guide dedicated section](./user_guide/gaze_analysis_pipeline/introduction.md). diff --git a/docs/user_guide/aruco_markers_pipeline/aoi_3d_description.md b/docs/user_guide/aruco_markers_pipeline/aoi_3d_description.md index 13f9c86..a2bb8d7 100644 --- a/docs/user_guide/aruco_markers_pipeline/aoi_3d_description.md +++ b/docs/user_guide/aruco_markers_pipeline/aoi_3d_description.md @@ -7,14 +7,14 @@ In the example scene, each screen is considered as an area of interest more the ![3D AOI description](../../img/aoi_3d_description.png) -All AOIs need to be described from same origin than markers in a [right-handed 3D axis](https://robotacademy.net.au/lesson/right-handed-3d-coordinate-frame/) where: +All AOI need to be described from same origin than markers in a [right-handed 3D axis](https://robotacademy.net.au/lesson/right-handed-3d-coordinate-frame/) where: * +X is pointing to the right, * +Y is pointing to the top, * +Z is pointing to the backward. !!! warning - All AOIs spatial values must be given in **centimeters**. + All AOI spatial values must be given in **centimeters**. ### Edit OBJ file description @@ -43,7 +43,7 @@ s off f 9 10 11 ``` -Here are common OBJ file features needed to describe AOIs: +Here are common OBJ file features needed to describe AOI: * Object lines (starting with *o* key) indicate AOI name. * Vertice lines (starting with *v* key) indicate AOI vertices. @@ -51,7 +51,7 @@ Here are common OBJ file features needed to describe AOIs: ### Edit JSON file description -JSON file format allows to describe AOIs vertices. 
+JSON file format allows to describe AOI vertices. ``` json { diff --git a/docs/user_guide/aruco_markers_pipeline/aoi_3d_projection.md b/docs/user_guide/aruco_markers_pipeline/aoi_3d_projection.md index bdebd6c..d7df765 100644 --- a/docs/user_guide/aruco_markers_pipeline/aoi_3d_projection.md +++ b/docs/user_guide/aruco_markers_pipeline/aoi_3d_projection.md @@ -1,7 +1,7 @@ Project AOI into camera frame ============================= -Once [ArUcoScene pose is estimated](pose_estimation.md) and [3D AOIs are described](aoi_3d_description.md), AOIs can be projected into [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) frame. +Once [ArUcoScene pose is estimated](pose_estimation.md) and [3D AOI are described](aoi_3d_description.md), AOI can be projected into [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) frame. ![3D AOI projection](../../img/aruco_camera_aoi_projection.png) @@ -46,7 +46,7 @@ The name of the [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer). Basically The [AOIScene](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.AOIScene) defines a set of 3D [AreaOfInterest](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.AreaOfInterest) registered by name. -## Add ArLayer to ArUcoCamera to project 3D AOIs +## Add ArLayer to ArUcoCamera to project 3D AOI Here is the previous extract where one layer is added to the [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) and displayed: @@ -103,11 +103,11 @@ The name of the [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer). Basically [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) layers are projected into their dedicated [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) layers when calling the [ArUcoCamera.watch](../../argaze.md/#argaze.ArFeatures.ArCamera.watch) method. -## Add 2D AOIs analysis +## Add 2D AOI analysis When a scene layer is projected into a camera layer, it means that the 3D [ArLayer.aoi_scene](../../argaze.md/#argaze.ArFeatures.ArLayer.aoi_scene) description of the scene becomes the 2D camera's [ArLayer.aoi_scene](../../argaze.md/#argaze.ArFeatures.ArLayer.aoi_scene) description of the camera. -Therefore, it means that [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) benefits from all the services described in [2D AOIs analysis pipeline section](../gaze_analysis_pipeline/aoi_2d_analysis.md). +Therefore, it means that [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) benefits from all the services described in [2D AOI analysis pipeline section](../gaze_analysis_pipeline/aoi_analysis.md). 
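+
+A rough sketch of the resulting calls, assuming an already-configured *aruco_camera* instance (the exact method signatures below are an assumption to check against the reference documentation):
+
+```python
+# Detect ArUco markers, estimate scene pose and project 3D AOI into the camera layer
+aruco_camera.watch(timestamp, image)
+
+# Then match timestamped gaze positions with the projected 2D AOI
+aruco_camera.look(timestamp, gaze_position)
+```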
Here is the previous extract where AOI matcher, AOI scan path and AOI scan path analyzers are added to the [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) layer: diff --git a/docs/user_guide/aruco_markers_pipeline/introduction.md b/docs/user_guide/aruco_markers_pipeline/introduction.md index 917245d..dc3aa4a 100644 --- a/docs/user_guide/aruco_markers_pipeline/introduction.md +++ b/docs/user_guide/aruco_markers_pipeline/introduction.md @@ -19,7 +19,7 @@ To build your own ArUco markers pipeline, you need to know: * [How to describe scene's AOI](aoi_3d_description.md), * [How to load and execute ArUco markers pipeline](configuration_and_execution.md), * [How to estimate scene pose](pose_estimation.md), -* [How to project AOIs into camera frame](aoi_3d_projection.md), +* [How to project AOI into camera frame](aoi_3d_projection.md), * [How to define an AOI as a frame](aoi_frame.md) More advanced features are also explained like: diff --git a/docs/user_guide/gaze_analysis_pipeline/aoi_2d_analysis.md b/docs/user_guide/gaze_analysis_pipeline/aoi_2d_analysis.md deleted file mode 100644 index 66763ad..0000000 --- a/docs/user_guide/gaze_analysis_pipeline/aoi_2d_analysis.md +++ /dev/null @@ -1,96 +0,0 @@ -Enable 2D AOIs analysis -=================== - -The [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) class defines a space where to make matching of gaze movements with AOIs and inside which those matchings need to be analyzed. - -![Layer](../../img/ar_layer.png) - -## Add ArLayer to ArFrame JSON configuration file - -An [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) instance can contains multiples [ArLayers](../../argaze.md/#argaze.ArFeatures.ArLayer). - -Here is an extract from the JSON [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) configuration file with a sample where one layer is added: - -```json -{ - "name": "My FullHD screen", - "size": [1920, 1080], - ... - "layers": { - "MyLayer": { - "aoi_scene" : { - "upper_left_area": [[0, 0], [960, 0], [960, 540], [0, 540]], - "upper_right_area": [[960, 0], [1920, 0], [1920, 540], [960, 540]], - "lower_left_area": [[0, 540], [960, 540], [960, 1080], [0, 1080]], - "lower_right_area": [[960, 540], [1920, 540], [1920, 1080], [960, 1080]] - }, - "aoi_matcher": { - "DeviationCircleCoverage": { - "coverage_threshold": 0.5 - } - }, - "aoi_scan_path": { - "duration_max": 30000 - }, - "aoi_scan_path_analyzers": { - "Basic": {}, - "TransitionMatrix": {}, - "NGram": { - "n_min": 3, - "n_max": 5 - } - } - } - } -} -``` - -!!! note - - Timestamped gaze movements identified by parent [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) are passed one by one to each [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer). So, the execution of all [ArLayers](../../argaze.md/#argaze.ArFeatures.ArLayer) is done during parent [ArFrame.look](../../argaze.md/#argaze.ArFeatures.ArFrame.look) method call as explained in [previous chapter](configuration_and_execution.md). - -Now, let's understand the meaning of each JSON entry. - -### "MyLayer" - -The name of the [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer). Basically useful for visualisation purpose. - -### AOI Scene - -The [AOIScene](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.AOIScene) defines a set of 2D [AreaOfInterest](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.AreaOfInterest) registered by name. 
- -![AOI Scene](../../img/ar_layer_aoi_scene.png) - -### AOI Matcher - -The first [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) pipeline step aims to make match identified gaze movement with an AOI of the scene. - -![AOI Matcher](../../img/ar_layer_aoi_matcher.png) - -The matching algorithm can be selected by instantiating a particular AOIMatcher [from GazeAnalysis submodule](pipeline_modules/aoi_matchers.md) or [from another python package](advanced_topics/module_loading.md). - -In the example file, the choosen matching algorithm is the [Deviation Circle Coverage](../../argaze.md/#argaze.GazeAnalysis.DeviationCircleCoverage) which has one specific *coverage_threshold* attribute. - -!!! warning "Mandatory" - JSON *aoi_matcher* entry is mandatory. Otherwise, the AOIScanPath and AOIScanPathAnalyzers steps are disabled. - -### AOI Scan Path - -The second [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) pipeline step aims to build a [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath) defined as a list of [AOIScanSteps](../../argaze.md/#argaze.GazeFeatures.AOIScanStep) made by a set of successive fixations/saccades onto a same AOI. - -![AOI Scan Path](../../img/ar_layer_aoi_scan_path.png) - -Once identified gaze movements are matched to AOI, they are automatically appended to the AOIScanPath if required. - -The [AOIScanPath.duration_max](../../argaze.md/#argaze.GazeFeatures.AOIScanPath.duration_max) attribute is the duration from which older AOI scan steps are removed each time new AOI scan steps are added. - -!!! note "Optional" - JSON *aoi_scan_path* entry is not mandatory. If aoi_scan_path_analyzers entry is not empty, the AOIScanPath step is automatically enabled. - -### AOI Scan Path Analyzers - -Finally, the last [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) pipeline step consists in passing the previously built [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath) to each loaded [AOIScanPathAnalyzer](../../argaze.md/#argaze.GazeFeatures.AOIScanPathAnalyzer). - -Each analysis algorithm can be selected by instantiating a particular AOIScanPathAnalyzer [from GazeAnalysis submodule](pipeline_modules/aoi_scan_path_analyzers.md) or [from another python package](advanced_topics/module_loading.md). - -In the example file, the choosen analysis algorithms are the [Basic](../../argaze.md/#argaze.GazeAnalysis.Basic) module, the [TransitionMatrix](../../argaze.md/#argaze.GazeAnalysis.TransitionMatrix) module and the [NGram](../../argaze.md/#argaze.GazeAnalysis.NGram) module which has two specific *n_min* and *n_max* attributes. diff --git a/docs/user_guide/gaze_analysis_pipeline/aoi_2d_description.md b/docs/user_guide/gaze_analysis_pipeline/aoi_2d_description.md index b2f0b90..6cca7ce 100644 --- a/docs/user_guide/gaze_analysis_pipeline/aoi_2d_description.md +++ b/docs/user_guide/gaze_analysis_pipeline/aoi_2d_description.md @@ -1,17 +1,17 @@ Describe 2D AOI -=============== +================ -Once [frame is configured](configuration_and_execution.md), areas of interest need to be described into the same 2D referential. +Once [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) is [configured](configuration_and_execution.md), areas of interest need to be described to know what is looked in frame. 
![2D AOI description](../../img/aoi_2d_description.png) -According a common computer graphics coordinates convention, all AOIs need to be described from a top left frame corner origin in a coordinate system where: +According common computer graphics coordinates convention, all AOI need to be described from a top left frame corner origin with a coordinate system where: * +X is pointing to the right, * +Y is pointing to the downward. !!! warning - All AOIs spatial values must be given in **pixels**. + All AOI spatial values must be given in **pixels**. ### Edit SVG file description @@ -27,7 +27,7 @@ SVG file format could be exported from most vector graphics editors. ``` -Here are common SVG file features needed to describe AOIs: +Here are common SVG file features needed to describe AOI: * *id* attribute indicates AOI name. * *path* element describes any polygon using only [M, L and Z path intructions](https://www.w3.org/TR/SVG2/paths.html#PathData) @@ -35,36 +35,40 @@ Here are common SVG file features needed to describe AOIs: ### Edit JSON file description -JSON file format allows to describe AOIs. +JSON file format allows to describe AOI. ``` json { "Triangle" : [[1288.1, 189.466], [1991.24, 3399.34], [584.958, 3399.34]], "BlueRectangle": { - "shape": "rectangle", - "x": 1257, - "y": 1905.18, - "width": 604.169, - "height": 988.564 + "Rectangle": { + "x": 1257, + "y": 1905.18, + "width": 604.169, + "height": 988.564 + } }, "RedSquare": { - "shape": "rectangle", - "x": 623.609, - "y": 658.357, - "width": 803.15, - "height": 803.15 + "Rectangle": { + "x": 623.609, + "y": 658.357, + "width": 803.15, + "height": 803.15 + } }, "GreenCircle": { - "shape": "circle", - "cx": 675.77, - "cy": 2163.5, - "radius": 393.109 + "Circle": { + "cx": 675.77, + "cy": 2163.5, + "radius": 393.109 + } }, "PinkCircle": { - "shape": "circle", - "cx": 1902.02, - "cy": 879.316, - "radius": 195.313 + "Circle": { + "cx": 1902.02, + "cy": 879.316, + "radius": 195.313 + } } } ``` diff --git a/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md b/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md new file mode 100644 index 0000000..cce3fcb --- /dev/null +++ b/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md @@ -0,0 +1,96 @@ +Enable AOI analysis +=================== + +The [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) class defines a space where to make matching of gaze movements with AOI and inside which those matchings need to be analyzed. + +![Layer](../../img/ar_layer.png) + +## Add ArLayer to ArFrame JSON configuration file + +An [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) instance can contains multiples [ArLayers](../../argaze.md/#argaze.ArFeatures.ArLayer). + +Here is an extract from the JSON [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) configuration file with a sample where one layer is added: + +```json +{ + "name": "My FullHD screen", + "size": [1920, 1080], + ... + "layers": { + "MyLayer": { + "aoi_scene" : { + "upper_left_area": [[0, 0], [960, 0], [960, 540], [0, 540]], + "upper_right_area": [[960, 0], [1920, 0], [1920, 540], [960, 540]], + "lower_left_area": [[0, 540], [960, 540], [960, 1080], [0, 1080]], + "lower_right_area": [[960, 540], [1920, 540], [1920, 1080], [960, 1080]] + }, + "aoi_matcher": { + "DeviationCircleCoverage": { + "coverage_threshold": 0.5 + } + }, + "aoi_scan_path": { + "duration_max": 30000 + }, + "aoi_scan_path_analyzers": { + "Basic": {}, + "TransitionMatrix": {}, + "NGram": { + "n_min": 3, + "n_max": 5 + } + } + } + } +} +``` + +!!! 
note

+    Timestamped gaze movements identified by parent [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) are passed one by one to each [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer). So, the execution of all [ArLayers](../../argaze.md/#argaze.ArFeatures.ArLayer) is done during the parent [ArFrame.look](../../argaze.md/#argaze.ArFeatures.ArFrame.look) method call as explained in [previous chapter](configuration_and_execution.md).
+
+Now, let's understand the meaning of each JSON entry.
+
+### "MyLayer"
+
+The name of the [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer). Mainly useful for visualisation purposes.
+
+### AOI Scene
+
+The [AOIScene](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.AOIScene) defines a set of 2D [AreaOfInterest](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.AreaOfInterest) registered by name.
+
+![AOI Scene](../../img/ar_layer_aoi_scene.png)
+
+### AOI Matcher
+
+The first [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) pipeline step aims to match the identified gaze movement with an AOI of the scene.
+
+![AOI Matcher](../../img/ar_layer_aoi_matcher.png)
+
+The matching algorithm can be selected by instantiating a particular AOIMatcher [from GazeAnalysis submodule](pipeline_modules/aoi_matchers.md) or [from another python package](advanced_topics/module_loading.md).
+
+In the example file, the chosen matching algorithm is the [Deviation Circle Coverage](../../argaze.md/#argaze.GazeAnalysis.DeviationCircleCoverage) which has one specific *coverage_threshold* attribute.
+
+!!! warning "Mandatory"
+    JSON *aoi_matcher* entry is mandatory. Otherwise, the AOIScanPath and AOIScanPathAnalyzers steps are disabled.
+
+### AOI Scan Path
+
+The second [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) pipeline step aims to build an [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath) defined as a list of [AOIScanSteps](../../argaze.md/#argaze.GazeFeatures.AOIScanStep) made of successive fixations/saccades onto the same AOI.
+
+![AOI Scan Path](../../img/ar_layer_aoi_scan_path.png)
+
+Once identified gaze movements are matched to an AOI, they are automatically appended to the AOIScanPath if required.
+
+The [AOIScanPath.duration_max](../../argaze.md/#argaze.GazeFeatures.AOIScanPath.duration_max) attribute is the duration from which older AOI scan steps are removed each time new AOI scan steps are added.
+
+!!! note "Optional"
+    JSON *aoi_scan_path* entry is not mandatory. If the aoi_scan_path_analyzers entry is not empty, the AOIScanPath step is automatically enabled.
+
+### AOI Scan Path Analyzers
+
+Finally, the last [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) pipeline step consists of passing the previously built [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath) to each loaded [AOIScanPathAnalyzer](../../argaze.md/#argaze.GazeFeatures.AOIScanPathAnalyzer).
+
+Each analysis algorithm can be selected by instantiating a particular AOIScanPathAnalyzer [from GazeAnalysis submodule](pipeline_modules/aoi_scan_path_analyzers.md) or [from another python package](advanced_topics/module_loading.md).
+
+In the example file, the chosen analysis algorithms are the [Basic](../../argaze.md/#argaze.GazeAnalysis.Basic) module, the [TransitionMatrix](../../argaze.md/#argaze.GazeAnalysis.TransitionMatrix) module and the [NGram](../../argaze.md/#argaze.GazeAnalysis.NGram) module which has two specific *n_min* and *n_max* attributes.
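+
+To sum up, here is a minimal sketch of how such a configuration could be driven from Python, using the [ArFrame.from_json](../../argaze.md/#argaze.ArFeatures.ArFrame) and [ArFrame.look](../../argaze.md/#argaze.ArFeatures.ArFrame.look) entry points described in this guide (the file name and gaze data source are illustrative):
+
+```python
+from argaze import ArFeatures, GazeFeatures
+
+# Load the JSON configuration described above
+ar_frame = ArFeatures.ArFrame.from_json('./my_frame_setup.json')
+
+# Pass timestamped gaze positions one by one: AOI matching,
+# AOI scan path building and analysis happen inside each look call
+for timestamp, x, y in gaze_positions_record:  # hypothetical gaze data source
+    ar_frame.look(timestamp, GazeFeatures.GazePosition((x, y)))
+```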
diff --git a/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md b/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md index 7657935..bb8eeaa 100644 --- a/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md +++ b/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md @@ -107,4 +107,4 @@ Timestamped gaze positions have to be passed one by one to [ArFrame.look](../../ At this point, the [ArFrame.look](../../argaze.md/#argaze.ArFeatures.ArFrame.look) method only process gaze movement identification and scan path analysis without any AOI neither any logging or visualisation supports. - Read the next chapters to learn how to [add AOI analysis](aoi_2d_analysis.md), [log gaze analysis](logging.md) and [visualize pipeline steps](visualisation.md). \ No newline at end of file + Read the next chapters to learn how to [describe frame's AOI](aoi_2d_description.md), [add AOI analysis](aoi_analysis.md), [log gaze analysis](logging.md) and [visualize pipeline steps](visualisation.md). \ No newline at end of file diff --git a/docs/user_guide/gaze_analysis_pipeline/introduction.md b/docs/user_guide/gaze_analysis_pipeline/introduction.md index d33d308..76a146c 100644 --- a/docs/user_guide/gaze_analysis_pipeline/introduction.md +++ b/docs/user_guide/gaze_analysis_pipeline/introduction.md @@ -12,7 +12,7 @@ To build your own gaze analysis pipeline, you need to know: * [How to edit timestamped gaze positions](timestamped_gaze_positions_edition.md), * [How to load and execute gaze analysis pipeline](configuration_and_execution.md), * [How to describe frame's AOI](aoi_2d_description.md), -* [How to enable AOIs analysis](aoi_2d_analysis.md), +* [How to enable AOI analysis](aoi_analysis.md), * [How to visualize ArFrame and ArLayers](visualisation.md), * [How to log resulted gaze analysis](logging.md), * [How to make heatmap image](heatmap.md). diff --git a/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/aoi_matchers.md b/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/aoi_matchers.md index 8ba751f..61338cc 100644 --- a/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/aoi_matchers.md +++ b/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/aoi_matchers.md @@ -3,7 +3,7 @@ AOI matchers ArGaze provides ready-to-use AOI matching algorithms. -Here are JSON samples to include the chosen module inside [ArLayer configuration](../aoi_2d_analysis.md) *aoi_matcher* entry. +Here are JSON samples to include the chosen module inside [ArLayer configuration](../aoi_analysis.md) *aoi_matcher* entry. ## Deviation circle coverage diff --git a/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/aoi_scan_path_analyzers.md b/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/aoi_scan_path_analyzers.md index e395750..ad1832d 100644 --- a/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/aoi_scan_path_analyzers.md +++ b/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/aoi_scan_path_analyzers.md @@ -3,7 +3,7 @@ AOI scan path analyzers ArGaze provides ready-to-use AOI scan path analysis algorithms. -Here are JSON samples to include a chosen module inside [ArLayer configuration](../aoi_2d_analysis.md) *aoi_scan_path_analyzers* entry. +Here are JSON samples to include a chosen module inside [ArLayer configuration](../aoi_analysis.md) *aoi_scan_path_analyzers* entry. 
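+
+Each sample maps a module name to its attributes; the loader instantiates the matching analyzer class by name. As a hypothetical sketch of the equivalent programmatic instantiation (assuming each module exposes an AOIScanPathAnalyzer class, as the GazeAnalysis modules shown in this repository do):
+
+```python
+from argaze.GazeAnalysis import NGram
+
+# Equivalent to the JSON entry "NGram": {"n_min": 3, "n_max": 5}
+ngram_analyzer = NGram.AOIScanPathAnalyzer(n_min=3, n_max=5)
+```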
## Basic metrics diff --git a/mkdocs.yml b/mkdocs.yml index 385ebef..3c5f10c 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -9,7 +9,7 @@ nav: - user_guide/gaze_analysis_pipeline/timestamped_gaze_positions_edition.md - user_guide/gaze_analysis_pipeline/configuration_and_execution.md - user_guide/gaze_analysis_pipeline/aoi_2d_description.md - - user_guide/gaze_analysis_pipeline/aoi_2d_analysis.md + - user_guide/gaze_analysis_pipeline/aoi_analysis.md - user_guide/gaze_analysis_pipeline/visualisation.md - user_guide/gaze_analysis_pipeline/logging.md - user_guide/gaze_analysis_pipeline/heatmap.md diff --git a/src/argaze.test/AreaOfInterest/AOI2DScene.py b/src/argaze.test/AreaOfInterest/AOI2DScene.py index 4e96e98..10ff430 100644 --- a/src/argaze.test/AreaOfInterest/AOI2DScene.py +++ b/src/argaze.test/AreaOfInterest/AOI2DScene.py @@ -187,14 +187,14 @@ class TestTimeStampedAOIScenesClass(unittest.TestCase): aoi_2D_B = AOIFeatures.AreaOfInterest([[1, 1], [1, 2], [2, 2], [2, 1]]) aoi_2d_scene = AOI2DScene.AOI2DScene({"A": aoi_2D_A, "B": aoi_2D_B}) - ts_aois_scenes = AOIFeatures.TimeStampedAOIScenes() + ts_aoi_scenes = AOIFeatures.TimeStampedAOIScenes() - ts_aois_scenes[0] = aoi_2d_scene + ts_aoi_scenes[0] = aoi_2d_scene # Check that only AOIScene can be added with self.assertRaises(AssertionError): - ts_aois_scenes[1] = "This string is not an AOI2DScene" + ts_aoi_scenes[1] = "This string is not an AOI2DScene" if __name__ == '__main__': diff --git a/src/argaze.test/AreaOfInterest/AOI3DScene.py b/src/argaze.test/AreaOfInterest/AOI3DScene.py index b386432..d09f2a8 100644 --- a/src/argaze.test/AreaOfInterest/AOI3DScene.py +++ b/src/argaze.test/AreaOfInterest/AOI3DScene.py @@ -107,14 +107,14 @@ class TestTimeStampedAOIScenesClass(unittest.TestCase): aoi_3D_B = AOIFeatures.AreaOfInterest([[1, 1, 0], [1, 2, 0], [2, 2, 0], [2, 1, 0]]) aoi_3d_scene = AOI3DScene.AOI3DScene({"A": aoi_3D_A, "B": aoi_3D_B}) - ts_aois_scenes = AOIFeatures.TimeStampedAOIScenes() + ts_aoi_scenes = AOIFeatures.TimeStampedAOIScenes() - ts_aois_scenes[0] = aoi_3d_scene + ts_aoi_scenes[0] = aoi_3d_scene # Check that only AOIScene can be added with self.assertRaises(AssertionError): - ts_aois_scenes[1] = "This string is not an AOI3DScene" + ts_aoi_scenes[1] = "This string is not an AOI3DScene" if __name__ == '__main__': diff --git a/src/argaze.test/GazeFeatures.py b/src/argaze.test/GazeFeatures.py index d609dd2..b41c7c7 100644 --- a/src/argaze.test/GazeFeatures.py +++ b/src/argaze.test/GazeFeatures.py @@ -497,10 +497,10 @@ class TestAOIScanStepClass(unittest.TestCase): aoi_scan_step = GazeFeatures.AOIScanStep(movements, 'Test') -def build_aoi_scan_path(expected_aois, aoi_path): +def build_aoi_scan_path(expected_aoi, aoi_path): """Build AOI scan path""" - aoi_scan_path = GazeFeatures.AOIScanPath(expected_aois) + aoi_scan_path = GazeFeatures.AOIScanPath(expected_aoi) # Append a hidden last step to allow last given step creation aoi_path.append(aoi_path[-2]) diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index 0750cb5..122efe8 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -96,7 +96,7 @@ DEFAULT_ARLAYER_DRAW_PARAMETERS = { @dataclass class ArLayer(): """ - Defines a space where to make matching of gaze movements and AOIs and inside which those matchings need to be analyzed. + Defines a space where to make matching of gaze movements and AOI and inside which those matchings need to be analyzed. 
Parameters: name: name of the layer @@ -203,10 +203,10 @@ class ArLayer(): new_aoi_scene = AOI2DScene.AOI2DScene() # Edit expected AOI list by removing AOI with name equals to layer name - expected_aois = list(new_aoi_scene.keys()) + expected_aoi = list(new_aoi_scene.keys()) - if new_layer_name in expected_aois: - expected_aois.remove(new_layer_name) + if new_layer_name in expected_aoi: + expected_aoi.remove(new_layer_name) # Load aoi matcher try: @@ -230,13 +230,13 @@ class ArLayer(): try: new_aoi_scan_path_data = layer_data.pop('aoi_scan_path') - new_aoi_scan_path_data['expected_aois'] = expected_aois + new_aoi_scan_path_data['expected_aoi'] = expected_aoi new_aoi_scan_path = GazeFeatures.AOIScanPath(**new_aoi_scan_path_data) except KeyError: new_aoi_scan_path_data = {} - new_aoi_scan_path_data['expected_aois'] = expected_aois + new_aoi_scan_path_data['expected_aoi'] = expected_aoi new_aoi_scan_path = None # Load AOI scan path analyzers @@ -1208,7 +1208,7 @@ class ArScene(): # Check that the frame have a layer named like this scene layer aoi_2d_scene = new_frame.layers[scene_layer_name].aoi_scene - # Transform 2D frame layer AOIs into 3D scene layer AOIs + # Transform 2D frame layer AOI into 3D scene layer AOI # Then, add them to scene layer scene_layer.aoi_scene |= aoi_2d_scene.dimensionalize(frame_3d, new_frame.size) @@ -1228,12 +1228,12 @@ class ArScene(): if frame_layer.aoi_scan_path is not None: # Edit expected AOI list by removing AOI with name equals to frame layer name - expected_aois = list(layer.aoi_scene.keys()) + expected_aoi = list(layer.aoi_scene.keys()) - if frame_layer_name in expected_aois: - expected_aois.remove(frame_layer_name) + if frame_layer_name in expected_aoi: + expected_aoi.remove(frame_layer_name) - frame_layer.aoi_scan_path.expected_aois = expected_aois + frame_layer.aoi_scan_path.expected_aoi = expected_aoi except KeyError: @@ -1353,7 +1353,7 @@ class ArCamera(ArFrame): continue - layer.aoi_scan_path.expected_aois = all_aoi_list + layer.aoi_scan_path.expected_aoi = all_aoi_list # Init a lock to share scene projections into camera frame between multiple threads self._frame_lock = threading.Lock() diff --git a/src/argaze/AreaOfInterest/AOI2DScene.py b/src/argaze/AreaOfInterest/AOI2DScene.py index 4dc47f4..a726b23 100644 --- a/src/argaze/AreaOfInterest/AOI2DScene.py +++ b/src/argaze/AreaOfInterest/AOI2DScene.py @@ -26,9 +26,9 @@ AOI3DSceneType = TypeVar('AOI3DScene', bound="AOI3DScene") class AOI2DScene(AOIFeatures.AOIScene): """Define AOI 2D scene.""" - def __init__(self, aois_2d: dict = None): + def __init__(self, aoi_2d: dict = None): - super().__init__(2, aois_2d) + super().__init__(2, aoi_2d) @classmethod def from_svg(self, svg_filepath: str) -> AOI2DSceneType: @@ -121,7 +121,7 @@ class AOI2DScene(AOIFeatures.AOIScene): yield name, aoi, matching def draw_raycast(self, image: numpy.array, pointer:tuple, exclude=[], base_color=(0, 0, 255), matching_color=(0, 255, 0)): - """Draw AOIs with their matching status.""" + """Draw AOI with their matching status.""" for name, aoi, matching in self.raycast(pointer): diff --git a/src/argaze/AreaOfInterest/AOI3DScene.py b/src/argaze/AreaOfInterest/AOI3DScene.py index bfe189a..33a815c 100644 --- a/src/argaze/AreaOfInterest/AOI3DScene.py +++ b/src/argaze/AreaOfInterest/AOI3DScene.py @@ -38,15 +38,15 @@ AOI2DSceneType = TypeVar('AOI2DScene', bound="AOI2DScene") class AOI3DScene(AOIFeatures.AOIScene): """Define AOI 3D scene.""" - def __init__(self, aois_3d: dict = None): + def __init__(self, aoi_3d: dict = None): - 
super().__init__(3, aois_3d) + super().__init__(3, aoi_3d) @classmethod def from_obj(self, obj_filepath: str) -> AOI3DSceneType: """Load AOI3D scene from .obj file.""" - aois_3d = {} + aoi_3d = {} # regex rules for .obj file parsing OBJ_RX_DICT = { @@ -111,12 +111,12 @@ class AOI3DScene(AOIFeatures.AOIScene): # retreive all aoi3D vertices and sort them in clockwise order for name, face in faces.items(): aoi3D = AOIFeatures.AreaOfInterest([ vertices[i-1] for i in reversed(face) ]) - aois_3d[name] = aoi3D + aoi_3d[name] = aoi3D except IOError: raise IOError(f'File not found: {obj_filepath}') - return AOI3DScene(aois_3d) + return AOI3DScene(aoi_3d) def to_obj(self, obj_filepath: str): """Save AOI3D scene into .obj file.""" diff --git a/src/argaze/GazeAnalysis/DeviationCircleCoverage.py b/src/argaze/GazeAnalysis/DeviationCircleCoverage.py index f0decfc..6dadaba 100644 --- a/src/argaze/GazeAnalysis/DeviationCircleCoverage.py +++ b/src/argaze/GazeAnalysis/DeviationCircleCoverage.py @@ -34,7 +34,7 @@ class AOIMatcher(GazeFeatures.AOIMatcher): self.__look_count = 0 self.__looked_aoi_data = (None, None) self.__circle_ratio_sum = {} - self.__aois_coverages = {} + self.__aoi_coverages = {} self.__matched_gaze_movement = None self.__matched_region = None @@ -79,14 +79,14 @@ class AOIMatcher(GazeFeatures.AOIMatcher): self.__looked_aoi_data = most_likely_looked_aoi_data # Calculate looked aoi circle ratio means - self.__aois_coverages = {} + self.__aoi_coverages = {} for aoi_name, circle_ratio_sum in self.__circle_ratio_sum.items(): circle_ratio_mean = circle_ratio_sum / self.__look_count # filter circle ration mean greater than 1 - self.__aois_coverages[aoi_name] = circle_ratio_mean if circle_ratio_mean < 1 else 1 + self.__aoi_coverages[aoi_name] = circle_ratio_mean if circle_ratio_mean < 1 else 1 # Update matched gaze movement self.__matched_gaze_movement = gaze_movement @@ -95,7 +95,7 @@ class AOIMatcher(GazeFeatures.AOIMatcher): self.__matched_region = matched_region # Return - if self.__aois_coverages[most_likely_looked_aoi_data[0]] > self.coverage_threshold: + if self.__aoi_coverages[most_likely_looked_aoi_data[0]] > self.coverage_threshold: return self.__looked_aoi_data @@ -179,8 +179,8 @@ class AOIMatcher(GazeFeatures.AOIMatcher): return self.__looked_aoi_data[0] @property - def aois_coverages(self) -> dict: - """Get all aois coverage means for current fixation. + def aoi_coverages(self) -> dict: + """Get all aoi coverage means for current fixation. 
It represents the ratio of fixation deviation circle surface that used to cover the aoi.""" - return self.__aois_coverages \ No newline at end of file + return self.__aoi_coverages \ No newline at end of file diff --git a/src/argaze/GazeAnalysis/TransitionMatrix.py b/src/argaze/GazeAnalysis/TransitionMatrix.py index 6f408e4..b346b5a 100644 --- a/src/argaze/GazeAnalysis/TransitionMatrix.py +++ b/src/argaze/GazeAnalysis/TransitionMatrix.py @@ -42,7 +42,7 @@ class AOIScanPathAnalyzer(GazeFeatures.AOIScanPathAnalyzer): row_sum = aoi_scan_path.transition_matrix.apply(lambda row: row.sum(), axis=1) # Editing transition matrix probabilities - # Note: when no transiton starts from an aoi, destination probabilites is equal to 1/S where S is the number of aois + # Note: when no transiton starts from an aoi, destination probabilites is equal to 1/S where S is the number of aoi self.__transition_matrix_probabilities = aoi_scan_path.transition_matrix.apply(lambda row: row.apply(lambda p: p / row_sum[row.name] if row_sum[row.name] > 0 else 1 / row_sum.size), axis=1) # Calculate matrix density diff --git a/src/argaze/GazeFeatures.py b/src/argaze/GazeFeatures.py index 2dd1cab..814753e 100644 --- a/src/argaze/GazeFeatures.py +++ b/src/argaze/GazeFeatures.py @@ -842,13 +842,13 @@ AOIScanPathType = TypeVar('AOIScanPathType', bound="AOIScanPathType") class AOIScanPath(list): """List of aoi scan steps over successive aoi.""" - def __init__(self, expected_aois: list[str] = [], duration_max: int|float = 0): + def __init__(self, expected_aoi: list[str] = [], duration_max: int|float = 0): super().__init__() self.duration_max = duration_max - self.expected_aois = expected_aois + self.expected_aoi = expected_aoi self.__duration = 0 @property @@ -903,13 +903,13 @@ class AOIScanPath(list): return sequence @property - def expected_aois(self): + def expected_aoi(self): """List of all expected aoi.""" - return self.__expected_aois + return self.__expected_aoi - @expected_aois.setter - def expected_aois(self, expected_aois: list[str] = []): + @expected_aoi.setter + def expected_aoi(self, expected_aoi: list[str] = []): """Edit list of all expected aoi. !!! warning @@ -917,15 +917,15 @@ class AOIScanPath(list): """ self.clear() - self.__expected_aois = expected_aois + self.__expected_aoi = expected_aoi self.__movements = TimeStampedGazeMovements() self.__current_aoi = '' self.__index = ord('A') self.__aoi_letter = {} self.__letter_aoi = {} - size = len(self.__expected_aois) - self.__transition_matrix = pandas.DataFrame(numpy.zeros((size, size)), index=self.__expected_aois, columns=self.__expected_aois) + size = len(self.__expected_aoi) + self.__transition_matrix = pandas.DataFrame(numpy.zeros((size, size)), index=self.__expected_aoi, columns=self.__expected_aoi) @property def current_aoi(self): @@ -953,7 +953,7 @@ class AOIScanPath(list): !!! 
warning It could raise AOIScanStepError""" - if looked_aoi not in self.__expected_aois: + if looked_aoi not in self.__expected_aoi: raise AOIScanStepError('AOI not expected', looked_aoi) @@ -1013,7 +1013,7 @@ class AOIScanPath(list): """Get how many fixations are there in the scan path and how many fixation are there in each aoi.""" scan_fixations_count = 0 - aoi_fixations_count = {aoi: 0 for aoi in self.__expected_aois} + aoi_fixations_count = {aoi: 0 for aoi in self.__expected_aoi} for aoi_scan_step in self: -- cgit v1.1 From bd8cb794b3e6500783df86ce1add1fe6382b2f70 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 26 Sep 2023 11:48:27 +0200 Subject: Moving gaze movement positions drawing options into gaze movement identifier modules. --- src/argaze/ArFeatures.py | 14 ++++++------ src/argaze/GazeAnalysis/DeviationCircleCoverage.py | 7 +----- .../DispersionThresholdIdentification.py | 13 +++++++---- src/argaze/GazeAnalysis/FocusPointInside.py | 2 +- .../VelocityThresholdIdentification.py | 13 +++++++---- src/argaze/GazeFeatures.py | 10 ++++----- .../utils/demo_data/demo_gaze_analysis_setup.json | 26 ++++++++++++++++------ 7 files changed, 51 insertions(+), 34 deletions(-) diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index 122efe8..cdb7130 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -1022,13 +1022,6 @@ class ArFrame(): self.scan_path.draw(image, **draw_scan_path) - # Draw layers if required - if draw_layers is not None: - - for layer_name, draw_layer in draw_layers.items(): - - self.layers[layer_name].draw(image, **draw_layer) - # Draw current fixation if required if draw_fixations is not None and self.gaze_movement_identifier is not None: @@ -1039,6 +1032,13 @@ self.gaze_movement_identifier.current_saccade.draw(image, **draw_saccades) + # Draw layers if required + if draw_layers is not None: + + for layer_name, draw_layer in draw_layers.items(): + + self.layers[layer_name].draw(image, **draw_layer) + # Draw current gaze position if required if draw_gaze_positions is not None: diff --git a/src/argaze/GazeAnalysis/DeviationCircleCoverage.py b/src/argaze/GazeAnalysis/DeviationCircleCoverage.py index 6dadaba..d55d8c9 100644 --- a/src/argaze/GazeAnalysis/DeviationCircleCoverage.py +++ b/src/argaze/GazeAnalysis/DeviationCircleCoverage.py @@ -109,7 +109,7 @@ class AOIMatcher(GazeFeatures.AOIMatcher): return (None, None) - def draw(self, image: numpy.array, aoi_scene: AOIFeatures.AOIScene, draw_matched_fixation: dict = None, draw_matched_fixation_positions: dict = None, draw_matched_region: dict = None, draw_looked_aoi: dict = None, update_looked_aoi: bool = False, looked_aoi_name_color: tuple = None, looked_aoi_name_offset: tuple = (0, 0)): + def draw(self, image: numpy.array, aoi_scene: AOIFeatures.AOIScene, draw_matched_fixation: dict = None, draw_matched_region: dict = None, draw_looked_aoi: dict = None, update_looked_aoi: bool = False, looked_aoi_name_color: tuple = None, looked_aoi_name_offset: tuple = (0, 0)): """Draw matching into image. 
Parameters: @@ -132,11 +132,6 @@ class AOIMatcher(GazeFeatures.AOIMatcher): self.__matched_gaze_movement.draw(image, **draw_matched_fixation) - # Draw matched fixation positions if required - if draw_matched_fixation_positions is not None: - - self.__matched_gaze_movement.draw_positions(image, **draw_matched_fixation_positions) - # Draw matched aoi if self.looked_aoi.all() is not None: diff --git a/src/argaze/GazeAnalysis/DispersionThresholdIdentification.py b/src/argaze/GazeAnalysis/DispersionThresholdIdentification.py index 15fddf4..a7b9900 100644 --- a/src/argaze/GazeAnalysis/DispersionThresholdIdentification.py +++ b/src/argaze/GazeAnalysis/DispersionThresholdIdentification.py @@ -73,7 +73,7 @@ class Fixation(GazeFeatures.Fixation): return self - def draw(self, image: numpy.array, deviation_circle_color: tuple = None, duration_border_color: tuple = None, duration_factor: float = 1.): + def draw(self, image: numpy.array, deviation_circle_color: tuple = None, duration_border_color: tuple = None, duration_factor: float = 1., draw_positions: dict = None): """Draw fixation into image. Parameters: @@ -82,15 +82,20 @@ class Fixation(GazeFeatures.Fixation): duration_factor: how many pixels per duration unit """ + # Draw duration border if required + if duration_border_color is not None: + + cv2.circle(image, (int(self.focus[0]), int(self.focus[1])), int(self.deviation_max), duration_border_color, int(self.duration * duration_factor)) + # Draw deviation circle if required if deviation_circle_color is not None: cv2.circle(image, (int(self.focus[0]), int(self.focus[1])), int(self.deviation_max), deviation_circle_color, -1) - # Draw duration border if required - if duration_border_color is not None: + # Draw positions if required + if draw_positions is not None: - cv2.circle(image, (int(self.focus[0]), int(self.focus[1])), int(self.deviation_max), duration_border_color, int(self.duration * duration_factor)) + self.draw_positions(image, **draw_positions) @dataclass(frozen=True) class Saccade(GazeFeatures.Saccade): diff --git a/src/argaze/GazeAnalysis/FocusPointInside.py b/src/argaze/GazeAnalysis/FocusPointInside.py index b3651e4..88cfbed 100644 --- a/src/argaze/GazeAnalysis/FocusPointInside.py +++ b/src/argaze/GazeAnalysis/FocusPointInside.py @@ -54,7 +54,7 @@ class AOIMatcher(GazeFeatures.AOIMatcher): return (None, None) - def draw(self, image: numpy.array, aoi_scene: AOIFeatures.AOIScene, draw_matched_fixation: dict = None, draw_matched_fixation_positions: dict = None, draw_looked_aoi: dict = None, looked_aoi_name_color: tuple = None, looked_aoi_name_offset: tuple = (0, 0)): + def draw(self, image: numpy.array, aoi_scene: AOIFeatures.AOIScene, draw_matched_fixation: dict = None, draw_looked_aoi: dict = None, looked_aoi_name_color: tuple = None, looked_aoi_name_offset: tuple = (0, 0)): """Draw matching into image. 
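A sketch of the relocated drawing options: position drawing is now requested through the identifier module's own Fixation.draw. The image is a placeholder, 'fixation' is assumed to be the identifier's current fixation, and the colour values reuse the demo configuration in this series:

```python
import numpy

# Blank Full HD BGR image to draw into
image = numpy.zeros((1080, 1920, 3), dtype=numpy.uint8)

# Assuming 'fixation' is the identifier's current fixation instance
fixation.draw(image,
              deviation_circle_color=(255, 255, 255),
              duration_border_color=(127, 0, 127),
              duration_factor=1e-2,
              # position drawing options now ride along inside the fixation options
              draw_positions={"position_color": (0, 255, 255), "line_color": (0, 0, 0)})
```

Note the reordered drawing in the diff above: the duration border is now painted before the deviation circle, so the filled circle sits on top of it.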
Parameters: diff --git a/src/argaze/GazeAnalysis/VelocityThresholdIdentification.py b/src/argaze/GazeAnalysis/VelocityThresholdIdentification.py index 64931f5..d10f666 100644 --- a/src/argaze/GazeAnalysis/VelocityThresholdIdentification.py +++ b/src/argaze/GazeAnalysis/VelocityThresholdIdentification.py @@ -72,7 +72,7 @@ class Fixation(GazeFeatures.Fixation): return self - def draw(self, image: numpy.array, deviation_circle_color: tuple = None, duration_border_color: tuple = None, duration_factor: float = 1.): + def draw(self, image: numpy.array, deviation_circle_color: tuple = None, duration_border_color: tuple = None, duration_factor: float = 1., draw_positions: dict = None): """Draw fixation into image. Parameters: @@ -81,15 +81,20 @@ class Fixation(GazeFeatures.Fixation): duration_factor: how many pixels per duration unit """ + # Draw duration border if required + if duration_border_color is not None: + + cv2.circle(image, (int(self.focus[0]), int(self.focus[1])), int(self.deviation_max), duration_border_color, int(self.duration * duration_factor)) + # Draw deviation circle if required if deviation_circle_color is not None: cv2.circle(image, (int(self.focus[0]), int(self.focus[1])), int(self.deviation_max), deviation_circle_color, -1) - # Draw duration border if required - if duration_border_color is not None: + # Draw positions if required + if draw_positions is not None: - cv2.circle(image, (int(self.focus[0]), int(self.focus[1])), int(self.deviation_max), duration_border_color, int(self.duration * duration_factor)) + self.draw_positions(image, **draw_positions) @dataclass(frozen=True) class Saccade(GazeFeatures.Saccade): diff --git a/src/argaze/GazeFeatures.py b/src/argaze/GazeFeatures.py index 814753e..523bf2c 100644 --- a/src/argaze/GazeFeatures.py +++ b/src/argaze/GazeFeatures.py @@ -292,16 +292,16 @@ class GazeMovement(): ts_start, start_gaze_position = gaze_positions.pop_first() ts_next, next_gaze_position = gaze_positions.first - # Draw position if required - if position_color is not None: - - start_gaze_position.draw(image, position_color, draw_precision=False) - # Draw line between positions if required if line_color is not None: cv2.line(image, (int(start_gaze_position[0]), int(start_gaze_position[1])), (int(next_gaze_position[0]), int(next_gaze_position[1])), line_color, 1) + # Draw position if required + if position_color is not None: + + start_gaze_position.draw(image, position_color, draw_precision=False) + def draw(self, image: numpy.array, **kwargs): """Draw gaze movement into image.""" diff --git a/src/argaze/utils/demo_data/demo_gaze_analysis_setup.json b/src/argaze/utils/demo_data/demo_gaze_analysis_setup.json index 52945ae..fe5d197 100644 --- a/src/argaze/utils/demo_data/demo_gaze_analysis_setup.json +++ b/src/argaze/utils/demo_data/demo_gaze_analysis_setup.json @@ -8,6 +8,7 @@ "duration_min_threshold": 200 } }, + "filter_in_progress_identification": false, "scan_path": { "duration_max": 10000 }, @@ -59,8 +60,7 @@ }, "draw_saccades": { "line_color": [255, 0, 255] - }, - "deepness": 0 + } }, "draw_layers": { "main_layer": { @@ -72,11 +72,11 @@ }, "draw_aoi_matching": { "draw_matched_fixation": { - "deviation_circle_color": [255, 255, 255] - }, - "draw_matched_fixation_positions": { - "position_color": [0, 255, 255], - "line_color": [0, 0, 0] + "deviation_circle_color": [255, 255, 255], + "draw_positions": { + "position_color": [0, 255, 0], + "line_color": [0, 0, 0] + } }, "draw_matched_region": { "color": [0, 255, 0], @@ -91,6 +91,18 @@ } } }, + "draw_fixations": { 
+ "deviation_circle_color": [255, 255, 255], + "duration_border_color": [127, 0, 127], + "duration_factor": 1e-2, + "draw_positions": { + "position_color": [0, 255, 255], + "line_color": [0, 0, 0] + } + }, + "draw_saccades": { + "line_color": [255, 0, 255] + }, "draw_gaze_positions": { "color": [0, 255, 255], "size": 2 -- cgit v1.1 From 128ca5fb2d5124784e217f3137bcde8eb0e7446c Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 26 Sep 2023 11:49:55 +0200 Subject: Harmonizing documentation concepts. --- docs/user_guide/areas_of_interest/vision_cone_filtering.md | 2 +- .../aruco_markers_pipeline/aoi_3d_description.md | 2 +- .../user_guide/aruco_markers_pipeline/aoi_3d_projection.md | 2 +- .../gaze_analysis_pipeline/aoi_2d_description.md | 2 +- docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md | 14 +++++++------- .../gaze_analysis_pipeline/configuration_and_execution.md | 4 ++-- 6 files changed, 13 insertions(+), 13 deletions(-) diff --git a/docs/user_guide/areas_of_interest/vision_cone_filtering.md b/docs/user_guide/areas_of_interest/vision_cone_filtering.md index 7b29642..5c377bf 100644 --- a/docs/user_guide/areas_of_interest/vision_cone_filtering.md +++ b/docs/user_guide/areas_of_interest/vision_cone_filtering.md @@ -1,7 +1,7 @@ Vision cone filtering ===================== -The [AOI3DScene](../../argaze.md/#argaze.AreaOfInterest.AOI3DScene) provides cone clipping support in order to select only [AOI](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.AreaOfInterest) which are inside vision cone field. +The [AOI3DScene](../../argaze.md/#argaze.AreaOfInterest.AOI3DScene) provides cone clipping support in order to select only AOI which are inside vision cone field. ![Vision cone](../../img/vision_cone.png) diff --git a/docs/user_guide/aruco_markers_pipeline/aoi_3d_description.md b/docs/user_guide/aruco_markers_pipeline/aoi_3d_description.md index a2bb8d7..5a1a16e 100644 --- a/docs/user_guide/aruco_markers_pipeline/aoi_3d_description.md +++ b/docs/user_guide/aruco_markers_pipeline/aoi_3d_description.md @@ -1,7 +1,7 @@ Describe 3D AOI =============== -Once [ArUco markers are placed into a scene](aruco_markers_description.md), areas of interest need to be described into the same 3D referential. +Once [ArUco markers are placed into a scene](aruco_markers_description.md), [areas of interest (AOI)](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.AreaOfInterest) need to be described into the same 3D referential. In the example scene, each screen is considered as an area of interest more the blue triangle area inside the top screen. diff --git a/docs/user_guide/aruco_markers_pipeline/aoi_3d_projection.md b/docs/user_guide/aruco_markers_pipeline/aoi_3d_projection.md index d7df765..e96730a 100644 --- a/docs/user_guide/aruco_markers_pipeline/aoi_3d_projection.md +++ b/docs/user_guide/aruco_markers_pipeline/aoi_3d_projection.md @@ -44,7 +44,7 @@ The name of the [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer). Basically ### AOI Scene -The [AOIScene](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.AOIScene) defines a set of 3D [AreaOfInterest](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.AreaOfInterest) registered by name. +The set of 3D AOI into the layer as defined at [3D AOI description chapter](aoi_3d_description.md). 
## Add ArLayer to ArUcoCamera to project 3D AOI diff --git a/docs/user_guide/gaze_analysis_pipeline/aoi_2d_description.md b/docs/user_guide/gaze_analysis_pipeline/aoi_2d_description.md index 6cca7ce..7229a9d 100644 --- a/docs/user_guide/gaze_analysis_pipeline/aoi_2d_description.md +++ b/docs/user_guide/gaze_analysis_pipeline/aoi_2d_description.md @@ -1,7 +1,7 @@ Describe 2D AOI ================ -Once [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) is [configured](configuration_and_execution.md), areas of interest need to be described to know what is looked in frame. +Once [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) is [configured](configuration_and_execution.md), [areas of interest (AOI)](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.AreaOfInterest) need to be described to know what is looked in frame. ![2D AOI description](../../img/aoi_2d_description.png) diff --git a/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md b/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md index cce3fcb..59d62fd 100644 --- a/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md +++ b/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md @@ -57,22 +57,22 @@ The name of the [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer). Basically ### AOI Scene -The [AOIScene](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.AOIScene) defines a set of 2D [AreaOfInterest](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.AreaOfInterest) registered by name. +The set of 2D AOI into the layer as defined at [2D AOI description chapter](aoi_2d_description.md). ![AOI Scene](../../img/ar_layer_aoi_scene.png) ### AOI Matcher -The first [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) pipeline step aims to make match identified gaze movement with an AOI of the scene. +The first [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) pipeline step aims to make match identified gaze movement with a layer's AOI. ![AOI Matcher](../../img/ar_layer_aoi_matcher.png) -The matching algorithm can be selected by instantiating a particular AOIMatcher [from GazeAnalysis submodule](pipeline_modules/aoi_matchers.md) or [from another python package](advanced_topics/module_loading.md). +The matching algorithm can be selected by instantiating a particular [AOIMatcher from GazeAnalysis submodule](pipeline_modules/aoi_matchers.md) or [from another python package](advanced_topics/module_loading.md). In the example file, the choosen matching algorithm is the [Deviation Circle Coverage](../../argaze.md/#argaze.GazeAnalysis.DeviationCircleCoverage) which has one specific *coverage_threshold* attribute. !!! warning "Mandatory" - JSON *aoi_matcher* entry is mandatory. Otherwise, the AOIScanPath and AOIScanPathAnalyzers steps are disabled. + JSON *aoi_matcher* entry is mandatory. Otherwise, the [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath) and [AOIScanPathAnalyzers](../../argaze.md/#argaze.GazeFeatures.AOIScanPathAnalyzer) steps are disabled. ### AOI Scan Path @@ -80,17 +80,17 @@ The second [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) pipeline step a ![AOI Scan Path](../../img/ar_layer_aoi_scan_path.png) -Once identified gaze movements are matched to AOI, they are automatically appended to the AOIScanPath if required. +Once gaze movements are matched to AOI, they are automatically appended to the AOIScanPath if required. 
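A short sketch of the scan path container this step fills; the AOI names reuse the demo data, and the transition_matrix property is assumed to expose the pandas DataFrame rebuilt by the expected_aoi setter:

```python
from argaze import GazeFeatures

# Build an AOI scan path expecting two AOI, keeping at most 10 seconds of steps
aoi_scan_path = GazeFeatures.AOIScanPath(expected_aoi=["RedSquare", "GreenCircle"], duration_max=10000)

# Re-assigning expected_aoi clears the path and rebuilds the square
# transition matrix with one row and one column per expected AOI
aoi_scan_path.expected_aoi = ["RedSquare", "GreenCircle", "BlueTriangle"]

print(aoi_scan_path.transition_matrix.shape)  # (3, 3)
```

The TransitionMatrix analyzer patched earlier in this series normalizes this DataFrame row by row.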
The [AOIScanPath.duration_max](../../argaze.md/#argaze.GazeFeatures.AOIScanPath.duration_max) attribute is the duration from which older AOI scan steps are removed each time new AOI scan steps are added. !!! note "Optional" - JSON *aoi_scan_path* entry is not mandatory. If aoi_scan_path_analyzers entry is not empty, the AOIScanPath step is automatically enabled. + JSON *aoi_scan_path* entry is not mandatory. If aoi_scan_path_analyzers entry is not empty, the [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath) step is automatically enabled. ### AOI Scan Path Analyzers Finally, the last [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) pipeline step consists in passing the previously built [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath) to each loaded [AOIScanPathAnalyzer](../../argaze.md/#argaze.GazeFeatures.AOIScanPathAnalyzer). -Each analysis algorithm can be selected by instantiating a particular AOIScanPathAnalyzer [from GazeAnalysis submodule](pipeline_modules/aoi_scan_path_analyzers.md) or [from another python package](advanced_topics/module_loading.md). +Each analysis algorithm can be selected by instantiating a particular [AOIScanPathAnalyzer from GazeAnalysis submodule](pipeline_modules/aoi_scan_path_analyzers.md) or [from another python package](advanced_topics/module_loading.md). In the example file, the choosen analysis algorithms are the [Basic](../../argaze.md/#argaze.GazeAnalysis.Basic) module, the [TransitionMatrix](../../argaze.md/#argaze.GazeAnalysis.TransitionMatrix) module and the [NGram](../../argaze.md/#argaze.GazeAnalysis.NGram) module which has two specific *n_min* and *n_max* attributes. diff --git a/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md b/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md index bb8eeaa..7b59a9c 100644 --- a/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md +++ b/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md @@ -61,7 +61,7 @@ The first [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) pipeline step is ![Gaze Movement Identifier](../../img/ar_frame_gaze_movement_identifier.png) -The identification algorithm can be selected by instantiating a particular GazeMovementIdentifier [from GazeAnalysis submodule](pipeline_modules/gaze_movement_identifiers.md) or [from another python package](advanced_topics/module_loading.md). +The identification algorithm can be selected by instantiating a particular [GazeMovementIdentifier from GazeAnalysis submodule](pipeline_modules/gaze_movement_identifiers.md) or [from another python package](advanced_topics/module_loading.md). In the example file, the choosen identification algorithm is the [Dispersion Threshold Identification (I-DT)](../../argaze.md/#argaze.GazeAnalysis.DispersionThresholdIdentification) which has two specific *deviation_max_threshold* and *duration_min_threshold* attributes. @@ -88,7 +88,7 @@ The [ScanPath.duration_max](../../argaze.md/#argaze.GazeFeatures.ScanPath.durati Finally, the last [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) pipeline step consists in passing the previously built [ScanPath](../../argaze.md/#argaze.GazeFeatures.ScanPath) to each loaded [ScanPathAnalyzer](../../argaze.md/#argaze.GazeFeatures.ScanPathAnalyzer). -Each analysis algorithm can be selected by instantiating a particular ScanPathAnalyzer [from GazeAnalysis submodule](pipeline_modules/scan_path_analyzers.md) or [from another python package](advanced_topics/module_loading.md). 
+Each analysis algorithm can be selected by instantiating a particular [ScanPathAnalyzer from GazeAnalysis submodule](pipeline_modules/scan_path_analyzers.md) or [from another python package](advanced_topics/module_loading.md). In the example file, the choosen analysis algorithms are the [Basic](../../argaze.md/#argaze.GazeAnalysis.Basic) module and the [ExploitExploreRatio](../../argaze.md/#argaze.GazeAnalysis.ExploitExploreRatio) module which has one specific *short_fixation_duration_threshold* attribute. -- cgit v1.1 From 5b27713d162e76a205ee46cff25e5d8fe993a15a Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 26 Sep 2023 12:27:44 +0200 Subject: Fixing SVG loading. --- src/argaze/AreaOfInterest/AOI2DScene.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/src/argaze/AreaOfInterest/AOI2DScene.py b/src/argaze/AreaOfInterest/AOI2DScene.py index a726b23..f8599c5 100644 --- a/src/argaze/AreaOfInterest/AOI2DScene.py +++ b/src/argaze/AreaOfInterest/AOI2DScene.py @@ -67,11 +67,12 @@ class AOI2DScene(AOIFeatures.AOIScene): # Convert rect element into dict rect_dict = { - 'shape': 'rectangle', - 'x': float(rect.getAttribute('x')), - 'y': float(rect.getAttribute('y')), - 'width': float(rect.getAttribute('width')), - 'height': float(rect.getAttribute('height')) + "Rectangle": { + 'x': float(rect.getAttribute('x')), + 'y': float(rect.getAttribute('y')), + 'width': float(rect.getAttribute('width')), + 'height': float(rect.getAttribute('height')) + } } new_areas[rect.getAttribute('id')] = AOIFeatures.AreaOfInterest.from_dict(rect_dict) @@ -81,10 +82,11 @@ class AOI2DScene(AOIFeatures.AOIScene): # Convert circle element into dict circle_dict = { - 'shape': 'circle', - 'cx': float(circle.getAttribute('cx')), - 'cy': float(circle.getAttribute('cy')), - 'radius': float(circle.getAttribute('r')) + "Circle": { + 'cx': float(circle.getAttribute('cx')), + 'cy': float(circle.getAttribute('cy')), + 'radius': float(circle.getAttribute('r')) + } } new_areas[circle.getAttribute('id')] = AOIFeatures.AreaOfInterest.from_dict(circle_dict) -- cgit v1.1 From 1f16bf5c37b5fb5d44ed33d78f03e6fdeeac4013 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 26 Sep 2023 12:28:15 +0200 Subject: Updating JSON AOI 2D description demo. 
--- src/argaze/utils/demo_data/aoi_2d_scene.json | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/src/argaze/utils/demo_data/aoi_2d_scene.json b/src/argaze/utils/demo_data/aoi_2d_scene.json index a0726e8..ac58b63 100644 --- a/src/argaze/utils/demo_data/aoi_2d_scene.json +++ b/src/argaze/utils/demo_data/aoi_2d_scene.json @@ -1,5 +1,18 @@ { - "RedSquare": [[268, 203], [576, 203], [576, 510], [268, 510]], "BlueTriangle":[[960, 664], [1113, 971], [806, 971]], - "GreenCircle":[[1497, 203], [1527, 206], [1556, 215], [1582, 229], [1605, 248], [1624, 271], [1639, 298], [1647, 327], [1650, 357], [1647, 387], [1639, 415], [1624, 442], [1605, 465], [1582, 484], [1556, 498], [1527, 507], [1497, 510], [1467, 507], [1438, 498], [1411, 484], [1388, 465], [1369, 442], [1355, 415], [1346, 387], [1343, 357], [1346, 327], [1355, 298], [1369, 271], [1388, 248], [1411, 229], [1438, 215], [1467, 206]] + "RedSquare": { + "Rectangle": { + "x": 268, + "y": 203, + "width": 308, + "height": 308 + } + }, + "GreenCircle": { + "Circle": { + "cx": 1497, + "cy": 356, + "radius": 153 + } + } } \ No newline at end of file -- cgit v1.1 From 69bcadf8f7b969ddd0fb284e86788cd658b897ab Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 26 Sep 2023 12:28:34 +0200 Subject: Updating JSON AOI 2D description documentation. --- .../gaze_analysis_pipeline/aoi_2d_description.md | 39 ++++++---------------- 1 file changed, 11 insertions(+), 28 deletions(-) diff --git a/docs/user_guide/gaze_analysis_pipeline/aoi_2d_description.md b/docs/user_guide/gaze_analysis_pipeline/aoi_2d_description.md index 7229a9d..0d5dbf0 100644 --- a/docs/user_guide/gaze_analysis_pipeline/aoi_2d_description.md +++ b/docs/user_guide/gaze_analysis_pipeline/aoi_2d_description.md @@ -19,11 +19,9 @@ SVG file format could be exported from most vector graphics editors. ``` xml - - - - - + + + ``` @@ -39,36 +37,21 @@ JSON file format allows to describe AOI. ``` json { - "Triangle" : [[1288.1, 189.466], [1991.24, 3399.34], [584.958, 3399.34]], - "BlueRectangle": { - "Rectangle": { - "x": 1257, - "y": 1905.18, - "width": 604.169, - "height": 988.564 - } - }, + "BlueTriangle":[[960, 664], [1113, 971], [806, 971]], "RedSquare": { "Rectangle": { - "x": 623.609, - "y": 658.357, - "width": 803.15, - "height": 803.15 + "x": 268, + "y": 203, + "width": 308, + "height": 308 } }, "GreenCircle": { "Circle": { - "cx": 675.77, - "cy": 2163.5, - "radius": 393.109 + "cx": 1497, + "cy": 356, + "radius": 153 } - }, - "PinkCircle": { - "Circle": { - "cx": 1902.02, - "cy": 879.316, - "radius": 195.313 - } } } ``` -- cgit v1.1 From b7b9b3ed2149b3bf66b7192b71bdf0bf6ce7dedf Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 26 Sep 2023 12:47:23 +0200 Subject: Updating drawing options changes. 
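Both the fixed SVG loader and the updated demo file now converge on the same nested shape description consumed by AreaOfInterest.from_dict; here is a sketch reusing the demo values above:

```python
from argaze.AreaOfInterest import AOIFeatures

# Rectangle description, as now produced by the SVG loader
red_square = AOIFeatures.AreaOfInterest.from_dict({
    "Rectangle": {"x": 268, "y": 203, "width": 308, "height": 308}
})

# Circle description
green_circle = AOIFeatures.AreaOfInterest.from_dict({
    "Circle": {"cx": 1497, "cy": 356, "radius": 153}
})
```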
--- .../gaze_analysis_pipeline/visualisation.md | 25 ++++++++++++++++------ 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/docs/user_guide/gaze_analysis_pipeline/visualisation.md b/docs/user_guide/gaze_analysis_pipeline/visualisation.md index 99f0259..c9cbf2c 100644 --- a/docs/user_guide/gaze_analysis_pipeline/visualisation.md +++ b/docs/user_guide/gaze_analysis_pipeline/visualisation.md @@ -25,8 +25,7 @@ Here is an extract from the JSON ArFrame configuration file with a sample where }, "draw_saccades": { "line_color": [255, 0, 255] - }, - "deepness": 0 + } }, "draw_layers": { "MyLayer": { @@ -38,11 +37,11 @@ Here is an extract from the JSON ArFrame configuration file with a sample where }, "draw_aoi_matching": { "draw_matched_fixation": { - "deviation_circle_color": [255, 255, 255] - }, - "draw_matched_fixation_positions": { - "position_color": [0, 255, 255], - "line_color": [0, 0, 0] + "deviation_circle_color": [255, 255, 255], + "draw_positions": { + "position_color": [0, 255, 0], + "line_color": [0, 0, 0] + } }, "draw_matched_region": { "color": [0, 255, 0], @@ -57,6 +56,18 @@ Here is an extract from the JSON ArFrame configuration file with a sample where } } }, + "draw_fixations": { + "deviation_circle_color": [255, 255, 255], + "duration_border_color": [127, 0, 127], + "duration_factor": 1e-2, + "draw_positions": { + "position_color": [0, 255, 255], + "line_color": [0, 0, 0] + } + }, + "draw_saccades": { + "line_color": [255, 0, 255] + }, "draw_gaze_positions": { "color": [0, 255, 255], "size": 2 -- cgit v1.1 From 460069c2f16028ad1da8ae8669b084d6c4100b78 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 26 Sep 2023 12:47:42 +0200 Subject: Improving module loading documentation. --- .../gaze_analysis_pipeline/advanced_topics/module_loading.md | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/docs/user_guide/gaze_analysis_pipeline/advanced_topics/module_loading.md b/docs/user_guide/gaze_analysis_pipeline/advanced_topics/module_loading.md index 0b45368..7796f45 100644 --- a/docs/user_guide/gaze_analysis_pipeline/advanced_topics/module_loading.md +++ b/docs/user_guide/gaze_analysis_pipeline/advanced_topics/module_loading.md @@ -1,7 +1,7 @@ Loading modules from another package ==================================== -It possible to load GazeMovementIdentifier, ScanPathAnalyzer or AOIScanPathAnalyzer modules from another [python package](https://docs.python.org/3/tutorial/modules.html#packages). +It possible to load [GazeMovementIdentifier](../../../argaze.md/#argaze.GazeFeatures.GazeMovementIdentifier), [ScanPathAnalyzer](../../../argaze.md/#argaze.GazeFeatures.ScanPathAnalyzer), [AOIMatcher](../../../argaze.md/#argaze.GazeFeatures.AOIMatcher) or [AOIScanPathAnalyzer](../../../argaze.md/#argaze.GazeFeatures.AOIScanPathAnalyzer) modules from another [python package](https://docs.python.org/3/tutorial/modules.html#packages). To do so, simply prepend the package where to find the module into the JSON configuration file: @@ -20,6 +20,12 @@ To do so, simply prepend the package where to find the module into the JSON conf } } ... + "aoi_matcher": { + "my_package.MyAOIMatcherAlgorithm": { + "specific_plugin_parameter": 0 + } + } + ... "aoi_scan_path_analyzers": { "my_package.MyAOIScanPathAnalyzerAlgorithm": { "specific_plugin_parameter": 0 @@ -28,7 +34,7 @@ To do so, simply prepend the package where to find the module into the JSON conf } ``` -Then, load your package from the python script where the ArFrame is created. 
+Then, load your package from the python script where the [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) is created. ```python from argaze import ArFeatures -- cgit v1.1 From 69d9b4d26d2956d65e0c24fe262071b0c4569c90 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 26 Sep 2023 13:03:25 +0200 Subject: Removing drawing fixation positions option from FocusPointInside module. --- src/argaze/GazeAnalysis/FocusPointInside.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/argaze/GazeAnalysis/FocusPointInside.py b/src/argaze/GazeAnalysis/FocusPointInside.py index 88cfbed..24c319e 100644 --- a/src/argaze/GazeAnalysis/FocusPointInside.py +++ b/src/argaze/GazeAnalysis/FocusPointInside.py @@ -76,11 +76,6 @@ class AOIMatcher(GazeFeatures.AOIMatcher): self.__matched_gaze_movement.draw(image, **draw_matched_fixation) - # Draw matched fixation positions if required - if draw_matched_fixation_positions is not None: - - self.__matched_gaze_movement.draw_positions(image, **draw_matched_fixation_positions) - # Draw matched aoi if self.looked_aoi.all() is not None: -- cgit v1.1 From 73a71de88dc34374a0e0f6366c3f13714438dfe3 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 26 Sep 2023 13:03:55 +0200 Subject: Updating aruco markers demo configuration. --- .../utils/demo_data/demo_aruco_markers_setup.json | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/src/argaze/utils/demo_data/demo_aruco_markers_setup.json b/src/argaze/utils/demo_data/demo_aruco_markers_setup.json index 5168297..e2edc8c 100644 --- a/src/argaze/utils/demo_data/demo_aruco_markers_setup.json +++ b/src/argaze/utils/demo_data/demo_aruco_markers_setup.json @@ -57,7 +57,7 @@ "background": "frame_background.jpg", "gaze_movement_identifier": { "DispersionThresholdIdentification": { - "deviation_max_threshold": 25, + "deviation_max_threshold": 50, "duration_min_threshold": 200 } }, @@ -80,12 +80,12 @@ "heatmap_weight": 0.5, "draw_scan_path": { "draw_fixations": { - "deviation_circle_color": [0, 255, 255], - "duration_border_color": [0, 127, 127], + "deviation_circle_color": [255, 0, 255], + "duration_border_color": [127, 0, 127], "duration_factor": 1e-2 }, "draw_saccades": { - "line_color": [0, 255, 255] + "line_color": [255, 0, 255] } }, "draw_layers": { @@ -100,10 +100,6 @@ "draw_matched_fixation": { "deviation_circle_color": [255, 255, 255] }, - "draw_matched_fixation_positions": { - "position_color": [0, 255, 255], - "line_color": [0, 0, 0] - }, "draw_looked_aoi": { "color": [0, 255, 0], "border_size": 2 @@ -113,6 +109,15 @@ } } }, + "draw_fixations": { + "deviation_circle_color": [255, 255, 255], + "duration_border_color": [127, 0, 127], + "duration_factor": 1e-2, + "draw_positions": { + "position_color": [0, 255, 255], + "line_color": [0, 0, 0] + } + }, "draw_gaze_positions": { "color": [0, 255, 255], "size": 2 -- cgit v1.1 From d06c56411a6a72fdd59d7da13308c20e8fbd77a2 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 26 Sep 2023 13:04:13 +0200 Subject: Minor documentation change. 
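To close the module-loading chapter above: the plugin package must be importable before the configuration that references it is loaded. A minimal sketch, where 'my_package' is the documentation's hypothetical package:

```python
from argaze import ArFeatures

# Make the plugin package importable first ('my_package' is hypothetical)
import my_package

# The configuration can now reference e.g. my_package.MyAOIMatcherAlgorithm
ar_frame = ArFeatures.ArFrame.from_json('./configuration.json')
```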
--- docs/user_guide/aruco_markers_pipeline/aruco_markers_description.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/user_guide/aruco_markers_pipeline/aruco_markers_description.md b/docs/user_guide/aruco_markers_pipeline/aruco_markers_description.md index 1c13013..b3ea2bb 100644 --- a/docs/user_guide/aruco_markers_pipeline/aruco_markers_description.md +++ b/docs/user_guide/aruco_markers_pipeline/aruco_markers_description.md @@ -7,7 +7,7 @@ Here is an example scene where markers are surrounding a multi-screen workspace ![Scene](../../img/scene.png) -## Print ArUco markers from a ArUco dictionary +## Print ArUco markers from an ArUco dictionary ArUco markers always belongs to a set of markers called ArUco markers dictionary. -- cgit v1.1 From 99a9baa109723f34af3372ed9fefc9557a592ff9 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 26 Sep 2023 14:11:42 +0200 Subject: Indicating how to load and display optic parameters. --- .../optic_parameters_calibration.md | 58 ++++++++++++++++++++++ 1 file changed, 58 insertions(+) diff --git a/docs/user_guide/aruco_markers_pipeline/advanced_topics/optic_parameters_calibration.md b/docs/user_guide/aruco_markers_pipeline/advanced_topics/optic_parameters_calibration.md index fbe06d1..db9cb73 100644 --- a/docs/user_guide/aruco_markers_pipeline/advanced_topics/optic_parameters_calibration.md +++ b/docs/user_guide/aruco_markers_pipeline/advanced_topics/optic_parameters_calibration.md @@ -131,3 +131,61 @@ Below, an optic_parameters JSON file example: ] } ``` + +## Load and display optic parameters + +[ArUcoCamera.detector.optic_parameters](../../argaze.md/#argaze.ArUcoMarkers.ArUcoOpticCalibrator.OpticParameters) can be enabled thanks to a dedicated JSON entry. + +Here is an extract from a JSON [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) configuration file where optic parameters are loaded and displayed: + +```json +{ + "name": "My Full HD Camera", + "size": [1920, 1080], + "aruco_detector": { + "dictionary": "DICT_APRILTAG_16h5", + "marker_size": 5, + "optic_parameters": { + "rms": 0.6688921504088245, + "dimensions": [ + 1920, + 1080 + ], + "K": [ + [ + 1135.6524381415752, + 0.0, + 956.0685325355497 + ], + [ + 0.0, + 1135.9272506869524, + 560.059099810324 + ], + [ + 0.0, + 0.0, + 1.0 + ] + ], + "D": [ + 0.01655492265003404, + 0.1985524264972037, + 0.002129965902489484, + -0.0019528582922179365, + -0.5792910353639452 + ] + } + }, + ... + "image_parameters": { + ... + "draw_optic_parameters_grid": { + "width": 192, + "height": 108, + "z": 100, + "point_size": 1, + "point_color": [0, 0, 255] + } + } +``` \ No newline at end of file -- cgit v1.1 From 6598dc8ec081d56f8614d19f916731deb0bcf09f Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 26 Sep 2023 14:55:57 +0200 Subject: Harmonizing JSON entry description format. Improving ArUco markers pipeline documentation. 
--- .../optic_parameters_calibration.md | 4 +-- .../aruco_markers_pipeline/aoi_3d_projection.md | 36 +++++++++++++--------- .../configuration_and_execution.md | 20 ++++++------ .../aruco_markers_pipeline/pose_estimation.md | 16 ++++++---- .../gaze_analysis_pipeline/aoi_analysis.md | 20 +++++++----- .../configuration_and_execution.md | 10 +++--- 6 files changed, 61 insertions(+), 45 deletions(-) diff --git a/docs/user_guide/aruco_markers_pipeline/advanced_topics/optic_parameters_calibration.md b/docs/user_guide/aruco_markers_pipeline/advanced_topics/optic_parameters_calibration.md index db9cb73..0a7ec4d 100644 --- a/docs/user_guide/aruco_markers_pipeline/advanced_topics/optic_parameters_calibration.md +++ b/docs/user_guide/aruco_markers_pipeline/advanced_topics/optic_parameters_calibration.md @@ -136,11 +136,11 @@ Below, an optic_parameters JSON file example: [ArUcoCamera.detector.optic_parameters](../../argaze.md/#argaze.ArUcoMarkers.ArUcoOpticCalibrator.OpticParameters) can be enabled thanks to a dedicated JSON entry. -Here is an extract from a JSON [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) configuration file where optic parameters are loaded and displayed: +Here is an extract from the JSON [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) configuration file where optic parameters are loaded and displayed: ```json { - "name": "My Full HD Camera", + "name": "My FullHD Camera", "size": [1920, 1080], "aruco_detector": { "dictionary": "DICT_APRILTAG_16h5", diff --git a/docs/user_guide/aruco_markers_pipeline/aoi_3d_projection.md b/docs/user_guide/aruco_markers_pipeline/aoi_3d_projection.md index e96730a..7f98aa2 100644 --- a/docs/user_guide/aruco_markers_pipeline/aoi_3d_projection.md +++ b/docs/user_guide/aruco_markers_pipeline/aoi_3d_projection.md @@ -1,13 +1,13 @@ -Project AOI into camera frame -============================= +Project 3D AOI into camera frame +================================ Once [ArUcoScene pose is estimated](pose_estimation.md) and [3D AOI are described](aoi_3d_description.md), AOI can be projected into [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) frame. ![3D AOI projection](../../img/aruco_camera_aoi_projection.png) -## Add ArLayer to ArUcoScene to load AOI +## Add ArLayer to ArUcoScene to load 3D AOI -The [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) class allows to load areas of interest description. An [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) instance can contains multiples [ArLayers](../../argaze.md/#argaze.ArFeatures.ArLayer). +The [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) class allows to load 3D AOI description. Here is the previous extract where one layer is added to the [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) configuration: @@ -38,15 +38,19 @@ Here is the previous extract where one layer is added to the [ArUcoScene](../../ Now, let's understand the meaning of each JSON entry. -### "MyLayer" +### *layers* -The name of the [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer). Basically useful for visualisation purpose. +An [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) instance can contains multiples [ArLayers](../../argaze.md/#argaze.ArFeatures.ArLayer) stored by name. -### AOI Scene +### MyLayer + +The name of an [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer). Basically useful for visualisation purpose. + +### *aoi_scene* The set of 3D AOI into the layer as defined at [3D AOI description chapter](aoi_3d_description.md). 
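As a sketch of what such a scene layer holds, the equivalent 3D AOI scene can also be built programmatically; the screen coordinates are illustrative:

```python
from argaze.AreaOfInterest import AOIFeatures, AOI3DScene

# One named 3D AOI, equivalent to an 'aoi_scene' JSON entry of a scene layer
aoi_3d_scene = AOI3DScene.AOI3DScene({
    "MyScreen": AOIFeatures.AreaOfInterest([[2.5, 2.5, -0.5], [37.5, 2.5, -0.5],
                                            [37.5, 27.5, -0.5], [2.5, 27.5, -0.5]])
})
```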
-## Add ArLayer to ArUcoCamera to project 3D AOI +## Add ArLayer to ArUcoCamera to project 3D AOI into Here is the previous extract where one layer is added to the [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) and displayed: @@ -91,9 +95,13 @@ Here is the previous extract where one layer is added to the [ArUcoCamera](../.. Now, let's understand the meaning of each JSON entry. -### "MyLayer" +### *layers* + +An [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) instance can contains multiples [ArLayers](../../argaze.md/#argaze.ArFeatures.ArLayer) stored by name. + +### MyLayer -The name of the [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer). Basically useful for visualisation purpose. +The name of an [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer). Basically useful for visualisation purpose. !!! warning "Layer name policy" @@ -103,11 +111,11 @@ The name of the [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer). Basically [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) layers are projected into their dedicated [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) layers when calling the [ArUcoCamera.watch](../../argaze.md/#argaze.ArFeatures.ArCamera.watch) method. -## Add 2D AOI analysis +## Add AOI analysis features to ArUcoCamera -When a scene layer is projected into a camera layer, it means that the 3D [ArLayer.aoi_scene](../../argaze.md/#argaze.ArFeatures.ArLayer.aoi_scene) description of the scene becomes the 2D camera's [ArLayer.aoi_scene](../../argaze.md/#argaze.ArFeatures.ArLayer.aoi_scene) description of the camera. +When a scene layer is projected into a camera layer, it means that the 3D scene's AOI are transformed into 2D camera's AOI. -Therefore, it means that [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) benefits from all the services described in [2D AOI analysis pipeline section](../gaze_analysis_pipeline/aoi_analysis.md). +Therefore, it means that [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) benefits from all the services described in [AOI analysis pipeline section](../gaze_analysis_pipeline/aoi_analysis.md). Here is the previous extract where AOI matcher, AOI scan path and AOI scan path analyzers are added to the [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) layer: @@ -156,4 +164,4 @@ Here is the previous extract where AOI matcher, AOI scan path and AOI scan path !!! warning - Adding scan path and scan path analyzers to an [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) layer doesn't make sense if the camera is moving. + Adding scan path and scan path analyzers to an [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) layer doesn't make sense as the space viewed thru camera frame doesn't necessary reflect the space the gaze is covering. diff --git a/docs/user_guide/aruco_markers_pipeline/configuration_and_execution.md b/docs/user_guide/aruco_markers_pipeline/configuration_and_execution.md index 6bf84a9..2205ed2 100644 --- a/docs/user_guide/aruco_markers_pipeline/configuration_and_execution.md +++ b/docs/user_guide/aruco_markers_pipeline/configuration_and_execution.md @@ -62,15 +62,15 @@ aruco_camera = ArUcoCamera.ArUcoCamera.from_json('./configuration.json') Now, let's understand the meaning of each JSON entry. 
-### Name - *inherited from [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame)* +### *name - inherited from [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame)* The name of the [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) frame. Basically useful for visualisation purpose. -### Size - *inherited from [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame)* +### *size - inherited from [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame)* The size of the [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) frame in pixels. Be aware that gaze positions have to be in the same range of value to be projected in. -### ArUco Detector +### *aruco_detector* The first [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) pipeline step is to detect ArUco markers inside input image and estimate their poses. @@ -81,21 +81,21 @@ The [ArUcoDetector](../../argaze.md/#argaze.ArUcoMarkers.ArUcoDetector) is in ch !!! warning "Mandatory" JSON *aruco_detector* entry is mandatory. -### Gaze Movement Identifier - *inherited from [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame)* +### *gaze_movement_identifier - inherited from [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame)* The first [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) pipeline step dedicated to identify fixations or saccades from consecutive timestamped gaze positions. ![Gaze movement identification](../../img/aruco_camera_gaze_movement_identification.png) -### Image parameters - *inherited from [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame)* +### *image_parameters - inherited from [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame)* The usual [ArFrame visualisation parameters](../gaze_analysis_pipeline/visualisation.md) plus one additional *draw_detected_markers* field. ## Pipeline execution -### Detect ArUco markers, estimate scene pose and project AOI +### Detect ArUco markers, estimate scene pose and project 3D AOI -Pass each camera image to [ArUcoCamera.watch](../../argaze.md/#argaze.ArFeatures.ArCamera.watch) method to execute the whole pipeline dedicated to ArUco markers detection, scene pose estimation and AOI projection. +Pass each camera image to [ArUcoCamera.watch](../../argaze.md/#argaze.ArFeatures.ArCamera.watch) method to execute the whole pipeline dedicated to ArUco markers detection, scene pose estimation and 3D AOI projection. ```python # Assuming that Full HD (1920x1080) video stream or file is opened @@ -107,10 +107,10 @@ Pass each camera image to [ArUcoCamera.watch](../../argaze.md/#argaze.ArFeatures # Capture image from video stream of file image = video_capture.read() - # Detect ArUco markers, estimate scene pose then, project AOI into camera frame + # Detect ArUco markers, estimate scene pose then, project 3D AOI into camera frame aruco_camera.watch(image) - # Display ArUcoCamera frame image to display detected ArUco markers, scene pose, AOI projection and ArFrame visualisation. + # Display ArUcoCamera frame image to display detected ArUco markers, scene pose, 2D AOI projection and ArFrame visualisation. ... 
aruco_camera.image() ``` @@ -135,4 +135,4 @@ Particularly, timestamped gaze positions can be passed one by one to [ArUcoCamer At this point, the [ArUcoCamera.watch](../../argaze.md/#argaze.ArFeatures.ArCamera.watch) method only detects ArUco markers and the [ArUcoCamera.look](../../argaze.md/#argaze.ArFeatures.ArCamera.look) method only process gaze movement identification without any AOI support as no scene description is provided into the JSON configuration file. - Read the next chapters to learn [how to estimate scene pose](pose_estimation.md) and [how to project AOI](aoi_3d_projection.md). \ No newline at end of file + Read the next chapters to learn [how to estimate scene pose](pose_estimation.md) and [how to project 3D AOI](aoi_3d_projection.md). \ No newline at end of file diff --git a/docs/user_guide/aruco_markers_pipeline/pose_estimation.md b/docs/user_guide/aruco_markers_pipeline/pose_estimation.md index d7da336..5dcde6f 100644 --- a/docs/user_guide/aruco_markers_pipeline/pose_estimation.md +++ b/docs/user_guide/aruco_markers_pipeline/pose_estimation.md @@ -1,13 +1,13 @@ Estimate scene pose =================== -An [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) class defines a space with [ArUco markers inside](aruco_markers_description.md) helping to estimate scene pose when they are watched by [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera). +Once [ArUco markers are placed into a scene](aruco_markers_description.md) and [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) is [configured](configuration_and_execution.md), scene pose can be estimated. ![Scene pose estimation](../../img/aruco_camera_pose_estimation.png) ## Add ArUcoScene to ArUcoCamera JSON configuration file -An [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) instance can contains multiples [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene). +An [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) class defines a space with [ArUco markers inside](aruco_markers_description.md) helping to estimate scene pose when they are watched by [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera). Here is an extract from the JSON [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) configuration file with a sample where one scene is added and displayed: @@ -65,11 +65,15 @@ Here is an extract from the JSON [ArUcoCamera](../../argaze.md/#argaze.ArUcoMark Now, let's understand the meaning of each JSON entry. -### "MyScene" +### *scenes* + +An [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) instance can contains multiples [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) stored by name. + +### MyScene -The name of the [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene). Basically useful for visualisation purpose. +The name of an [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene). Basically useful for visualisation purpose. -### ArUco markers group +### *aruco_markers_group* The 3D places of ArUco markers into the scene as defined at [ArUco markers description chapter](aruco_markers_description.md). Thanks to this description, it is possible to estimate the pose of [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) in [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) frame. 
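For reference, a minimal sketch of the feeding pattern mentioned above, assuming 'aruco_camera' is the ArUcoCamera loaded earlier and that ArUcoCamera.look mirrors the ArFrame.look signature:

```python
from argaze import GazeFeatures

# Hypothetical gaze source: (timestamp in ms, (x, y) in camera image pixels)
for timestamp, pixel in [(0, (960, 540)), (40, (962, 538))]:

    # Each timestamped gaze position goes through the camera frame pipeline
    aruco_camera.look(timestamp, GazeFeatures.GazePosition(pixel))
```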
@@ -77,6 +81,6 @@ The 3D places of ArUco markers into the scene as defined at [ArUco markers descr [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) pose estimation is done when calling the [ArUcoCamera.watch](../../argaze.md/#argaze.ArFeatures.ArCamera.watch) method. -### Draw scenes +### *draw_scenes* The drawing parameters of each loaded [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) in [ArUcoCamera.image](../../argaze.md/#argaze.ArFeatures.ArFrame.image). diff --git a/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md b/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md index 59d62fd..9d2b3df 100644 --- a/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md +++ b/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md @@ -1,13 +1,13 @@ Enable AOI analysis =================== -The [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) class defines a space where to make matching of gaze movements with AOI and inside which those matchings need to be analyzed. +Once [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) is [configured](configuration_and_execution.md) and [2D AOI are described](aoi_2d_description.md), gaze movement can be matched with AOI to build an AOI scan path before analyze it. ![Layer](../../img/ar_layer.png) ## Add ArLayer to ArFrame JSON configuration file -An [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) instance can contains multiples [ArLayers](../../argaze.md/#argaze.ArFeatures.ArLayer). +The [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) class defines a space where to make matching of gaze movements with AOI and inside which those matchings need to be analyzed. Here is an extract from the JSON [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) configuration file with a sample where one layer is added: @@ -51,17 +51,21 @@ Here is an extract from the JSON [ArFrame](../../argaze.md/#argaze.ArFeatures.Ar Now, let's understand the meaning of each JSON entry. -### "MyLayer" +### *layers* + +An [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) instance can contains multiples [ArLayers](../../argaze.md/#argaze.ArFeatures.ArLayer) stored by name. + +### MyLayer -The name of the [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer). Basically useful for visualisation purpose. +The name of an [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer). Basically useful for visualisation purpose. -### AOI Scene +### *aoi_scene* The set of 2D AOI into the layer as defined at [2D AOI description chapter](aoi_2d_description.md). ![AOI Scene](../../img/ar_layer_aoi_scene.png) -### AOI Matcher +### *aoi_matcher* The first [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) pipeline step aims to make match identified gaze movement with a layer's AOI. @@ -74,7 +78,7 @@ In the example file, the choosen matching algorithm is the [Deviation Circle Cov !!! warning "Mandatory" JSON *aoi_matcher* entry is mandatory. Otherwise, the [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath) and [AOIScanPathAnalyzers](../../argaze.md/#argaze.GazeFeatures.AOIScanPathAnalyzer) steps are disabled. -### AOI Scan Path +### *aoi_scan_path* The second [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) pipeline step aims to build a [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath) defined as a list of [AOIScanSteps](../../argaze.md/#argaze.GazeFeatures.AOIScanStep) made by a set of successive fixations/saccades onto a same AOI. 
@@ -87,7 +91,7 @@ The [AOIScanPath.duration_max](../../argaze.md/#argaze.GazeFeatures.AOIScanPath.
 
 !!! note "Optional"
	JSON *aoi_scan_path* entry is not mandatory. If the aoi_scan_path_analyzers entry is not empty, the [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath) step is automatically enabled.
 
-### AOI Scan Path Analyzers
+### *aoi_scan_path_analyzers*
 
 Finally, the last [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) pipeline step consists of passing the previously built [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath) to each loaded [AOIScanPathAnalyzer](../../argaze.md/#argaze.GazeFeatures.AOIScanPathAnalyzer).
diff --git a/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md b/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md
index 7b59a9c..8ddd97a 100644
--- a/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md
+++ b/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md
@@ -44,18 +44,18 @@ ar_frame = ArFeatures.ArFrame.from_json('./configuration.json')
 
 Now, let's understand the meaning of each JSON entry.
 
-### Name
+### *name*
 
 The name of the [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame). Mainly useful for visualisation purposes.
 
-### Size
+### *size*
 
 The size of the [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) defines the dimensions of the rectangular area where gaze positions are projected. Be aware that gaze positions have to be in the same range of values to be projected into it.
 
 !!! warning "Free spatial unit"
	Gaze positions can either be integer or float, in pixels, millimeters or whatever you need. The only concern is that all spatial values used in further configurations have to be in the same unit.
 
-### Gaze Movement Identifier
+### *gaze_movement_identifier*
 
 The first [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) pipeline step is to identify fixations or saccades from consecutive timestamped gaze positions.
 
@@ -71,7 +71,7 @@ In the example file, the choosen identification algorithm is the [Dispersion Thr
 
 !!! warning "Mandatory"
	JSON *gaze_movement_identifier* entry is mandatory. Otherwise, the ScanPath and ScanPathAnalyzers steps are disabled.
 
-### Scan Path
+### *scan_path*
 
 The second [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) pipeline step aims to build a [ScanPath](../../argaze.md/#argaze.GazeFeatures.ScanPath), defined as a list of [ScanSteps](../../argaze.md/#argaze.GazeFeatures.ScanStep) made by a fixation and a consecutive saccade.
 
@@ -84,7 +84,7 @@ The [ScanPath.duration_max](../../argaze.md/#argaze.GazeFeatures.ScanPath.durati
 
 !!! note "Optional"
	JSON *scan_path* entry is not mandatory. If the scan_path_analyzers entry is not empty, the ScanPath step is automatically enabled.
 
-### Scan Path Analyzers
+### *scan_path_analyzers*
 
 Finally, the last [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) pipeline step consists of passing the previously built [ScanPath](../../argaze.md/#argaze.GazeFeatures.ScanPath) to each loaded [ScanPathAnalyzer](../../argaze.md/#argaze.GazeFeatures.ScanPathAnalyzer).
-- 
cgit v1.1

From 611ab703f970d0403c73ea67a1b2e70fa4d6da27 Mon Sep 17 00:00:00 2001
From: Théo de la Hogue
Date: Tue, 26 Sep 2023 16:46:47 +0200
Subject: Renaming a documentation chapter file.
---
 .../aruco_markers_pipeline/aoi_3d_frame.md | 102 +++++++++++++++++++++
 .../user_guide/aruco_markers_pipeline/aoi_frame.md | 64 -------------
 mkdocs.yml | 1 +
 3 files changed, 103 insertions(+), 64 deletions(-)
 create mode 100644 docs/user_guide/aruco_markers_pipeline/aoi_3d_frame.md
 delete mode 100644 docs/user_guide/aruco_markers_pipeline/aoi_frame.md

diff --git a/docs/user_guide/aruco_markers_pipeline/aoi_3d_frame.md b/docs/user_guide/aruco_markers_pipeline/aoi_3d_frame.md
new file mode 100644
index 0000000..47dc73c
--- /dev/null
+++ b/docs/user_guide/aruco_markers_pipeline/aoi_3d_frame.md
@@ -0,0 +1,102 @@
+Define a 3D AOI as a frame
+==========================
+
+When a 3D AOI of the scene contains other coplanar 3D AOI, like a screen with GUI elements displayed on it, it is better to describe them as 2D AOI inside a 2D coordinate system related to the containing 3D AOI.
+
+## Add ArFrame to ArUcoScene
+
+The [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) class defines a rectangular area where timestamped gaze positions are projected and inside which they need to be analyzed.
+
+Here is the previous extract where the "MyScreen" AOI is defined as a frame in the [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) configuration:
+
+```json
+{
+	"name": "My FullHD camera",
+	"size": [1920, 1080],
+	...
+	"scenes": {
+		"MyScene" : {
+			"aruco_markers_group": {
+				...
+			},
+			"layers": {
+				"MyLayer": {
+					"aoi_scene": {
+						"MyScreen": [[2.5, 2.5, -0.5], [37.5, 2.5, -0.5], [37.5, 27.5, -0.5], [2.5, 27.5, -0.5]]
+					}
+				}
+			},
+			"frames": {
+				"MyScreen": {
+					"size": [350, 250],
+					"layers": {
+						"MyLayer": {
+							"aoi_scene": {
+								"BlueTriangle": [[100, 50], [250, 50], [175, 200]]
+							}
+						}
+					}
+				}
+			}
+		}
+	}
+	...
+}
+```
+Now, let's understand the meaning of each JSON entry.
+
+### *frames*
+
+An [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) instance can contain multiple [ArFrames](../../argaze.md/#argaze.ArFeatures.ArFrame), stored by name.
+
+### MyScreen
+
+The name of a 3D AOI **and** of an [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame). Mainly useful for visualisation purposes.
+
+!!! warning "AOI / Frame names policy"
+
+	An [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) layer 3D AOI is defined as an [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) frame, **provided they have the same name**.
+
+!!! warning "Layer name policy"
+
+	An [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) frame layer is projected into an [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) layer, **provided they have the same name**.
+
+!!! note
+
+	[ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) frame layers are projected into their dedicated [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) layers when the JSON configuration file is loaded.
+
+## Pipeline execution
+
+### Map ArUcoCamera image into ArUcoScene frames
+
+After the camera image is passed to the [ArUcoCamera.watch](../../argaze.md/#argaze.ArFeatures.ArCamera.watch) method, it is possible to apply a perspective transformation in order to project the watched image into each [ArUcoScenes](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) [frame background](../../argaze.md/#argaze.ArFeatures.ArFrame) image.
+
+```python
+# Assuming that a Full HD (1920x1080) video stream or file is opened
+...
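+
+# A minimal capture-setup sketch (assuming OpenCV is used for capture;
+# the file path below is hypothetical):
+#
+#	import cv2
+#	video_capture = cv2.VideoCapture('./video.mp4')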
+
+# Assuming that the video reading is handled in a looping code block
+...:
+
+	# Capture image from video stream or file
+	image = video_capture.read()
+
+	# Detect ArUco markers, estimate scene pose, then project 3D AOI into camera frame
+	aruco_camera.watch(image)
+
+	# Map watched image into ArUcoScenes frames background
+	aruco_camera.map()
+```
+
+### Display each ArUcoScene frame
+
+All [ArUcoScenes](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) frame images can be displayed like any [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) image.
+
+```python
+	...
+
+	# Display all ArUcoScenes frames
+	for frame in aruco_camera.scene_frames:
+
+		... frame.image()
+```
\ No newline at end of file
diff --git a/docs/user_guide/aruco_markers_pipeline/aoi_frame.md b/docs/user_guide/aruco_markers_pipeline/aoi_frame.md
deleted file mode 100644
index 6b87d52..0000000
--- a/docs/user_guide/aruco_markers_pipeline/aoi_frame.md
+++ /dev/null
@@ -1,64 +0,0 @@
-Define an AOI as a frame
-========================
-
-
-
-
-## Add ArFrame to ArUcoScene
-
-The [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) class defines a rectangular area where timestamped gaze positions are projected in and inside which they need to be analyzed. An [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) instance can contains multiples [ArFrames](../../argaze.md/#argaze.ArFeatures.ArFrame).
-
-Here is the previous extract where the "GrayRectangle" AOI is defined as a frame into the [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) configuration:
-
-```json
-{
-	"name": "My FullHD camera",
-	"size": [1920, 1080],
-	...
-	"scenes": {
-		"MyScene" : {
-			"aruco_markers_group": {
-				...
-			},
-			"layers": {
-				"MyLayer": {
-					"aoi_scene": {
-						"YellowSquare": [[6.2, -7.275252, 25.246159], [31.2, -7.275252, 25.246159], [31.2, 1.275252, 1.753843], [6.2, 1.275252, 1.753843]],
-						"GrayRectangle": [[2.5, 2.5, -0.5], [37.5, 2.5, -0.5], [37.5, 27.5, -0.5], [2.5, 27.5, -0.5]]
-					}
-				}
-			},
-			"frames": {
-				"GrayRectangle": {
-					"size": [350, 250],
-					"layers": {
-						"MyLayer": {
-							"aoi_scene": {
-								"BlueTriangle": [[100, 50], [250, 50], [175, 200]]
-							}
-						}
-					}
-				}
-			}
-		}
-	}
-	...
-}
-```
-Now, let's understand the meaning of each JSON entry.
-
-### "GrayRectangle"
-
-The name of the AOI and the [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame). Basically useful for visualisation purpose.
-
-!!! warning "Frame name policy"
-
-	An [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) layer AOI is defined as an [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) frame, **provided they have the same name**.
-
-!!! warning "Layer name policy"
-
-	An [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) frame layer is projected into [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) layer, **provided they have the same name**.
-
-!!! note
-
-	[ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) frame layers are projected into their dedicated [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) layers when the JSON configuration file is loaded.
\ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml index 3c5f10c..b4b782e 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -29,6 +29,7 @@ nav: - user_guide/aruco_markers_pipeline/configuration_and_execution.md - user_guide/aruco_markers_pipeline/pose_estimation.md - user_guide/aruco_markers_pipeline/aoi_3d_projection.md + - user_guide/aruco_markers_pipeline/aoi_3d_frame.md - Advanced Topics: - user_guide/aruco_markers_pipeline/advanced_topics/optic_parameters_calibration.md -- cgit v1.1 From 418952f8588457d3e0a07d9d2b9cf3f250470319 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 26 Sep 2023 16:47:21 +0200 Subject: Updating documentation chapter changes. --- docs/user_guide/aruco_markers_pipeline/aoi_3d_projection.md | 4 ++-- docs/user_guide/aruco_markers_pipeline/introduction.md | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/user_guide/aruco_markers_pipeline/aoi_3d_projection.md b/docs/user_guide/aruco_markers_pipeline/aoi_3d_projection.md index 7f98aa2..acbe31d 100644 --- a/docs/user_guide/aruco_markers_pipeline/aoi_3d_projection.md +++ b/docs/user_guide/aruco_markers_pipeline/aoi_3d_projection.md @@ -9,7 +9,7 @@ Once [ArUcoScene pose is estimated](pose_estimation.md) and [3D AOI are describe The [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) class allows to load 3D AOI description. -Here is the previous extract where one layer is added to the [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) configuration: +Here is the previous extract where one layer is added to [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) configuration: ```json { @@ -52,7 +52,7 @@ The set of 3D AOI into the layer as defined at [3D AOI description chapter](aoi_ ## Add ArLayer to ArUcoCamera to project 3D AOI into -Here is the previous extract where one layer is added to the [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) and displayed: +Here is the previous extract where one layer is added to [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) configuration and displayed: ```json { diff --git a/docs/user_guide/aruco_markers_pipeline/introduction.md b/docs/user_guide/aruco_markers_pipeline/introduction.md index dc3aa4a..ee424f0 100644 --- a/docs/user_guide/aruco_markers_pipeline/introduction.md +++ b/docs/user_guide/aruco_markers_pipeline/introduction.md @@ -19,8 +19,8 @@ To build your own ArUco markers pipeline, you need to know: * [How to describe scene's AOI](aoi_3d_description.md), * [How to load and execute ArUco markers pipeline](configuration_and_execution.md), * [How to estimate scene pose](pose_estimation.md), -* [How to project AOI into camera frame](aoi_3d_projection.md), -* [How to define an AOI as a frame](aoi_frame.md) +* [How to project 3D AOI into camera frame](aoi_3d_projection.md), +* [How to define a 3D AOI as a frame](aoi_3d_frame.md) More advanced features are also explained like: -- cgit v1.1 From 9d3cebdf1e3c780c5e1ec6ccc198bcf79e258ee8 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 26 Sep 2023 17:37:05 +0200 Subject: Adding annotations. 
---
 src/argaze/ArFeatures.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py
index cdb7130..edeac6b 100644
--- a/src/argaze/ArFeatures.py
+++ b/src/argaze/ArFeatures.py
@@ -1410,7 +1410,12 @@ class ArCamera(ArFrame):
 			yield scene_frame
 
 	def watch(self, image: numpy.array) -> Tuple[float, dict]:
-		"""Detect AR features from image and project scenes into camera frame."""
+		"""Detect AR features from image and project scenes into camera frame.
+
+		Returns:
+			- detection_time: AR features detection time in ms
+			- exceptions: dictionary with the exception raised per scene
+		"""
 
 		raise NotImplementedError('watch() method not implemented')
-- 
cgit v1.1

From 5838151f49e2ccfdf5bc2bc153cf5b493178bb09 Mon Sep 17 00:00:00 2001
From: Théo de la Hogue
Date: Tue, 26 Sep 2023 17:37:35 +0200
Subject: Adding draw_scenes to aruco markers demo.

---
 src/argaze/utils/demo_data/demo_aruco_markers_setup.json | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/src/argaze/utils/demo_data/demo_aruco_markers_setup.json b/src/argaze/utils/demo_data/demo_aruco_markers_setup.json
index e2edc8c..c881452 100644
--- a/src/argaze/utils/demo_data/demo_aruco_markers_setup.json
+++ b/src/argaze/utils/demo_data/demo_aruco_markers_setup.json
@@ -41,6 +41,20 @@
 			"z": 100,
 			"point_size": 1,
 			"point_color": [0, 0, 255]
+		},
+		"draw_scenes": {
+			"ArScene Demo": {
+				"draw_aruco_markers_group": {
+					"draw_axes": {
+						"thickness": 3,
+						"length": 10
+					},
+					"draw_places": {
+						"color": [0, 0, 0],
+						"border_size": 1
+					}
+				}
+			}
 		}
 	},
 	"scenes": {
-- 
cgit v1.1

From 433e68397d3fcb7d3c9ab07e1fa0a8edc0c4e583 Mon Sep 17 00:00:00 2001
From: Théo de la Hogue
Date: Tue, 26 Sep 2023 17:38:34 +0200
Subject: Documenting gaze analysis for ArUcoScenes frames.

---
 docs/user_guide/aruco_markers_pipeline/aoi_3d_frame.md | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/docs/user_guide/aruco_markers_pipeline/aoi_3d_frame.md b/docs/user_guide/aruco_markers_pipeline/aoi_3d_frame.md
index 47dc73c..8075426 100644
--- a/docs/user_guide/aruco_markers_pipeline/aoi_3d_frame.md
+++ b/docs/user_guide/aruco_markers_pipeline/aoi_3d_frame.md
@@ -67,7 +67,7 @@ The name of a 3D AOI **and** of an [ArFrame](../../argaze.md/#argaze.ArFeatures.
 
 ## Pipeline execution
 
-### Map ArUcoCamera image into ArUcoScene frames
+### Map ArUcoCamera image into ArUcoScenes frames
 
 After the camera image is passed to the [ArUcoCamera.watch](../../argaze.md/#argaze.ArFeatures.ArCamera.watch) method, it is possible to apply a perspective transformation in order to project the watched image into each [ArUcoScenes](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) [frame background](../../argaze.md/#argaze.ArFeatures.ArFrame) image.
 
@@ -88,6 +88,14 @@ After camera image is passed to [ArUcoCamera.watch](../../argaze.md/#argaze.ArFe
 	aruco_camera.map()
 ```
 
+### Analyse timestamped gaze positions in ArUcoScenes frames
+
+[ArUcoScenes](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) frames benefit from all the services described in the [gaze analysis pipeline section](../gaze_analysis_pipeline/introduction.md).
+
+!!! note
+
+	Timestamped gaze positions passed to the [ArUcoCamera.look](../../argaze.md/#argaze.ArFeatures.ArFrame.look) method are projected into [ArUcoScenes](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) frames if applicable.
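+
+Here is a minimal sketch of how gaze could be analysed through these frames; `timestamp` and `gaze_position` are assumed to come from an eye tracker stream:
+
+```python
+from argaze import GazeFeatures
+
+# Assuming timestamped (x, y) gaze data is streamed by an eye tracker
+gaze_position = GazeFeatures.GazePosition((x, y))
+
+# Project the gaze position into ArUcoScenes frames if applicable
+aruco_camera.look(timestamp, gaze_position)
+```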
+
 ### Display each ArUcoScene frame
 
 All [ArUcoScenes](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) frame images can be displayed like any [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) image.
-- 
cgit v1.1

From 6af30f98e4d8c89e4602c4048622ce5e66dbe774 Mon Sep 17 00:00:00 2001
From: Théo de la Hogue
Date: Tue, 26 Sep 2023 17:39:10 +0200
Subject: Adding new aruco markers pipeline scripting chapter.

---
 .../advanced_topics/scripting.md | 132 +++++++++++++++++++++
 .../aruco_markers_pipeline/introduction.md | 2 +-
 .../advanced_topics/scripting.md | 10 +-
 mkdocs.yml | 1 +
 4 files changed, 139 insertions(+), 6 deletions(-)
 create mode 100644 docs/user_guide/aruco_markers_pipeline/advanced_topics/scripting.md

diff --git a/docs/user_guide/aruco_markers_pipeline/advanced_topics/scripting.md b/docs/user_guide/aruco_markers_pipeline/advanced_topics/scripting.md
new file mode 100644
index 0000000..c1db6d5
--- /dev/null
+++ b/docs/user_guide/aruco_markers_pipeline/advanced_topics/scripting.md
@@ -0,0 +1,132 @@
+Script the pipeline
+===================
+
+All ArUco markers pipeline objects are accessible from a Python script.
+This can be particularly useful for real-time AR interaction applications.
+
+## Load ArUcoCamera configuration from dictionary
+
+First of all, an [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) configuration can be loaded from a Python dictionary.
+
+```python
+from argaze.ArUcoMarkers import ArUcoCamera
+
+# Edit a dict with ArUcoCamera configuration
+configuration = {
+	"name": "My FullHD camera",
+	"size": (1920, 1080),
+	...
+	"aruco_detector": {
+		...
+	},
+	"scenes": {
+		"MyScene" : {
+			"aruco_markers_group": {
+				...
+			},
+			"layers": {
+				"MyLayer": {
+					"aoi_scene": {
+						...
+					}
+				},
+				...
+			}
+		},
+		...
+	},
+	"layers": {
+		"MyLayer": {
+			...
+		},
+		...
+	},
+	"image_parameters": {
+		...
+	}
+}
+
+# Load ArUcoCamera
+aruco_camera = ArUcoCamera.ArUcoCamera.from_dict(configuration)
+
+# Do something with ArUcoCamera
+...
+```
+
+## Access to ArUcoCamera and ArScene attributes
+
+Then, once the configuration is loaded, it is possible to access its attributes: [read ArUcoCamera code reference](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) to get a complete list of what is available.
+
+Thus, the [ArUcoCamera.scenes](../../../argaze.md/#argaze.ArFeatures.ArCamera) attribute gives access to each loaded ArUco scene and, in turn, to their attributes: [read ArUcoScene code reference](../../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) to get a complete list of what is available.
+
+```python
+from argaze import ArFeatures
+
+# Assuming the ArUcoCamera is loaded
+...
+
+# Iterate over each ArUcoCamera scene
+for name, aruco_scene in aruco_camera.scenes.items():
+	...
+```
+
+## Pipeline execution outputs
+
+The [ArUcoCamera.watch](../../argaze.md/#argaze.ArFeatures.ArCamera.watch) method returns various data about pipeline execution.
+
+```python
+# Assuming that images are available
+...:
+
+	# Watch image with ArUco camera
+	detection_time, exception = aruco_camera.watch(image)
+
+	# Do something with pipeline detection time
+	...
+
+	# Do something with pipeline exception
+	if exception:
+		...
+```
+
+Let's understand the meaning of the returned data.
+
+### *detection_time*
+
+ArUco marker detection time in ms.
+
+### *exception*
+
+A [python Exception](https://docs.python.org/3/tutorial/errors.html#exceptions) object raised during pipeline execution.
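+
+As an illustration, these outputs could be overlaid onto the camera image for debugging purposes. A sketch, assuming OpenCV is available and `image` holds the watched frame:
+
+```python
+import cv2
+
+# Assuming detection_time and exception come from aruco_camera.watch(image)
+cv2.putText(image, f'detection time: {detection_time:.1f} ms', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
+
+if exception:
+
+	cv2.putText(image, str(exception), (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
+```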
+
+## Set up ArUcoCamera image parameters
+
+Specific [ArUcoCamera.image](../../../argaze.md/#argaze.ArFeatures.ArFrame.image) method parameters can be configured via a Python dictionary.
+
+```python
+# Assuming ArUcoCamera is loaded
+...
+
+# Edit a dict with ArUcoCamera image parameters
+image_parameters = {
+	"draw_detected_markers": {
+		...
+	},
+	"draw_scenes": {
+		...
+	},
+	"draw_optic_parameters_grid": {
+		...
+	},
+	...
+}
+
+# Pass image parameters to ArUcoCamera
+aruco_camera_image = aruco_camera.image(**image_parameters)
+
+# Do something with ArUcoCamera image
+...
+```
+
+!!! note
+	[ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) inherits from [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) and so benefits from all image parameters described in the [gaze analysis pipeline visualisation section](../../gaze_analysis_pipeline/visualisation.md).
\ No newline at end of file
diff --git a/docs/user_guide/aruco_markers_pipeline/introduction.md b/docs/user_guide/aruco_markers_pipeline/introduction.md
index ee424f0..f5bba18 100644
--- a/docs/user_guide/aruco_markers_pipeline/introduction.md
+++ b/docs/user_guide/aruco_markers_pipeline/introduction.md
@@ -24,6 +24,6 @@ To build your own ArUco markers pipeline, you need to know:
 
 More advanced features are also explained like:
 
-
+* [How to script ArUco markers pipeline](advanced_topics/scripting.md)
 * [How to calibrate optic parameters](advanced_topics/optic_parameters_calibration.md)
 
diff --git a/docs/user_guide/gaze_analysis_pipeline/advanced_topics/scripting.md b/docs/user_guide/gaze_analysis_pipeline/advanced_topics/scripting.md
index 637ba57..eefeee1 100644
--- a/docs/user_guide/gaze_analysis_pipeline/advanced_topics/scripting.md
+++ b/docs/user_guide/gaze_analysis_pipeline/advanced_topics/scripting.md
@@ -106,7 +106,7 @@ for name, ar_layer in ar_frame.layers.items():
 
 Let's understand the meaning of the returned data.
 
-### Gaze movement
+### *gaze_movement*
 
 A [GazeMovement](../../../argaze.md/#argaze.GazeFeatures.GazeMovement) once it has been identified by the [ArFrame.gaze_movement_identifier](../../../argaze.md/#argaze.ArFeatures.ArFrame) object from incoming consecutive timestamped gaze positions. If no gaze movement has been identified, it returns an [UnvalidGazeMovement](../../../argaze.md/#argaze.GazeFeatures.UnvalidGazeMovement).
 
 In that case, the returned gaze movement *finished* flag is false.
 
 Then, the returned gaze movement type can be tested thanks to the [GazeFeatures.is_fixation](../../../argaze.md/#argaze.GazeFeatures.is_fixation) and [GazeFeatures.is_saccade](../../../argaze.md/#argaze.GazeFeatures.is_saccade) functions.
 
-### Scan path analysis
+### *scan_path_analysis*
 
 A dictionary with all last scan path analysis if a new scan step has been added to the [ArFrame.scan_path](../../../argaze.md/#argaze.ArFeatures.ArFrame) object.
 
-### Layers analysis
+### *layers_analysis*
 
 A dictionary with all layers AOI scan path analysis if a new AOI scan step has been added to an [ArLayer.aoi_scan_path](../../../argaze.md/#argaze.ArFeatures.ArLayer) object.
 
-### Execution times
+### *execution_times*
 
 A dictionary with each pipeline step execution time.
 
-### Exception
+### *exception*
 
 A [python Exception](https://docs.python.org/3/tutorial/errors.html#exceptions) object raised during pipeline execution.
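 For example, the returned gaze movement could be dispatched by type; a minimal sketch, assuming `gaze_movement` was returned by `ar_frame.look()`:
 
 ```python
 from argaze import GazeFeatures
 
 # Assuming gaze_movement was returned by ar_frame.look(timestamp, gaze_position)
 if GazeFeatures.is_fixation(gaze_movement):
 
 	print('last gaze movement is a fixation')
 
 elif GazeFeatures.is_saccade(gaze_movement):
 
 	print('last gaze movement is a saccade')
 ```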
diff --git a/mkdocs.yml b/mkdocs.yml index b4b782e..2e21826 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -31,6 +31,7 @@ nav: - user_guide/aruco_markers_pipeline/aoi_3d_projection.md - user_guide/aruco_markers_pipeline/aoi_3d_frame.md - Advanced Topics: + - user_guide/aruco_markers_pipeline/advanced_topics/scripting.md - user_guide/aruco_markers_pipeline/advanced_topics/optic_parameters_calibration.md # - Areas Of Interest: -- cgit v1.1 From 06548cb7cb807f42fa42e4777288f67f259ae64a Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 26 Sep 2023 17:43:56 +0200 Subject: Fixing errors returned by mkdocs serve. --- .../advanced_topics/optic_parameters_calibration.md | 4 ++-- .../aruco_markers_pipeline/advanced_topics/scripting.md | 8 ++++---- .../gaze_analysis_pipeline/advanced_topics/module_loading.md | 2 +- src/argaze/GazeAnalysis/DeviationCircleCoverage.py | 1 - src/argaze/GazeAnalysis/FocusPointInside.py | 1 - 5 files changed, 7 insertions(+), 9 deletions(-) diff --git a/docs/user_guide/aruco_markers_pipeline/advanced_topics/optic_parameters_calibration.md b/docs/user_guide/aruco_markers_pipeline/advanced_topics/optic_parameters_calibration.md index 0a7ec4d..3277216 100644 --- a/docs/user_guide/aruco_markers_pipeline/advanced_topics/optic_parameters_calibration.md +++ b/docs/user_guide/aruco_markers_pipeline/advanced_topics/optic_parameters_calibration.md @@ -134,9 +134,9 @@ Below, an optic_parameters JSON file example: ## Load and display optic parameters -[ArUcoCamera.detector.optic_parameters](../../argaze.md/#argaze.ArUcoMarkers.ArUcoOpticCalibrator.OpticParameters) can be enabled thanks to a dedicated JSON entry. +[ArUcoCamera.detector.optic_parameters](../../../argaze.md/#argaze.ArUcoMarkers.ArUcoOpticCalibrator.OpticParameters) can be enabled thanks to a dedicated JSON entry. -Here is an extract from the JSON [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) configuration file where optic parameters are loaded and displayed: +Here is an extract from the JSON [ArUcoCamera](../../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) configuration file where optic parameters are loaded and displayed: ```json { diff --git a/docs/user_guide/aruco_markers_pipeline/advanced_topics/scripting.md b/docs/user_guide/aruco_markers_pipeline/advanced_topics/scripting.md index c1db6d5..0b2ef52 100644 --- a/docs/user_guide/aruco_markers_pipeline/advanced_topics/scripting.md +++ b/docs/user_guide/aruco_markers_pipeline/advanced_topics/scripting.md @@ -6,7 +6,7 @@ This could be particularly useful for realtime AR interaction applications. ## Load ArUcoCamera configuration from dictionary -First of all, [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) configuration can be loaded from a python dictionary. +First of all, [ArUcoCamera](../../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) configuration can be loaded from a python dictionary. ```python from argaze.ArUcoMarkers import ArUcoCamera @@ -55,7 +55,7 @@ aruco_camera = ArUcoCamera.ArUcoCamera.from_dict(configuration) ## Access to ArUcoCamera and ArScenes attributes -Then, once the configuration is loaded, it is possible to access to its attributes: [read ArUcoCamera code reference](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) to get a complete list of what is available. +Then, once the configuration is loaded, it is possible to access to its attributes: [read ArUcoCamera code reference](../../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) to get a complete list of what is available. 
Thus, the [ArUcoCamera.scenes](../../../argaze.md/#argaze.ArFeatures.ArCamera) attribute allows to access each loaded aruco scene and so, access to their attributes: [read ArUcoScene code reference](../../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) to get a complete list of what is available. @@ -72,7 +72,7 @@ for name, aruco_scene in aruco_camera.scenes.items(): ## Pipeline execution outputs -[ArUcoCamera.watch](../../argaze.md/#argaze.ArFeatures.ArCamera.watch) method returns many data about pipeline execution. +[ArUcoCamera.watch](../../../argaze.md/#argaze.ArFeatures.ArCamera.watch) method returns many data about pipeline execution. ```python # Assuming that images are available @@ -129,4 +129,4 @@ aruco_camera_image = aruco_camera.image(**image_parameters) ``` !!! note - [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) inherits from [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) and so, benefits from all image parameters described in [gaze analysis pipeline visualisation section](../../gaze_analysis_pipeline/visualisation.md). \ No newline at end of file + [ArUcoCamera](../../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) inherits from [ArFrame](../../../argaze.md/#argaze.ArFeatures.ArFrame) and so, benefits from all image parameters described in [gaze analysis pipeline visualisation section](../../gaze_analysis_pipeline/visualisation.md). \ No newline at end of file diff --git a/docs/user_guide/gaze_analysis_pipeline/advanced_topics/module_loading.md b/docs/user_guide/gaze_analysis_pipeline/advanced_topics/module_loading.md index 7796f45..f2e84d6 100644 --- a/docs/user_guide/gaze_analysis_pipeline/advanced_topics/module_loading.md +++ b/docs/user_guide/gaze_analysis_pipeline/advanced_topics/module_loading.md @@ -34,7 +34,7 @@ To do so, simply prepend the package where to find the module into the JSON conf } ``` -Then, load your package from the python script where the [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) is created. +Then, load your package from the python script where the [ArFrame](../../../argaze.md/#argaze.ArFeatures.ArFrame) is created. 
```python
from argaze import ArFeatures
diff --git a/src/argaze/GazeAnalysis/DeviationCircleCoverage.py b/src/argaze/GazeAnalysis/DeviationCircleCoverage.py
index d55d8c9..f57d432 100644
--- a/src/argaze/GazeAnalysis/DeviationCircleCoverage.py
+++ b/src/argaze/GazeAnalysis/DeviationCircleCoverage.py
@@ -116,7 +116,6 @@ class AOIMatcher(GazeFeatures.AOIMatcher):
 			image: where to draw
 			aoi_scene: to refresh looked aoi if required
 			draw_matched_fixation: Fixation.draw parameters (which depend on the loaded gaze movement identifier module; if None, no fixation is drawn)
-			draw_matched_fixation_positions: GazeMovement.draw_positions parameters (if None, no fixation is drawn)
 			draw_matched_region: AOIFeatures.AOI.draw parameters (if None, no matched region is drawn)
 			draw_looked_aoi: AOIFeatures.AOI.draw parameters (if None, no looked aoi is drawn)
 			looked_aoi_name_color: color of text (if None, no looked aoi name is drawn)
diff --git a/src/argaze/GazeAnalysis/FocusPointInside.py b/src/argaze/GazeAnalysis/FocusPointInside.py
index 24c319e..81a9d20 100644
--- a/src/argaze/GazeAnalysis/FocusPointInside.py
+++ b/src/argaze/GazeAnalysis/FocusPointInside.py
@@ -61,7 +61,6 @@ class AOIMatcher(GazeFeatures.AOIMatcher):
 			image: where to draw
 			aoi_scene: to refresh looked aoi if required
 			draw_matched_fixation: Fixation.draw parameters (which depend on the loaded gaze movement identifier module; if None, no fixation is drawn)
-			draw_matched_fixation_positions: GazeMovement.draw_positions parameters (if None, no fixation is drawn)
 			draw_looked_aoi: AOIFeatures.AOI.draw parameters (if None, no looked aoi is drawn)
 			looked_aoi_name_color: color of text (if None, no looked aoi name is drawn)
 			looked_aoi_name_offset: offset of text from the upper left aoi bounding box corner
-- 
cgit v1.1

From 3b523bb755d706ce945e3b9d93416909021f8e58 Mon Sep 17 00:00:00 2001
From: Théo de la Hogue
Date: Wed, 27 Sep 2023 09:57:42 +0200
Subject: Changing DetectorParameter printing.

---
 src/argaze/ArUcoMarkers/ArUcoDetector.py | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/src/argaze/ArUcoMarkers/ArUcoDetector.py b/src/argaze/ArUcoMarkers/ArUcoDetector.py
index 82c9394..3260d00 100644
--- a/src/argaze/ArUcoMarkers/ArUcoDetector.py
+++ b/src/argaze/ArUcoMarkers/ArUcoDetector.py
@@ -98,8 +98,16 @@ class DetectorParameters():
 
 		return DetectorParameters(**json.load(configuration_file))
 
-	def __str__(self, print_all=False) -> str:
-		"""Detector paremeters string representation."""
+	def __str__(self) -> str:
+		"""Detector parameters string representation."""
+
+		return f'{self}'
+
+	def __format__(self, spec) -> str:
+		"""Formatted detector parameters string representation.
+
+		Parameters:
+			spec: 'modified' to get only modified parameters."""
 
 		output = ''
 
@@ -109,7 +117,7 @@ class DetectorParameters():
 
 			output += f'\t*{parameter}: {getattr(self.__parameters, parameter)}\n'
 
-		elif print_all:
+		elif spec == "":
 
 			output += f'\t{parameter}: {getattr(self.__parameters, parameter)}\n'
-- 
cgit v1.1

From 1a0dc73d98fdbe0d45523ca3ac914928b0ae775a Mon Sep 17 00:00:00 2001
From: Théo de la Hogue
Date: Wed, 27 Sep 2023 09:58:43 +0200
Subject: Adding new chapter to explain how to configure ArUco marker detection.
---
 .../aruco_detector_configuration.md | 39 ++++++++++++++++++++++
 .../aruco_markers_pipeline/introduction.md | 2 +-
 mkdocs.yml | 1 +
 3 files changed, 41 insertions(+), 1 deletion(-)
 create mode 100644 docs/user_guide/aruco_markers_pipeline/advanced_topics/aruco_detector_configuration.md

diff --git a/docs/user_guide/aruco_markers_pipeline/advanced_topics/aruco_detector_configuration.md b/docs/user_guide/aruco_markers_pipeline/advanced_topics/aruco_detector_configuration.md
new file mode 100644
index 0000000..98b0841
--- /dev/null
+++ b/docs/user_guide/aruco_markers_pipeline/advanced_topics/aruco_detector_configuration.md
@@ -0,0 +1,39 @@
+Improve ArUco markers detection
+===============================
+
+As explained in the [OpenCV ArUco documentation](https://docs.opencv.org/4.x/d1/dcd/structcv_1_1aruco_1_1DetectorParameters.html), ArUco marker detection is highly configurable.
+
+## Load ArUcoDetector parameters
+
+[ArUcoCamera.detector.parameters](../../../argaze.md/#argaze.ArUcoMarkers.ArUcoDetector.Parameters) can be loaded via a dedicated JSON entry.
+
+Here is an extract from the JSON [ArUcoCamera](../../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) configuration file with ArUco detector parameters:
+
+```json
+{
+	"name": "My FullHD camera",
+	"size": [1920, 1080],
+	"aruco_detector": {
+		"dictionary": "DICT_APRILTAG_16h5",
+		"marker_size": 5,
+		"parameters": {
+			"cornerRefinementMethod": 3,
+			"aprilTagQuadSigma": 2,
+			"aprilTagDeglitch": 1
+		}
+	},
+	...
+```
+
+## Print ArUcoDetector parameters
+
+```python
+# Assuming ArUcoCamera is loaded
+...
+
+# Print all ArUcoDetector parameters
+print(aruco_camera.aruco_detector.parameters)
+
+# Print only modified ArUcoDetector parameters
+print(f'{aruco_camera.aruco_detector.parameters:modified}')
+```
diff --git a/docs/user_guide/aruco_markers_pipeline/introduction.md b/docs/user_guide/aruco_markers_pipeline/introduction.md
index f5bba18..5a07b49 100644
--- a/docs/user_guide/aruco_markers_pipeline/introduction.md
+++ b/docs/user_guide/aruco_markers_pipeline/introduction.md
@@ -26,4 +26,4 @@ More advanced features are also explained like:
 
 * [How to script ArUco markers pipeline](advanced_topics/scripting.md)
 * [How to calibrate optic parameters](advanced_topics/optic_parameters_calibration.md)
-
+* [How to improve ArUco markers detection](advanced_topics/aruco_detector_configuration.md)
diff --git a/mkdocs.yml b/mkdocs.yml
index 2e21826..784c9e2 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -33,6 +33,7 @@ nav:
     - Advanced Topics:
       - user_guide/aruco_markers_pipeline/advanced_topics/scripting.md
      - user_guide/aruco_markers_pipeline/advanced_topics/optic_parameters_calibration.md
+      - user_guide/aruco_markers_pipeline/advanced_topics/aruco_detector_configuration.md
 
 # - Areas Of Interest:
 #   - user_guide/areas_of_interest/introduction.md
--- docs/img/aoi_2d_description.png | Bin 0 -> 7458 bytes docs/img/aoi_3d_description.png | Bin 14538 -> 16928 bytes docs/img/aoi_matcher.png | Bin 0 -> 16444 bytes docs/img/aoi_projection.png | Bin 20707 -> 0 bytes docs/img/aoi_scan_path.png | Bin 29067 -> 13583 bytes docs/img/ar_frame.png | Bin 24931 -> 16684 bytes docs/img/ar_frame_background.png | Bin 101101 -> 0 bytes docs/img/ar_frame_gaze_movement_identifier.png | Bin 27362 -> 0 bytes docs/img/ar_frame_heatmap.png | Bin 60597 -> 0 bytes docs/img/ar_frame_scan_path.png | Bin 18906 -> 0 bytes docs/img/ar_frame_visualisation.png | Bin 31964 -> 0 bytes docs/img/ar_layer.png | Bin 19460 -> 16374 bytes docs/img/ar_layer_aoi_matcher.png | Bin 22948 -> 0 bytes docs/img/ar_layer_aoi_scan_path.png | Bin 14711 -> 0 bytes docs/img/ar_layer_aoi_scene.png | Bin 9014 -> 0 bytes docs/img/aruco_camera_aoi_frame.png | Bin 0 -> 41104 bytes docs/img/aruco_camera_aoi_projection.png | Bin 57019 -> 34892 bytes docs/img/aruco_camera_frame.png | Bin 49201 -> 31515 bytes .../aruco_camera_gaze_movement_identification.png | Bin 56059 -> 36834 bytes docs/img/aruco_camera_markers_detection.png | Bin 52844 -> 35629 bytes docs/img/aruco_camera_pose_estimation.png | Bin 51798 -> 34695 bytes docs/img/aruco_dictionaries.png | Bin 89114 -> 66662 bytes docs/img/aruco_markers_description.png | Bin 17207 -> 16478 bytes docs/img/aruco_scene.png | Bin 17124 -> 0 bytes docs/img/background.png | Bin 0 -> 189282 bytes docs/img/circle_intersection.png | Bin 10800 -> 0 bytes docs/img/contains_point.png | Bin 6505 -> 0 bytes docs/img/detected_markers.png | Bin 14941 -> 0 bytes docs/img/distance.png | Bin 9679 -> 0 bytes docs/img/fixation_and_saccade.png | Bin 22230 -> 0 bytes docs/img/gaze_movement_identifier.png | Bin 0 -> 22800 bytes docs/img/get_last_before.png | Bin 9017 -> 0 bytes docs/img/get_last_until.png | Bin 9113 -> 0 bytes docs/img/heatmap.png | Bin 46531 -> 50869 bytes docs/img/opencv_aruco.png | Bin 0 -> 25065 bytes docs/img/optic_calibrated.png | Bin 9118 -> 0 bytes docs/img/optic_distorsion.png | Bin 21220 -> 0 bytes docs/img/overlapping.png | Bin 15668 -> 0 bytes docs/img/point_spread.png | Bin 8542 -> 8454 bytes docs/img/pop_last_before.png | Bin 10447 -> 0 bytes docs/img/pop_last_until.png | Bin 11074 -> 0 bytes docs/img/pose_estimation.png | Bin 15818 -> 0 bytes docs/img/scan_path.png | Bin 20567 -> 12724 bytes docs/img/scene.png | Bin 44671 -> 32079 bytes docs/img/timestamped_gaze_positions.png | Bin 23134 -> 11209 bytes docs/img/vision_cone.png | Bin 14240 -> 0 bytes docs/img/visualisation.png | Bin 0 -> 23984 bytes .../aruco_markers_pipeline/aoi_3d_description.md | 33 ++++++++------------- .../aruco_markers_pipeline/aoi_3d_frame.md | 30 +++++++++++++++---- .../aruco_markers_pipeline/aoi_3d_projection.md | 5 ++-- .../aruco_markers_description.md | 30 +++++++++---------- .../aruco_markers_pipeline/introduction.md | 6 ++-- .../aruco_markers_pipeline/pose_estimation.md | 8 ++--- .../gaze_analysis_pipeline/aoi_2d_description.md | 26 ++++++++-------- .../gaze_analysis_pipeline/aoi_analysis.md | 26 +++++++++++----- .../gaze_analysis_pipeline/background.md | 4 +-- .../configuration_and_execution.md | 4 +-- docs/user_guide/gaze_analysis_pipeline/heatmap.md | 2 +- .../timestamped_gaze_positions_edition.md | 2 +- .../gaze_analysis_pipeline/visualisation.md | 2 +- 60 files changed, 100 insertions(+), 78 deletions(-) create mode 100644 docs/img/aoi_2d_description.png create mode 100644 docs/img/aoi_matcher.png delete mode 100644 docs/img/aoi_projection.png delete mode 
100644 docs/img/ar_frame_background.png delete mode 100644 docs/img/ar_frame_gaze_movement_identifier.png delete mode 100644 docs/img/ar_frame_heatmap.png delete mode 100644 docs/img/ar_frame_scan_path.png delete mode 100644 docs/img/ar_frame_visualisation.png delete mode 100644 docs/img/ar_layer_aoi_matcher.png delete mode 100644 docs/img/ar_layer_aoi_scan_path.png delete mode 100644 docs/img/ar_layer_aoi_scene.png create mode 100644 docs/img/aruco_camera_aoi_frame.png delete mode 100644 docs/img/aruco_scene.png create mode 100644 docs/img/background.png delete mode 100644 docs/img/circle_intersection.png delete mode 100644 docs/img/contains_point.png delete mode 100644 docs/img/detected_markers.png delete mode 100644 docs/img/distance.png delete mode 100644 docs/img/fixation_and_saccade.png create mode 100644 docs/img/gaze_movement_identifier.png delete mode 100644 docs/img/get_last_before.png delete mode 100644 docs/img/get_last_until.png create mode 100644 docs/img/opencv_aruco.png delete mode 100644 docs/img/optic_calibrated.png delete mode 100644 docs/img/optic_distorsion.png delete mode 100644 docs/img/overlapping.png delete mode 100644 docs/img/pop_last_before.png delete mode 100644 docs/img/pop_last_until.png delete mode 100644 docs/img/pose_estimation.png delete mode 100644 docs/img/vision_cone.png create mode 100644 docs/img/visualisation.png diff --git a/docs/img/aoi_2d_description.png b/docs/img/aoi_2d_description.png new file mode 100644 index 0000000..51a98b0 Binary files /dev/null and b/docs/img/aoi_2d_description.png differ diff --git a/docs/img/aoi_3d_description.png b/docs/img/aoi_3d_description.png index 794ef68..caf7efc 100644 Binary files a/docs/img/aoi_3d_description.png and b/docs/img/aoi_3d_description.png differ diff --git a/docs/img/aoi_matcher.png b/docs/img/aoi_matcher.png new file mode 100644 index 0000000..c9c6dcd Binary files /dev/null and b/docs/img/aoi_matcher.png differ diff --git a/docs/img/aoi_projection.png b/docs/img/aoi_projection.png deleted file mode 100644 index a83b9cd..0000000 Binary files a/docs/img/aoi_projection.png and /dev/null differ diff --git a/docs/img/aoi_scan_path.png b/docs/img/aoi_scan_path.png index 7cac491..80c65d4 100644 Binary files a/docs/img/aoi_scan_path.png and b/docs/img/aoi_scan_path.png differ diff --git a/docs/img/ar_frame.png b/docs/img/ar_frame.png index 65fa0ea..6ab7eeb 100644 Binary files a/docs/img/ar_frame.png and b/docs/img/ar_frame.png differ diff --git a/docs/img/ar_frame_background.png b/docs/img/ar_frame_background.png deleted file mode 100644 index 7bc16da..0000000 Binary files a/docs/img/ar_frame_background.png and /dev/null differ diff --git a/docs/img/ar_frame_gaze_movement_identifier.png b/docs/img/ar_frame_gaze_movement_identifier.png deleted file mode 100644 index 8a66cac..0000000 Binary files a/docs/img/ar_frame_gaze_movement_identifier.png and /dev/null differ diff --git a/docs/img/ar_frame_heatmap.png b/docs/img/ar_frame_heatmap.png deleted file mode 100644 index 812cc8f..0000000 Binary files a/docs/img/ar_frame_heatmap.png and /dev/null differ diff --git a/docs/img/ar_frame_scan_path.png b/docs/img/ar_frame_scan_path.png deleted file mode 100644 index 671d6a5..0000000 Binary files a/docs/img/ar_frame_scan_path.png and /dev/null differ diff --git a/docs/img/ar_frame_visualisation.png b/docs/img/ar_frame_visualisation.png deleted file mode 100644 index a9c9032..0000000 Binary files a/docs/img/ar_frame_visualisation.png and /dev/null differ diff --git a/docs/img/ar_layer.png b/docs/img/ar_layer.png 
index 418d879..ec42c22 100644 Binary files a/docs/img/ar_layer.png and b/docs/img/ar_layer.png differ diff --git a/docs/img/ar_layer_aoi_matcher.png b/docs/img/ar_layer_aoi_matcher.png deleted file mode 100644 index 63caf4d..0000000 Binary files a/docs/img/ar_layer_aoi_matcher.png and /dev/null differ diff --git a/docs/img/ar_layer_aoi_scan_path.png b/docs/img/ar_layer_aoi_scan_path.png deleted file mode 100644 index 1a4dad3..0000000 Binary files a/docs/img/ar_layer_aoi_scan_path.png and /dev/null differ diff --git a/docs/img/ar_layer_aoi_scene.png b/docs/img/ar_layer_aoi_scene.png deleted file mode 100644 index 96bfc12..0000000 Binary files a/docs/img/ar_layer_aoi_scene.png and /dev/null differ diff --git a/docs/img/aruco_camera_aoi_frame.png b/docs/img/aruco_camera_aoi_frame.png new file mode 100644 index 0000000..944f9ff Binary files /dev/null and b/docs/img/aruco_camera_aoi_frame.png differ diff --git a/docs/img/aruco_camera_aoi_projection.png b/docs/img/aruco_camera_aoi_projection.png index 59a8ab0..ec708db 100644 Binary files a/docs/img/aruco_camera_aoi_projection.png and b/docs/img/aruco_camera_aoi_projection.png differ diff --git a/docs/img/aruco_camera_frame.png b/docs/img/aruco_camera_frame.png index 443285f..0825f18 100644 Binary files a/docs/img/aruco_camera_frame.png and b/docs/img/aruco_camera_frame.png differ diff --git a/docs/img/aruco_camera_gaze_movement_identification.png b/docs/img/aruco_camera_gaze_movement_identification.png index fc9ff39..34cc74f 100644 Binary files a/docs/img/aruco_camera_gaze_movement_identification.png and b/docs/img/aruco_camera_gaze_movement_identification.png differ diff --git a/docs/img/aruco_camera_markers_detection.png b/docs/img/aruco_camera_markers_detection.png index 6192e09..a954313 100644 Binary files a/docs/img/aruco_camera_markers_detection.png and b/docs/img/aruco_camera_markers_detection.png differ diff --git a/docs/img/aruco_camera_pose_estimation.png b/docs/img/aruco_camera_pose_estimation.png index b6c2675..ebc1993 100644 Binary files a/docs/img/aruco_camera_pose_estimation.png and b/docs/img/aruco_camera_pose_estimation.png differ diff --git a/docs/img/aruco_dictionaries.png b/docs/img/aruco_dictionaries.png index ed5f287..033bbfb 100644 Binary files a/docs/img/aruco_dictionaries.png and b/docs/img/aruco_dictionaries.png differ diff --git a/docs/img/aruco_markers_description.png b/docs/img/aruco_markers_description.png index 2f3d1c2..b840fcd 100644 Binary files a/docs/img/aruco_markers_description.png and b/docs/img/aruco_markers_description.png differ diff --git a/docs/img/aruco_scene.png b/docs/img/aruco_scene.png deleted file mode 100644 index d8aea8e..0000000 Binary files a/docs/img/aruco_scene.png and /dev/null differ diff --git a/docs/img/background.png b/docs/img/background.png new file mode 100644 index 0000000..3faef14 Binary files /dev/null and b/docs/img/background.png differ diff --git a/docs/img/circle_intersection.png b/docs/img/circle_intersection.png deleted file mode 100644 index 6893d32..0000000 Binary files a/docs/img/circle_intersection.png and /dev/null differ diff --git a/docs/img/contains_point.png b/docs/img/contains_point.png deleted file mode 100644 index 71a1050..0000000 Binary files a/docs/img/contains_point.png and /dev/null differ diff --git a/docs/img/detected_markers.png b/docs/img/detected_markers.png deleted file mode 100644 index 588364d..0000000 Binary files a/docs/img/detected_markers.png and /dev/null differ diff --git a/docs/img/distance.png b/docs/img/distance.png deleted file mode 100644 
index 31cd249..0000000 Binary files a/docs/img/distance.png and /dev/null differ diff --git a/docs/img/fixation_and_saccade.png b/docs/img/fixation_and_saccade.png deleted file mode 100644 index 1bd91b9..0000000 Binary files a/docs/img/fixation_and_saccade.png and /dev/null differ diff --git a/docs/img/gaze_movement_identifier.png b/docs/img/gaze_movement_identifier.png new file mode 100644 index 0000000..14dfad8 Binary files /dev/null and b/docs/img/gaze_movement_identifier.png differ diff --git a/docs/img/get_last_before.png b/docs/img/get_last_before.png deleted file mode 100644 index 97d4170..0000000 Binary files a/docs/img/get_last_before.png and /dev/null differ diff --git a/docs/img/get_last_until.png b/docs/img/get_last_until.png deleted file mode 100644 index 4af2c26..0000000 Binary files a/docs/img/get_last_until.png and /dev/null differ diff --git a/docs/img/heatmap.png b/docs/img/heatmap.png index 5f07d77..534ccc7 100644 Binary files a/docs/img/heatmap.png and b/docs/img/heatmap.png differ diff --git a/docs/img/opencv_aruco.png b/docs/img/opencv_aruco.png new file mode 100644 index 0000000..0aa161e Binary files /dev/null and b/docs/img/opencv_aruco.png differ diff --git a/docs/img/optic_calibrated.png b/docs/img/optic_calibrated.png deleted file mode 100644 index 586c4d6..0000000 Binary files a/docs/img/optic_calibrated.png and /dev/null differ diff --git a/docs/img/optic_distorsion.png b/docs/img/optic_distorsion.png deleted file mode 100644 index 2de9937..0000000 Binary files a/docs/img/optic_distorsion.png and /dev/null differ diff --git a/docs/img/overlapping.png b/docs/img/overlapping.png deleted file mode 100644 index 0fc1b72..0000000 Binary files a/docs/img/overlapping.png and /dev/null differ diff --git a/docs/img/point_spread.png b/docs/img/point_spread.png index 7ee39bc..9d14a40 100644 Binary files a/docs/img/point_spread.png and b/docs/img/point_spread.png differ diff --git a/docs/img/pop_last_before.png b/docs/img/pop_last_before.png deleted file mode 100644 index 15d02a0..0000000 Binary files a/docs/img/pop_last_before.png and /dev/null differ diff --git a/docs/img/pop_last_until.png b/docs/img/pop_last_until.png deleted file mode 100644 index 94b0c37..0000000 Binary files a/docs/img/pop_last_until.png and /dev/null differ diff --git a/docs/img/pose_estimation.png b/docs/img/pose_estimation.png deleted file mode 100644 index d814575..0000000 Binary files a/docs/img/pose_estimation.png and /dev/null differ diff --git a/docs/img/scan_path.png b/docs/img/scan_path.png index 1c77598..72af153 100644 Binary files a/docs/img/scan_path.png and b/docs/img/scan_path.png differ diff --git a/docs/img/scene.png b/docs/img/scene.png index 818c301..251c7bf 100644 Binary files a/docs/img/scene.png and b/docs/img/scene.png differ diff --git a/docs/img/timestamped_gaze_positions.png b/docs/img/timestamped_gaze_positions.png index cc08ec0..c639019 100644 Binary files a/docs/img/timestamped_gaze_positions.png and b/docs/img/timestamped_gaze_positions.png differ diff --git a/docs/img/vision_cone.png b/docs/img/vision_cone.png deleted file mode 100644 index 19c5583..0000000 Binary files a/docs/img/vision_cone.png and /dev/null differ diff --git a/docs/img/visualisation.png b/docs/img/visualisation.png new file mode 100644 index 0000000..9076e7e Binary files /dev/null and b/docs/img/visualisation.png differ diff --git a/docs/user_guide/aruco_markers_pipeline/aoi_3d_description.md b/docs/user_guide/aruco_markers_pipeline/aoi_3d_description.md index 5a1a16e..502f905 100644 --- 
a/docs/user_guide/aruco_markers_pipeline/aoi_3d_description.md
+++ b/docs/user_guide/aruco_markers_pipeline/aoi_3d_description.md
@@ -3,7 +3,7 @@ Describe 3D AOI
 
 Once [ArUco markers are placed into a scene](aruco_markers_description.md), [areas of interest (AOI)](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.AreaOfInterest) need to be described in the same 3D referential.
 
-In the example scene, each screen is considered as an area of interest more the blue triangle area inside the top screen.
+In the example scene, the screen and the sheet are considered areas of interest.
 
 ![3D AOI description](../../img/aoi_3d_description.png)
 
@@ -21,26 +21,20 @@ All AOI need to be described from same origin than markers in a [right-handed 3D
 
 OBJ file format could be exported from most 3D editors.
 
 ``` obj
-o YellowSquare
-v 6.200003 -7.275252 25.246159
-v 31.200003 -7.275252 25.246159
-v 6.200003 1.275252 1.753843
-v 31.200003 1.275252 1.753843
+o Sheet
+v 14.200000 -3.000000 28.350000
+v 35.200000 -3.000000 28.350000
+v 14.200000 -3.000000 -1.35
+v 35.200000 -3.000000 -1.35
 s off
 f 1 2 4 3
-o GrayRectangle
-v 2.500000 2.500000 -0.500000
-v 37.500000 2.500000 -0.500000
-v 2.500000 27.500000 -0.500000
-v 37.500000 27.500000 -0.500000
+o Screen
+v 2.750000 2.900000 -0.500000
+v 49.250000 2.900000 -0.500000
+v 2.750000 29.100000 -0.500000
+v 49.250000 29.100000 -0.500000
 s off
 f 5 6 8 7
-o BlueTriangle
-v 12.500002 7.500000 -0.500000
-v 27.500002 7.500000 -0.500000
-v 20.000002 22.500000 -0.500000
-s off
-f 9 10 11
 ```
 
 Here are common OBJ file features needed to describe AOI:
@@ -55,8 +49,7 @@ JSON file format allows to describe AOI vertices.
 
 ``` json
 {
-	"YellowSquare": [[6.2, -7.275252, 25.246159], [31.2, -7.275252, 25.246159], [31.2, 1.275252, 1.753843], [6.2, 1.275252, 1.753843]],
-	"GrayRectangle": [[2.5, 2.5, -0.5], [37.5, 2.5, -0.5], [37.5, 27.5, -0.5], [2.5, 27.5, -0.5]],
-	"BlueTriangle": [[12.5, 7.5, -0.5], [27.5, 7.5, -0.5], [20, 22.5, -0.5]]
+	"Sheet": [[14.2, -3, 28.35], [35.2, -3, 28.35], [14.2, -3, -1.35], [35.2, -3, -1.35]],
+	"Screen": [[2.75, 2.9, -0.5], [49.25, 2.9, -0.5], [2.75, 29.1, -0.5], [49.25, 29.1, -0.5]]
 }
 ```
diff --git a/docs/user_guide/aruco_markers_pipeline/aoi_3d_frame.md b/docs/user_guide/aruco_markers_pipeline/aoi_3d_frame.md
index 8075426..032e2b6 100644
--- a/docs/user_guide/aruco_markers_pipeline/aoi_3d_frame.md
+++ b/docs/user_guide/aruco_markers_pipeline/aoi_3d_frame.md
@@ -3,11 +3,13 @@ Define a 3D AOI as a frame
 
 When a 3D AOI of the scene contains other coplanar 3D AOI, like a screen with GUI elements displayed on it, it is better to describe them as 2D AOI inside a 2D coordinate system related to the containing 3D AOI.
 
+![3D AOI frame](../../img/aruco_camera_aoi_frame.png)
+
 ## Add ArFrame to ArUcoScene
 
 The [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) class defines a rectangular area where timestamped gaze positions are projected and inside which they need to be analyzed.
-Here is the previous extract where "MyScreen" AOI is defined as a frame into [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) configuration: +Here is the previous extract where "Screen" AOI is defined as a frame into [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) configuration: ```json { @@ -22,18 +24,34 @@ Here is the previous extract where "MyScreen" AOI is defined as a frame into [Ar "layers": { "MyLayer": { "aoi_scene": { - "MyScreen": [[2.5, 2.5, -0.5], [37.5, 2.5, -0.5], [37.5, 27.5, -0.5], [2.5, 27.5, -0.5]] + "Sheet": [[14.2, -3, 28.35], [35.2, -3, 28.35], [14.2, -3, -1.35], [35.2, -3, -1.35]], + "Screen": [[2.75, 2.9, -0.5], [49.25, 2.9, -0.5], [2.75, 29.1, -0.5], [49.25, 29.1, -0.5]] } } }, "frames": { - "MyScreen": { - "size": [350, 250], + "Screen": { + "size": [1920, 1080], "layers": { "MyLayer": { "aoi_scene": { - "BlueTriangle": [[100, 50], [250, 50], [175, 200]] - } + "GeoSector": [[860, 160], [1380, 100], [1660, 400], [1380, 740], [1440, 960], [920, 920], [680, 800], [640, 560]], + "LeftPanel": { + "Rectangle": { + "x": 0, + "y": 0, + "width": 350, + "height": 1080 + } + }, + "CircularWidget": { + "Circle": { + "cx": 1800, + "cy": 120, + "radius": 80 + } + } + } } } } diff --git a/docs/user_guide/aruco_markers_pipeline/aoi_3d_projection.md b/docs/user_guide/aruco_markers_pipeline/aoi_3d_projection.md index acbe31d..0d58d9a 100644 --- a/docs/user_guide/aruco_markers_pipeline/aoi_3d_projection.md +++ b/docs/user_guide/aruco_markers_pipeline/aoi_3d_projection.md @@ -24,9 +24,8 @@ Here is the previous extract where one layer is added to [ArUcoScene](../../arga "layers": { "MyLayer": { "aoi_scene": { - "YellowSquare": [[6.2, -7.275252, 25.246159], [31.2, -7.275252, 25.246159], [31.2, 1.275252, 1.753843], [6.2, 1.275252, 1.753843]], - "GrayRectangle": [[2.5, 2.5, -0.5], [37.5, 2.5, -0.5], [37.5, 27.5, -0.5], [2.5, 27.5, -0.5]], - "BlueTriangle": [[12.5, 7.5, -0.5], [27.5, 7.5, -0.5], [20, 22.5, -0.5]] + "Sheet": [[14.2, -3, 28.35], [35.2, -3, 28.35], [14.2, -3, -1.35], [35.2, -3, -1.35]], + "Screen": [[2.75, 2.9, -0.5], [49.25, 2.9, -0.5], [2.75, 29.1, -0.5], [49.25, 29.1, -0.5]] } } } diff --git a/docs/user_guide/aruco_markers_pipeline/aruco_markers_description.md b/docs/user_guide/aruco_markers_pipeline/aruco_markers_description.md index b3ea2bb..3addcab 100644 --- a/docs/user_guide/aruco_markers_pipeline/aruco_markers_description.md +++ b/docs/user_guide/aruco_markers_pipeline/aruco_markers_description.md @@ -3,7 +3,7 @@ Set up ArUco markers First of all, ArUco markers needs to be printed and placed into the scene. -Here is an example scene where markers are surrounding a multi-screen workspace with a triangle area inside one of them. +Here is an example scene where markers are surrounding a workspace with a screen and a sheet on the table. 
![Scene](../../img/scene.png) @@ -69,19 +69,19 @@ vn 0.0000 0.0000 1.0000 s off f 1//1 2//1 4//1 3//1 o DICT_APRILTAG_16h5#1_Marker -v -1.767767 23.000002 3.767767 -v 1.767767 23.000002 0.232233 -v -1.767767 28.000002 3.767767 -v 1.767767 28.000002 0.232233 -vn 0.7071 0.0000 0.7071 +v -0.855050 24.000002 4.349232 +v 0.855050 24.000002 -0.349231 +v -0.855050 29.000002 4.349232 +v 0.855050 29.000002 -0.349231 +vn 0.9397 0.0000 0.3420 s off f 5//2 6//2 8//2 7//2 o DICT_APRILTAG_16h5#2_Marker -v 33.000000 -1.767767 4.767767 -v 38.000000 -1.767767 4.767767 -v 33.000000 1.767767 1.232233 -v 38.000000 1.767767 1.232233 -vn 0.0000 0.7071 0.7071 +v 44.000000 0.000000 9.500000 +v 49.000000 0.000000 9.500000 +v 44.000000 -0.000000 4.500000 +v 49.000000 -0.000000 4.500000 +vn 0.0000 1.0000 -0.0000 s off f 9//3 10//3 12//3 11//3 ``` @@ -110,12 +110,12 @@ JSON file format allows to describe markers places using translation and euler a "rotation": [0, 0, 0] }, "1": { - "translation": [0, 25.5, 2], - "rotation": [0, 45, 0] + "translation": [0, 26.5, 2], + "rotation": [0, 70, 0] }, "2": { - "translation": [35.5, 0, 3], - "rotation": [-45, 0, 0] + "translation": [46.5, 0, 7], + "rotation": [-90, 0, 0] } } } diff --git a/docs/user_guide/aruco_markers_pipeline/introduction.md b/docs/user_guide/aruco_markers_pipeline/introduction.md index 5a07b49..26294f7 100644 --- a/docs/user_guide/aruco_markers_pipeline/introduction.md +++ b/docs/user_guide/aruco_markers_pipeline/introduction.md @@ -1,11 +1,11 @@ Overview ======== -This section explains how to build augmented reality pipelines based on ArUco Markers technology for various use cases. +This section explains how to build augmented reality pipelines based on [ArUco Markers technology](https://www.sciencedirect.com/science/article/abs/pii/S0031320314000235) for various use cases. -The OpenCV library provides a module to detect fiducial markers into a picture and estimate their poses (cf [OpenCV ArUco tutorial page](https://docs.opencv.org/4.x/d5/dae/tutorial_aruco_detection.html)). +The OpenCV library provides a module to detect fiducial markers into a picture and estimate their poses. -![OpenCV ArUco markers](https://pyimagesearch.com/wp-content/uploads/2020/12/aruco_generate_tags_header.png) +![OpenCV ArUco markers](../../img/opencv_aruco.png) The ArGaze [ArUcoMarkers submodule](../../argaze.md/#argaze.ArUcoMarkers) eases markers creation, markers detection and 3D scene pose estimation through a set of high level classes. diff --git a/docs/user_guide/aruco_markers_pipeline/pose_estimation.md b/docs/user_guide/aruco_markers_pipeline/pose_estimation.md index 5dcde6f..6027039 100644 --- a/docs/user_guide/aruco_markers_pipeline/pose_estimation.md +++ b/docs/user_guide/aruco_markers_pipeline/pose_estimation.md @@ -27,12 +27,12 @@ Here is an extract from the JSON [ArUcoCamera](../../argaze.md/#argaze.ArUcoMark "rotation": [0, 0, 0] }, "1": { - "translation": [0, 25.5, 2], - "rotation": [0, 45, 0] + "translation": [0, 26.5, 2], + "rotation": [0, 70, 0] }, "2": { - "translation": [35.5, 0, 3], - "rotation": [-45, 0, 0] + "translation": [46.5, 0, 7], + "rotation": [-90, 0, 0] } } } diff --git a/docs/user_guide/gaze_analysis_pipeline/aoi_2d_description.md b/docs/user_guide/gaze_analysis_pipeline/aoi_2d_description.md index 0d5dbf0..ad8ee74 100644 --- a/docs/user_guide/gaze_analysis_pipeline/aoi_2d_description.md +++ b/docs/user_guide/gaze_analysis_pipeline/aoi_2d_description.md @@ -19,9 +19,9 @@ SVG file format could be exported from most vector graphics editors. 
``` xml
-	<path id="BlueTriangle" d="M960,664 L1113,971 L806,971 Z"/>
-	<rect id="RedSquare" x="268" y="203" width="308" height="308"/>
-	<circle id="GreenCircle" cx="1497" cy="356" r="153"/>
+	<path id="GeoSector" d="M860,160 L1380,100 L1660,400 L1380,740 L1440,960 L920,920 L680,800 L640,560 Z"/>
+	<rect id="LeftPanel" x="0" y="0" width="350" height="1080"/>
+	<circle id="CircularWidget" cx="1800" cy="120" r="80"/>
```
@@ -37,20 +37,20 @@ JSON file format allows to describe AOI.

``` json
{
-	"BlueTriangle":[[960, 664], [1113, 971], [806, 971]],
-	"RedSquare": {
+	"GeoSector": [[860, 160], [1380, 100], [1660, 400], [1380, 740], [1440, 960], [920, 920], [680, 800], [640, 560]],
+	"LeftPanel": {
 		"Rectangle": {
-			"x": 268,
-			"y": 203,
-			"width": 308,
-			"height": 308
+			"x": 0,
+			"y": 0,
+			"width": 350,
+			"height": 1080
 		}
 	},
-	"GreenCircle": {
+	"CircularWidget": {
 		"Circle": {
-			"cx": 1497,
-			"cy": 356,
-			"radius": 153
+			"cx": 1800,
+			"cy": 120,
+			"radius": 80
 		}
 	}
}
diff --git a/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md b/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md
index 9d2b3df..b282f80 100644
--- a/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md
+++ b/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md
@@ -19,10 +19,22 @@ Here is an extract from the JSON [ArFrame](../../argaze.md/#argaze.ArFeatures.Ar
     "layers": {
         "MyLayer": {
             "aoi_scene" : {
-                "upper_left_area": [[0, 0], [960, 0], [960, 540], [0, 540]],
-                "upper_right_area": [[960, 0], [1920, 0], [1920, 540], [960, 540]],
-                "lower_left_area": [[0, 540], [960, 540], [960, 1080], [0, 1080]],
-                "lower_right_area": [[960, 540], [1920, 540], [1920, 1080], [960, 1080]]
+                "GeoSector": [[860, 160], [1380, 100], [1660, 400], [1380, 740], [1440, 960], [920, 920], [680, 800], [640, 560]],
+                "LeftPanel": {
+                    "Rectangle": {
+                        "x": 0,
+                        "y": 0,
+                        "width": 350,
+                        "height": 1080
+                    }
+                },
+                "CircularWidget": {
+                    "Circle": {
+                        "cx": 1800,
+                        "cy": 120,
+                        "radius": 80
+                    }
+                }
             },
             "aoi_matcher": {
                 "DeviationCircleCoverage": {
@@ -63,13 +75,13 @@ The name of an [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer). Basically

 The set of 2D AOI into the layer as defined at [2D AOI description chapter](aoi_2d_description.md).

-![AOI Scene](../../img/ar_layer_aoi_scene.png)
+![AOI scene](../../img/aoi_2d_description.png)

 ### *aoi_matcher*

 The first [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) pipeline step aims to make match identified gaze movement with a layer's AOI.

-![AOI Matcher](../../img/ar_layer_aoi_matcher.png)
+![AOI matcher](../../img/aoi_matcher.png)

 The matching algorithm can be selected by instantiating a particular [AOIMatcher from GazeAnalysis submodule](pipeline_modules/aoi_matchers.md) or [from another python package](advanced_topics/module_loading.md).

@@ -82,7 +94,7 @@ In the example file, the choosen matching algorithm is the [Deviation Circle Cov

 The second [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) pipeline step aims to build a [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath) defined as a list of [AOIScanSteps](../../argaze.md/#argaze.GazeFeatures.AOIScanStep) made by a set of successive fixations/saccades onto a same AOI.

-![AOI Scan Path](../../img/ar_layer_aoi_scan_path.png)
+![AOI scan path](../../img/aoi_scan_path.png)

 Once gaze movements are matched to AOI, they are automatically appended to the AOIScanPath if required.

diff --git a/docs/user_guide/gaze_analysis_pipeline/background.md b/docs/user_guide/gaze_analysis_pipeline/background.md
index a7d59f6..ee27495 100644
--- a/docs/user_guide/gaze_analysis_pipeline/background.md
+++ b/docs/user_guide/gaze_analysis_pipeline/background.md
@@ -3,7 +3,7 @@ Add a background

 Background is an optional [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) attribute to display any image behind pipeline visualisation.
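As a side note on the shape-based AOI descriptions shown in the extracts above, each JSON shape entry maps to an [AreaOfInterest](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.AreaOfInterest) instance. Here is a minimal sketch of that conversion, relying on the `AOIFeatures.AreaOfInterest.from_dict` call that the SVG loading code also uses; the variable name is illustrative only.

``` python
from argaze.AreaOfInterest import AOIFeatures

# Build the "CircularWidget" AOI from its JSON shape description,
# the same way the AOI scene loaders do when parsing such a file
circular_widget = AOIFeatures.AreaOfInterest.from_dict({
    "Circle": {
        "cx": 1800,
        "cy": 120,
        "radius": 80
    }
})

# The result behaves like any point-based AOI: a 2D polygon approximating the circle
print(circular_widget.dimension)
```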
-![Background](../../img/ar_frame_background.png) +![Background](../../img/background.png) ## Load and display ArFrame background @@ -16,7 +16,7 @@ Here is an extract from the JSON ArFrame configuration file where a background p "name": "My FullHD screen", "size": [1920, 1080], ... - "background": "./joconde.png", + "background": "./bosch.png", ... "image_parameters": { ... diff --git a/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md b/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md index 8ddd97a..3b21cbd 100644 --- a/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md +++ b/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md @@ -59,7 +59,7 @@ The size of the [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) defines th The first [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) pipeline step is to identify fixations or saccades from consecutive timestamped gaze positions. -![Gaze Movement Identifier](../../img/ar_frame_gaze_movement_identifier.png) +![Gaze movement identifier](../../img/gaze_movement_identifier.png) The identification algorithm can be selected by instantiating a particular [GazeMovementIdentifier from GazeAnalysis submodule](pipeline_modules/gaze_movement_identifiers.md) or [from another python package](advanced_topics/module_loading.md). @@ -75,7 +75,7 @@ In the example file, the choosen identification algorithm is the [Dispersion Thr The second [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) pipeline step aims to build a [ScanPath](../../argaze.md/#argaze.GazeFeatures.ScanPath) defined as a list of [ScanSteps](../../argaze.md/#argaze.GazeFeatures.ScanStep) made by a fixation and a consecutive saccade. -![Scan Path](../../img/ar_frame_scan_path.png) +![Scan path](../../img/scan_path.png) Once fixations and saccades are identified, they are automatically appended to the ScanPath if required. diff --git a/docs/user_guide/gaze_analysis_pipeline/heatmap.md b/docs/user_guide/gaze_analysis_pipeline/heatmap.md index fe4246e..5310d64 100644 --- a/docs/user_guide/gaze_analysis_pipeline/heatmap.md +++ b/docs/user_guide/gaze_analysis_pipeline/heatmap.md @@ -3,7 +3,7 @@ Add a heatmap Heatmap is an optional [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) pipeline step. It is executed at each new gaze position to update heatmap image. -![Heatmap](../../img/ar_frame_heatmap.png) +![Heatmap](../../img/heatmap.png) ## Enable and display ArFrame heatmap diff --git a/docs/user_guide/gaze_analysis_pipeline/timestamped_gaze_positions_edition.md b/docs/user_guide/gaze_analysis_pipeline/timestamped_gaze_positions_edition.md index 93d2a65..2156f3b 100644 --- a/docs/user_guide/gaze_analysis_pipeline/timestamped_gaze_positions_edition.md +++ b/docs/user_guide/gaze_analysis_pipeline/timestamped_gaze_positions_edition.md @@ -3,7 +3,7 @@ Edit timestamped gaze positions Whatever eye data comes from a file on disk or from a live stream, timestamped gaze positions are required before to go further. 
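To make this concrete, here is a minimal sketch of feeding timestamped gaze positions to a pipeline one by one, assuming `ar_frame` is an already configured [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) instance; the sample values are illustrative only.

``` python
from argaze import GazeFeatures

# Hypothetical (timestamp, x, y) samples standing for file or live stream data
for timestamp, x, y in [(0, 123, 456), (10, 130, 450), (20, 135, 448)]:

    # Wrap each raw point into a GazePosition
    gaze_position = GazeFeatures.GazePosition((x, y))

    # Pass the timestamped gaze position to the frame pipeline
    ar_frame.look(timestamp, gaze_position)
```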
-![Timestamped Gaze Positions](../../img/timestamped_gaze_positions.png) +![Timestamped gaze positions](../../img/timestamped_gaze_positions.png) ## Import gaze positions from CSV file diff --git a/docs/user_guide/gaze_analysis_pipeline/visualisation.md b/docs/user_guide/gaze_analysis_pipeline/visualisation.md index c9cbf2c..cf6fa41 100644 --- a/docs/user_guide/gaze_analysis_pipeline/visualisation.md +++ b/docs/user_guide/gaze_analysis_pipeline/visualisation.md @@ -3,7 +3,7 @@ Visualize pipeline steps Visualisation is not a pipeline step but each [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) pipeline steps outputs can be drawn in real time or afterward, depending of application purpose. -![ArFrame visualisation](../../img/ar_frame_visualisation.png) +![ArFrame visualisation](../../img/visualisation.png) ## Add image parameters to ArFrame JSON configuration file -- cgit v1.1 From eddfdc69b27c1b32ba0001ba4f147810eabec549 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Wed, 27 Sep 2023 18:03:29 +0200 Subject: Adding annotation. --- src/argaze/ArUcoMarkers/ArUcoDetector.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/argaze/ArUcoMarkers/ArUcoDetector.py b/src/argaze/ArUcoMarkers/ArUcoDetector.py index 3260d00..305bee2 100644 --- a/src/argaze/ArUcoMarkers/ArUcoDetector.py +++ b/src/argaze/ArUcoMarkers/ArUcoDetector.py @@ -103,7 +103,7 @@ class DetectorParameters(): return f'{self}' - def __format__(self, spec) -> str: + def __format__(self, spec: str) -> str: """Formated detector parameters string representation. Parameters: -- cgit v1.1 From 66b84b019fe760a2cb9901a9f17b2d202d935ba4 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Wed, 27 Sep 2023 18:03:57 +0200 Subject: Allowing to load ellipse from SVG file. 
---
 src/argaze/AreaOfInterest/AOI2DScene.py  | 15 +++++++++++++++
 src/argaze/AreaOfInterest/AOIFeatures.py | 13 +++++++++++++
 2 files changed, 28 insertions(+)

diff --git a/src/argaze/AreaOfInterest/AOI2DScene.py b/src/argaze/AreaOfInterest/AOI2DScene.py
index f8599c5..062044f 100644
--- a/src/argaze/AreaOfInterest/AOI2DScene.py
+++ b/src/argaze/AreaOfInterest/AOI2DScene.py
@@ -91,6 +91,21 @@ class AOI2DScene(AOIFeatures.AOIScene):

 			new_areas[circle.getAttribute('id')] = AOIFeatures.AreaOfInterest.from_dict(circle_dict)

+		# Load SVG ellipse
+		for ellipse in description_file.getElementsByTagName('ellipse'):
+
+			# Convert ellipse element into dict
+			ellipse_dict = {
+				"Ellipse": {
+					'cx': float(ellipse.getAttribute('cx')),
+					'cy': float(ellipse.getAttribute('cy')),
+					'rx': float(ellipse.getAttribute('rx')),
+					'ry': float(ellipse.getAttribute('ry'))
+				}
+			}
+
+			new_areas[ellipse.getAttribute('id')] = AOIFeatures.AreaOfInterest.from_dict(ellipse_dict)
+
 		return AOI2DScene(new_areas)

 	def draw(self, image: numpy.array, draw_aoi: dict = None, exclude=[]):
diff --git a/src/argaze/AreaOfInterest/AOIFeatures.py b/src/argaze/AreaOfInterest/AOIFeatures.py
index dfbb165..2d5b9b1 100644
--- a/src/argaze/AreaOfInterest/AOIFeatures.py
+++ b/src/argaze/AreaOfInterest/AOIFeatures.py
@@ -78,6 +78,19 @@ class AreaOfInterest(numpy.ndarray):

 			return AreaOfInterest(points)

+		elif shape == 'Ellipse':
+
+			cx = shape_data.pop('cx')
+			cy = shape_data.pop('cy')
+			rx = shape_data.pop('rx')
+			ry = shape_data.pop('ry')
+
+			# TODO: Use pygeos
+			N = 32
+			points = [(math.cos(2*math.pi / N*x) * rx + cx, math.sin(2*math.pi / N*x) * ry + cy) for x in range(0, N+1)]
+
+			return AreaOfInterest(points)
+
 	@property
 	def dimension(self) -> int:
 		"""Number of axis coding area points positions."""
--
cgit v1.1


From 2d59cfc56590ed356a30d28cc52c00b533ab7a9e Mon Sep 17 00:00:00 2001
From: Théo de la Hogue
Date: Wed, 27 Sep 2023 23:08:38 +0200
Subject: Removing hidden sections and chapters.
--- docs/index.md | 2 +- docs/user_guide/areas_of_interest/aoi_matching.md | 48 ------ docs/user_guide/areas_of_interest/heatmap.md | 40 ----- docs/user_guide/areas_of_interest/introduction.md | 8 - .../areas_of_interest/vision_cone_filtering.md | 18 --- docs/user_guide/gaze_features/gaze_movement.md | 163 -------------------- docs/user_guide/gaze_features/gaze_position.md | 98 ------------ docs/user_guide/gaze_features/introduction.md | 7 - docs/user_guide/gaze_features/scan_path.md | 169 --------------------- .../timestamped_data/data_synchronisation.md | 106 ------------- docs/user_guide/timestamped_data/introduction.md | 6 - .../timestamped_data/ordered_dictionary.md | 19 --- .../pandas_dataframe_conversion.md | 41 ----- .../timestamped_data/saving_and_loading.md | 14 -- mkdocs.yml | 19 --- 15 files changed, 1 insertion(+), 757 deletions(-) delete mode 100644 docs/user_guide/areas_of_interest/aoi_matching.md delete mode 100644 docs/user_guide/areas_of_interest/heatmap.md delete mode 100644 docs/user_guide/areas_of_interest/introduction.md delete mode 100644 docs/user_guide/areas_of_interest/vision_cone_filtering.md delete mode 100644 docs/user_guide/gaze_features/gaze_movement.md delete mode 100644 docs/user_guide/gaze_features/gaze_position.md delete mode 100644 docs/user_guide/gaze_features/introduction.md delete mode 100644 docs/user_guide/gaze_features/scan_path.md delete mode 100644 docs/user_guide/timestamped_data/data_synchronisation.md delete mode 100644 docs/user_guide/timestamped_data/introduction.md delete mode 100644 docs/user_guide/timestamped_data/ordered_dictionary.md delete mode 100644 docs/user_guide/timestamped_data/pandas_dataframe_conversion.md delete mode 100644 docs/user_guide/timestamped_data/saving_and_loading.md diff --git a/docs/index.md b/docs/index.md index f234a94..00e2e29 100644 --- a/docs/index.md +++ b/docs/index.md @@ -24,7 +24,7 @@ Once incoming data are formatted as required, all those gaze analysis features c ## Augmented reality based on ArUco markers pipeline -Things goes harder when gaze data comes from head-mounted eye tracker devices. That's why **ArGaze** provides **Augmented Reality (AR)** support to map **Areas Of Interest (AOI)** on OpenCV ArUco markers. +Things goes harder when gaze data comes from head-mounted eye tracker devices. That's why **ArGaze** provides **Augmented Reality (AR)** support to map **Areas Of Interest (AOI)** on [OpenCV ArUco markers](https://www.sciencedirect.com/science/article/abs/pii/S0031320314000235). ![ArUco pipeline axis](img/aruco_pipeline_axis.png) diff --git a/docs/user_guide/areas_of_interest/aoi_matching.md b/docs/user_guide/areas_of_interest/aoi_matching.md deleted file mode 100644 index 60467f9..0000000 --- a/docs/user_guide/areas_of_interest/aoi_matching.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: AOI matching ---- - -AOI matching -============ - -Once [AOI3DScene](../../argaze.md/#argaze.AreaOfInterest.AOI3DScene) is projected as [AOI2DScene](../../argaze.md/#argaze.AreaOfInterest.AOI2DScene), it could be needed to know which AOI is looked. - -The [AreaOfInterest](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.AreaOfInterest) class in [AOIFeatures](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures) provides two ways to accomplish such task. - -## Pointer-based matching - -Test if 2D pointer is inside or not AOI using contains_point() method as illustrated below. 
- -![Contains point](../../img/contains_point.png) - -``` python -pointer = (x, y) - -for name, aoi in aoi2D_scene.items(): - - if aoi.contains_point(pointer): - - # Do something with looked aoi - ... - -``` - -It is also possible to get where a pointer is looking inside an AOI provided that AOI is a rectangular plane: - -``` python - -inner_x, inner_y = aoi.inner_axis(pointer) - -``` - -## Circle-based matching - -As positions have limited accuracy, it is possible to define a radius around a pointer to test circle intersection with AOI. - -![Circle intersection](../../img/circle_intersection.png) - -``` python - -intersection_shape, intersection_aoi_ratio, intersection_circle_ratio = aoi.circle_intersection(pointer, radius) - -``` diff --git a/docs/user_guide/areas_of_interest/heatmap.md b/docs/user_guide/areas_of_interest/heatmap.md deleted file mode 100644 index 450c033..0000000 --- a/docs/user_guide/areas_of_interest/heatmap.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Heatmap ---- - -Heatmap -========= - -[AOIFeatures](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures) provides [Heatmap](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.Heatmap) class to draw heatmap image. - -## Point spread - -The **point_spread** method draw a gaussian point spread into heatmap image at a given pointer position. - -![Point spread](../../img/point_spread.png) - -## Heatmap - -Heatmap visualisation allows to show where a pointer is most of the time. - -![Heatmap](../../img/heatmap.png) - -```python -from argaze.AreaOfInterest import AOIFeatures - -# Create heatmap of 800px * 600px resolution -heatmap = AOIFeatures.Heatmap((800, 600)) - -# Initialize heatmap -heatmap.init() - -# Assuming a pointer position (x, y) is moving inside frame -...: - - # Update heatmap at pointer position - heatmap.update((x, y), sigma=0.05) - - # Do something with heatmap image - ... heatmap.image - -``` \ No newline at end of file diff --git a/docs/user_guide/areas_of_interest/introduction.md b/docs/user_guide/areas_of_interest/introduction.md deleted file mode 100644 index 9467963..0000000 --- a/docs/user_guide/areas_of_interest/introduction.md +++ /dev/null @@ -1,8 +0,0 @@ -About Areas Of Interest (AOI) -============================= - -The [AreaOfInterest submodule](../../argaze.md/#argaze.AreaOfInterest) allows to deal with AOI through a set of high level classes: - -* [AOIFeatures](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures) -* [AOI3DScene](../../argaze.md/#argaze.AreaOfInterest.AOI3DScene) -* [AOI2DScene](../../argaze.md/#argaze.AreaOfInterest.AOI2DScene) \ No newline at end of file diff --git a/docs/user_guide/areas_of_interest/vision_cone_filtering.md b/docs/user_guide/areas_of_interest/vision_cone_filtering.md deleted file mode 100644 index 5c377bf..0000000 --- a/docs/user_guide/areas_of_interest/vision_cone_filtering.md +++ /dev/null @@ -1,18 +0,0 @@ -Vision cone filtering -===================== - -The [AOI3DScene](../../argaze.md/#argaze.AreaOfInterest.AOI3DScene) provides cone clipping support in order to select only AOI which are inside vision cone field. 
- -![Vision cone](../../img/vision_cone.png) - -``` python -# Transform scene into camera referential -aoi3D_camera = aoi3D_scene.transform(tvec, rmat) - -# Get aoi inside vision cone field -# The vision cone tip is positionned behind the head -aoi3D_inside, aoi3D_outside = aoi3D_camera.vision_cone(cone_radius=300, cone_height=150, cone_tip=[0., 0., -20.]) - -# Keep only aoi inside vision cone field -aoi3D_scene = aoi3D_scene.copy(exclude=aoi3D_outside.keys()) -``` diff --git a/docs/user_guide/gaze_features/gaze_movement.md b/docs/user_guide/gaze_features/gaze_movement.md deleted file mode 100644 index 83f67e1..0000000 --- a/docs/user_guide/gaze_features/gaze_movement.md +++ /dev/null @@ -1,163 +0,0 @@ -Gaze movement -============= - -## Definition - -!!! note - - *"The act of classifying eye movements into distinct events is, on a general level, driven by a desire to isolate different intervals of the data stream strongly correlated with certain oculomotor or cognitive properties."* - - Citation from ["One algorithm to rule them all? An evaluation and discussion of ten eye movement event-detection algorithms"](https://link.springer.com/article/10.3758/s13428-016-0738-9) article. - -[GazeFeatures](../../argaze.md/#argaze.GazeFeatures) defines abstract [GazeMovement](../../argaze.md/#argaze.GazeFeatures.GazeMovement) class, then abstract [Fixation](../../argaze.md/#argaze.GazeFeatures.Fixation) and [Saccade](../../argaze.md/#argaze.GazeFeatures.Saccade) classes which inherit from [GazeMovement](../../argaze.md/#argaze.GazeFeatures.GazeMovement). - -The **positions** [GazeMovement](../../argaze.md/#argaze.GazeFeatures.GazeMovement) attribute contain all [GazePositions](../../argaze.md/#argaze.GazeFeatures.GazePosition) belonging to itself. - -![Fixation and Saccade](../../img/fixation_and_saccade.png) - -## Identification - -[GazeFeatures](../../argaze.md/#argaze.GazeFeatures) defines abstract [GazeMovementIdentifier](../../argaze.md/#argaze.GazeFeatures.GazeMovementIdentifier) classe to let add various identification algorithms. - -Some gaze movement identification algorithms are available thanks to [GazeAnalysis](../../argaze.md/#argaze.GazeAnalysis) submodule: - -* [Dispersion threshold identification (I-DT)](../../argaze.md/#argaze.GazeAnalysis.DispersionThresholdIdentification) -* [Velocity threshold identification (I-VT)](../../argaze.md/#argaze.GazeAnalysis.VelocityThresholdIdentification) - -### Identify method - -[GazeMovementIdentifier.identify](../../argaze.md/#argaze.GazeFeatures.GazeMovementIdentifier.identify) method allows to fed its identification algorithm with successive gaze positions to output Fixation, Saccade or any kind of GazeMovement instances. 
- -Here is a sample of code based on [I-DT](../../argaze.md/#argaze.GazeAnalysis.DispersionThresholdIdentification) algorithm to illustrate how to use it: - -``` python -from argaze import GazeFeatures -from argaze.GazeAnalysis import DispersionThresholdIdentification - -# Create a gaze movement identifier based on dispersion algorithm with 50px max deviation 200 ms max duration thresholds -gaze_movement_identifier = DispersionThresholdIdentification.GazeMovementIdentifier(50, 200) - -# Assuming that timestamped gaze positions are provided through live stream or later data reading -...: - - gaze_movement = gaze_movement_identifier.identify(timestamp, gaze_position) - - # Fixation identified - if GazeFeatures.is_fixation(gaze_movement): - - # Access to first gaze position of identified fixation - start_ts, start_position = gaze_movement.positions.first - - # Access to fixation duration - print('duration: {gaze_movement.duration}') - - # Iterate over all gaze positions of identified fixation - for ts, position in gaze_movement.positions.items(): - - # Do something with each fixation position - ... - - # Saccade identified - elif GazeFeatures.is_saccade(gaze_movement): - - # Access to first gaze position of identified saccade - start_ts, start_position = gaze_movement.positions.first - - # Access to saccade amplitude - print('amplitude: {gaze_movement.amplitude}') - - # Iterate over all gaze positions of identified saccade - for ts, position in gaze_movement.positions.items(): - - # Do something with each saccade position - ... - - # No gaze movement identified - else: - - continue - -``` - -### Browse method - -[GazeMovementIdentifier.browse](../../argaze.md/#argaze.GazeFeatures.GazeMovementIdentifier.browse) method allows to pass a [TimeStampedGazePositions](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazePositions) buffer to apply identification algorithm on all gaze positions inside. - -Identified gaze movements are returned through: - -* [TimeStampedGazeMovements](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazeMovements) instance where all fixations are stored by starting gaze position timestamp. -* [TimeStampedGazeMovements](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazeMovements) instance where all saccades are stored by starting gaze position timestamp. -* [TimeStampedGazeStatus](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazeStatus) instance where all gaze positions are linked to a fixation or saccade index. - -``` python -# Assuming that timestamped gaze positions are provided through data reading - -ts_fixations, ts_saccades, ts_status = gaze_movement_identifier.browse(ts_gaze_positions) - -``` - -* ts_fixations would look like: - -|timestamp|positions |duration|dispersion|focus | -|:--------|:-------------------------------------------------------------|:-------|:---------|:--------| -|60034 |{"60034":[846,620], "60044":[837,641], "60054":[835,649], ...}|450 |40 |(840,660)| -|60504 |{"60504":[838,667], "60514":[838,667], "60524":[837,669], ...}|100 |38 |(834,651)| -|... |... |... |.. |... | - -* ts_saccades would look like: - -|timestamp|positions |duration| -|:--------|:---------------------------------------|:-------| -|60484 |{"60484":[836, 669], "60494":[837, 669]}|10 | -|60594 |{"60594":[833, 613], "60614":[927, 601]}|20 | -|... |... |... | - -* ts_status would look like: - -|timestamp|position |type |index| -|:--------|:---------|:-------|:----| -|60034 |(846, 620)|Fixation|1 | -|60044 |(837, 641)|Fixation|1 | -|... |... |... |. 
| -|60464 |(836, 668)|Fixation|1 | -|60474 |(836, 668)|Fixation|1 | -|60484 |(836, 669)|Saccade |1 | -|60494 |(837, 669)|Saccade |1 | -|60504 |(838, 667)|Fixation|2 | -|60514 |(838, 667)|Fixation|2 | -|... |... |... |. | -|60574 |(825, 629)|Fixation|2 | -|60584 |(829, 615)|Fixation|2 | -|60594 |(833, 613)|Saccade |2 | -|60614 |(927, 601)|Saccade |2 | -|60624 |(933, 599)|Fixation|3 | -|60634 |(934, 603)|Fixation|3 | -|... |... |... |. | - - -!!! note - [TimeStampedGazeMovements](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazeMovements), [TimeStampedGazeMovements](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazeMovements) and [TimeStampedGazeStatus](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazeStatus) classes inherit from [TimeStampedBuffer](../../argaze.md/#argaze.DataStructures.TimeStampedBuffer) class. - - Read [Timestamped data](../timestamped_data/introduction.md) section to understand all features it provides. - -### Generator method - -[GazeMovementIdentifier](../../argaze.md/#argaze.GazeFeatures.GazeMovementIdentifier) can be called with a [TimeStampedGazePositions](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazePositions) buffer in argument to generate gaze movement each time one is identified. - -``` python -# Assuming that timestamped gaze positions are provided through data reading - -for ts, gaze_movement in gaze_movement_identifier(ts_gaze_positions): - - # Fixation identified - if GazeFeatures.is_fixation(gaze_movement): - - # Do something with each fixation - ... - - # Saccade identified - elif GazeFeatures.is_saccade(gaze_movement): - - # Do something with each saccade - ... -``` \ No newline at end of file diff --git a/docs/user_guide/gaze_features/gaze_position.md b/docs/user_guide/gaze_features/gaze_position.md deleted file mode 100644 index 48495b4..0000000 --- a/docs/user_guide/gaze_features/gaze_position.md +++ /dev/null @@ -1,98 +0,0 @@ -Gaze position -============= - -[GazeFeatures](../../argaze.md/#argaze.GazeFeatures) defines a [GazePosition](../../argaze.md/#argaze.GazeFeatures.GazePosition) class to handle point coordinates with a precision value. - -``` python -from argaze import GazeFeatures - -# Define a basic gaze position -gaze_position = GazeFeatures.GazePosition((123, 456)) - -# Define a gaze position with a precision value -gaze_position = GazeFeatures.GazePosition((789, 765), precision=10) - -# Access to gaze position value and precision -print(f'position: {gaze_position.value}') -print(f'precision: {gaze_position.precision}') - -``` - -## Validity - -[GazeFeatures](../../argaze.md/#argaze.GazeFeatures) defines also a [UnvalidGazePosition](../../argaze.md/#argaze.GazeFeatures.UnvalidGazePosition) class that inherits from [GazePosition](../../argaze.md/#argaze.GazeFeatures.GazePosition) to handle case where no gaze position exists because of any specific device reason. - -``` python -from argaze import GazeFeatures - -# Define a basic unvalid gaze position -gaze_position = GazeFeatures.UnvalidGazePosition() - -# Define a basic unvalid gaze position with a message value -gaze_position = GazeFeatures.UnvalidGazePosition("Something bad happened") - -# Access to gaze position validity -print(f'validity: {gaze_position.valid}') - -``` - -## Distance - -[GazePosition](../../argaze.md/#argaze.GazeFeatures.GazePosition) class provides a **distance** method to calculate the distance to another gaze position instance. 
- -![Distance](../../img/distance.png) - -``` python -# Distance between A and B positions -d = gaze_position_A.distance(gaze_position_B) -``` - -## Overlapping - -[GazePosition](../../argaze.md/#argaze.GazeFeatures.GazePosition) class provides an **overlap** method to test if a gaze position overlaps another one considering their precisions. - -![Gaze overlapping](../../img/overlapping.png) - -``` python -# Check that A overlaps B -if gaze_position_A.overlap(gaze_position_B): - - # Do something if A overlaps B - ... - -# Check that A overlaps B and B overlaps A -if gaze_position_A.overlap(gaze_position_B, both=True): - - # Do something if A overlaps B AND B overlaps A - ... -``` - -## Timestamped gaze positions - -[TimeStampedGazePositions](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazePositions) inherits from [TimeStampedBuffer](../../argaze.md/#argaze.DataStructures.TimeStampedBuffer) class to handle especially gaze positions. - -### Import from dataframe - -It is possible to load timestamped gaze positions from a [Pandas DataFrame](https://pandas.pydata.org/docs/getting_started/intro_tutorials/01_table_oriented.html#min-tut-01-tableoriented) object. - -```python -import pandas - -# Load gaze positions from a CSV file into Panda Dataframe -dataframe = pandas.read_csv('gaze_positions.csv', delimiter="\t", low_memory=False) - -# Convert Panda dataframe into TimestampedGazePositions buffer precising the use of each specific column labels -ts_gaze_positions = GazeFeatures.TimeStampedGazePositions.from_dataframe(dataframe, timestamp = 'Recording timestamp [ms]', x = 'Gaze point X [px]', y = 'Gaze point Y [px]') - -``` -### Iterator - -Like [TimeStampedBuffer](../../argaze.md/#argaze.DataStructures.TimeStampedBuffer), [TimeStampedGazePositions](../../argaze.md/#argaze.GazeFeatures.TimeStampedGazePositions) class provides iterator feature: - -```python -for timestamp, gaze_position in ts_gaze_positions.items(): - - # Do something with each gaze position - ... - -``` diff --git a/docs/user_guide/gaze_features/introduction.md b/docs/user_guide/gaze_features/introduction.md deleted file mode 100644 index bf818ba..0000000 --- a/docs/user_guide/gaze_features/introduction.md +++ /dev/null @@ -1,7 +0,0 @@ -Gaze analysis -============= - -This section refers to: - -* [GazeFeatures](../../argaze.md/#argaze.GazeFeatures) -* [GazeAnalysis](../../argaze.md/#argaze.GazeAnalysis) \ No newline at end of file diff --git a/docs/user_guide/gaze_features/scan_path.md b/docs/user_guide/gaze_features/scan_path.md deleted file mode 100644 index 46af28b..0000000 --- a/docs/user_guide/gaze_features/scan_path.md +++ /dev/null @@ -1,169 +0,0 @@ -Scan path -========= - -[GazeFeatures](../../argaze.md/#argaze.GazeFeatures) defines classes to handle successive fixations/saccades and analyse their spatial or temporal properties. - -## Fixation based scan path - -### Definition - -The [ScanPath](../../argaze.md/#argaze.GazeFeatures.ScanPath) class is defined as a list of [ScanSteps](../../argaze.md/#argaze.GazeFeatures.ScanStep) which are defined as a fixation and a consecutive saccade. - -![Fixation based scan path](../../img/scan_path.png) - -As fixations and saccades are identified, the scan path is built by calling respectively [append_fixation](../../argaze.md/#argaze.GazeFeatures.ScanPath.append_fixation) and [append_saccade](../../argaze.md/#argaze.GazeFeatures.ScanPath.append_saccade) methods. 
- -### Analysis - -[GazeFeatures](../../argaze.md/#argaze.GazeFeatures) defines abstract [ScanPathAnalyzer](../../argaze.md/#argaze.GazeFeatures.ScanPathAnalyzer) classe to let add various analysis algorithms. - -Some scan path analysis are available thanks to [GazeAnalysis](../../argaze.md/#argaze.GazeAnalysis) submodule: - -* [K-Coefficient](../../argaze.md/#argaze.GazeAnalysis.KCoefficient) -* [Nearest Neighbor Index](../../argaze.md/#argaze.GazeAnalysis.NearestNeighborIndex) -* [Exploit Explore Ratio](../../argaze.md/#argaze.GazeAnalysis.ExploitExploreRatio) - -### Example - -Here is a sample of code to illustrate how to built a scan path and analyze it: - -``` python -from argaze import GazeFeatures -from argaze.GazeAnalysis import KCoefficient - -# Create a empty scan path -scan_path = GazeFeatures.ScanPath() - -# Create a K coefficient analyzer -kc_analyzer = KCoefficient.ScanPathAnalyzer() - -# Assuming a gaze movement is identified at ts time -...: - - # Fixation identified - if GazeFeatures.is_fixation(gaze_movement): - - # Append fixation to scan path : no step is created - scan_path.append_fixation(ts, gaze_movement) - - # Saccade identified - elif GazeFeatures.is_saccade(gaze_movement): - - # Append saccade to scan path : a new step should be created - new_step = scan_path.append_saccade(data_ts, gaze_movement) - - # Analyse scan path - if new_step: - - K = kc_analyzer.analyze(scan_path) - - # Do something with K metric - ... -``` - -## AOI based scan path - -### Definition - -The [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath) class is defined as a list of [AOIScanSteps](../../argaze.md/#argaze.GazeFeatures.AOIScanStep) which are defined as set of consecutives fixations looking at a same Area Of Interest (AOI) and a consecutive saccade. - -![AOI based scan path](../../img/aoi_scan_path.png) - -As fixations and saccades are identified, the scan path is built by calling respectively [append_fixation](../../argaze.md/#argaze.GazeFeatures.AOIScanPath.append_fixation) and [append_saccade](../../argaze.md/#argaze.GazeFeatures.AOIScanPath.append_saccade) methods. - -### Analysis - -[GazeFeatures](../../argaze.md/#argaze.GazeFeatures) defines abstract [AOIScanPathAnalyzer](../../argaze.md/#argaze.GazeFeatures.AOIScanPathAnalyzer) classe to let add various analysis algorithms. - -Some scan path analysis are available thanks to [GazeAnalysis](../../argaze.md/#argaze.GazeAnalysis) submodule: - -* [Transition matrix](../../argaze.md/#argaze.GazeAnalysis.TransitionMatrix) -* [Entropy](../../argaze.md/#argaze.GazeAnalysis.Entropy) -* [Lempel-Ziv complexity](../../argaze.md/#argaze.GazeAnalysis.LempelZivComplexity) -* [N-Gram](../../argaze.md/#argaze.GazeAnalysis.NGram) -* [K-modified coefficient](../../argaze.md/#argaze.GazeAnalysis.KCoefficient) - -### Example - -Here is a sample of code to illustrate how to built a AOI scan path and analyze it: - -``` python -from argaze import GazeFeatures -from argaze.GazeAnalysis import LempelZivComplexity - -# Assuming all AOI names are listed -... - -# Create a empty AOI scan path -aoi_scan_path = GazeFeatures.AOIScanPath(aoi_names) - -# Create a Lempel-Ziv complexity analyzer -lzc_analyzer = LempelZivComplexity.AOIScanPathAnalyzer() - -# Assuming a gaze movement is identified at ts time -...: - - # Fixation identified - if GazeFeatures.is_fixation(gaze_movement): - - # Assuming fixation is detected as inside an AOI - ... 
- - # Append fixation to AOI scan path : a new step should be created - new_step = aoi_scan_path.append_fixation(ts, gaze_movement, looked_aoi_name) - - # Analyse AOI scan path - if new_step: - - LZC = kc_analyzer.analyze(aoi_scan_path) - - # Do something with LZC metric - ... - - # Saccade identified - elif GazeFeatures.is_saccade(gaze_movement): - - # Append saccade to scan path : no step is created - aoi_scan_path.append_saccade(data_ts, gaze_movement) - -``` - -### Advanced - -The [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath) class provides some advanced features to analyse it. - -#### Letter sequence - -When a new [AOIScanStep](../../argaze.md/#argaze.GazeFeatures.AOIScanStep) is created, the [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath) internally affects a unique letter index related to its AOI to ease pattern analysis. -Then, the [AOIScanPath letter_sequence](../../argaze.md/#argaze.GazeFeatures.AOIScanPath.letter_sequence) property returns the concatenation of each [AOIScanStep](../../argaze.md/#argaze.GazeFeatures.AOIScanStep) letter. -The [AOIScanPath get_letter_aoi](../../argaze.md/#argaze.GazeFeatures.AOIScanPath.get_letter_aoi) method helps to get back the AOI related to a letter index. - -``` python -# Assuming the following AOI scan path is built: Foo > Bar > Shu > Foo -aoi_scan_path = ... - -# Letter sequence representation should be: 'ABCA' -print(aoi_scan_path.letter_sequence) - -# Output should be: 'Bar' -print(aoi_scan_path.get_letter_aoi('B')) - -``` - -#### Transition matrix - -When a new [AOIScanStep](../../argaze.md/#argaze.GazeFeatures.AOIScanStep) is created, the [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath) internally counts the number of transitions from an AOI to another AOI to ease Markov chain analysis. -Then, the [AOIScanPath transition_matrix](../../argaze.md/#argaze.GazeFeatures.AOIScanPath.transition_matrix) property returns a [Pandas DataFrame](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html) where indexes are transition departures and columns are transition destinations. - -Here is an exemple of transition matrix for the following [AOIScanPath](../../argaze.md/#argaze.GazeFeatures.AOIScanPath): Foo > Bar > Shu > Foo > Bar - -| |Foo|Bar|Shu| -|:--|:--|:--|:--| -|Foo|0 |2 |0 | -|Bar|0 |0 |1 | -|Shu|1 |0 |0 | - - -#### Fixations count - -The [AOIScanPath fixations_count](../../argaze.md/#argaze.GazeFeatures.AOIScanPath.fixations_count) method returns the total number of fixations in the whole scan path and a dictionary to get the fixations count per AOI. diff --git a/docs/user_guide/timestamped_data/data_synchronisation.md b/docs/user_guide/timestamped_data/data_synchronisation.md deleted file mode 100644 index 5190eab..0000000 --- a/docs/user_guide/timestamped_data/data_synchronisation.md +++ /dev/null @@ -1,106 +0,0 @@ -Data synchronisation -==================== - -Recorded data needs to be synchronized to link them before further processings. - -The [TimeStampedBuffer](../../argaze.md/#argaze.DataStructures.TimeStampedBuffer) class provides various methods to help in such task. 
- -## Pop last before - -![Pop last before](../../img/pop_last_before.png) - -The code below shows how to use [pop_last_before](../../argaze.md/#argaze.DataStructures.TimeStampedBuffer.pop_last_before) method in order to synchronise two timestamped data buffers with different timestamps: - -``` python -from argaze import DataStructures - -# Assuming A_data_record and B_data_record are TimeStampedBuffer instances with different timestamps - -for A_ts, A_data in A_data_record.items(): - - try: - - # Get nearest B data before current A data and remove all B data before (including the returned one) - B_ts, B_data = B_data_record.pop_last_before(A_ts) - - # No data stored before A_ts timestamp - except KeyError: - - pass - -``` - -## Pop last until - -![Pop last until](../../img/pop_last_until.png) - -The code below shows how to use [pop_last_until](../../argaze.md/#argaze.DataStructures.TimeStampedBuffer.pop_last_until) method in order to synchronise two timestamped data buffers with different timestamps: - -``` python -from argaze import DataStructures - -# Assuming A_data_record and B_data_record are TimeStampedBuffer instances with different timestamps - -for A_ts, A_data in A_data_record.items(): - - try: - - # Get nearest B data after current A data and remove all B data before - B_ts, B_data = B_data_record.pop_last_until(A_ts) - - # No data stored until A_ts timestamp - except KeyError: - - pass - -``` - -## Get last before - -![Get last before](../../img/get_last_before.png) - -The code below shows how to use [get_last_before](../../argaze.md/#argaze.DataStructures.TimeStampedBuffer.get_last_before) method in order to synchronise two timestamped data buffers with different timestamps: - -``` python -from argaze import DataStructures - -# Assuming A_data_record and B_data_record are TimeStampedBuffer instances with different timestamps - -for A_ts, A_data in A_data_record.items(): - - try: - - # Get nearest B data before current A data - B_ts, B_data = B_data_record.get_last_before(A_ts) - - # No data stored before A_ts timestamp - except KeyError: - - pass - -``` - -## Get last until - -![Get last until](../../img/get_last_until.png) - -The code below shows how to use [get_last_until](../../argaze.md/#argaze.DataStructures.TimeStampedBuffer.get_last_until) method in order to synchronise two timestamped data buffers with different timestamps: - -``` python -from argaze import DataStructures - -# Assuming A_data_record and B_data_record are TimeStampedBuffer instances with different timestamps - -for A_ts, A_data in A_data_record.items(): - - try: - - # Get nearest B data after current A data - B_ts, B_data = B_data_record.get_last_until(A_ts) - - # No data stored until A_ts timestamp - except KeyError: - - pass - -``` diff --git a/docs/user_guide/timestamped_data/introduction.md b/docs/user_guide/timestamped_data/introduction.md deleted file mode 100644 index 974e2be..0000000 --- a/docs/user_guide/timestamped_data/introduction.md +++ /dev/null @@ -1,6 +0,0 @@ -Timestamped data -================ - -Working with wearable eye tracker devices implies to handle various timestamped data like gaze positions, pupills diameter, fixations, saccades, ... - -This section mainly refers to [DataStructures.TimeStampedBuffer](../../argaze.md/#argaze.DataStructures.TimeStampedBuffer) class. 
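The [TimeStampedBuffer](../../argaze.md/#argaze.DataStructures.TimeStampedBuffer) synchronisation methods documented in the chapters above remain available in the library at this point; here is a minimal sketch of one of them, with illustrative buffer contents:

``` python
from argaze import DataStructures

# Two buffers with deliberately different timestamps
A_data_record = DataStructures.TimeStampedBuffer()
B_data_record = DataStructures.TimeStampedBuffer()

A_data_record[10] = 'A data'
B_data_record[8] = 'B data'

try:

    # Get nearest B data before timestamp 10 and remove all B data before it
    B_ts, B_data = B_data_record.pop_last_before(10)

except KeyError:

    # No data stored before the given timestamp
    pass
```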
diff --git a/docs/user_guide/timestamped_data/ordered_dictionary.md b/docs/user_guide/timestamped_data/ordered_dictionary.md deleted file mode 100644 index 64dd899..0000000 --- a/docs/user_guide/timestamped_data/ordered_dictionary.md +++ /dev/null @@ -1,19 +0,0 @@ -Ordered dictionary -================== - -[TimeStampedBuffer](../../argaze.md/#argaze.DataStructures.TimeStampedBuffer) class inherits from [OrderedDict](https://docs.python.org/3/library/collections.html#collections.OrderedDict) as data are de facto ordered by time. - -Any data type can be stored using int or float keys as timestamp. - -```python -from argaze import DataStructures - -# Create a timestamped data buffer -ts_data = DataStructures.TimeStampedBuffer() - -# Store any data type using numeric keys -ts_data[0] = 123 -ts_data[0.1] = "message" -ts_data[0.23] = {"key": value} -... -``` diff --git a/docs/user_guide/timestamped_data/pandas_dataframe_conversion.md b/docs/user_guide/timestamped_data/pandas_dataframe_conversion.md deleted file mode 100644 index 7614e73..0000000 --- a/docs/user_guide/timestamped_data/pandas_dataframe_conversion.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Pandas DataFrame conversion ---- - -Pandas DataFrame conversion -=========================== - -A [Pandas DataFrame](https://pandas.pydata.org/docs/getting_started/intro_tutorials/01_table_oriented.html#min-tut-01-tableoriented) is a python data structure allowing powerful table processings. - -## Export as dataframe - -[TimeStampedBuffer](../../argaze.md/#argaze.DataStructures.TimeStampedBuffer) instance can be converted into dataframe provided that data values are stored as dictionaries. - -```python -from argaze import DataStructures - -# Create a timestamped data buffer -ts_data = DataStructures.TimeStampedBuffer() - -# Store various data as dictionary -ts_data[10] = {"A_key": 0, "B_key": 0.123}} -ts_data[20] = {"A_key": 4, "B_key": 0.567}} -ts_data[30] = {"A_key": 8, "B_key": 0.901}} -... - -# Convert timestamped data buffer into dataframe -ts_buffer_dataframe = ts_buffer.as_dataframe() -``` - -ts_buffer_dataframe would look like: - -|timestamp|A_key|B_key| -|:--------|:----|:----| -|10 |0 |0.123| -|20 |4 |0.567| -|30 |8 |0.901| -|... |... |... | - -## Import from dataframe - -Reversely, [TimeStampedBuffer](../../argaze.md/#argaze.DataStructures.TimeStampedBuffer) instance can be created from dataframe, as a result of which each dataframe columns label will become a key of data value dictionary. Notice that the column containing timestamp values have to be called 'timestamp'. diff --git a/docs/user_guide/timestamped_data/saving_and_loading.md b/docs/user_guide/timestamped_data/saving_and_loading.md deleted file mode 100644 index 4e6a094..0000000 --- a/docs/user_guide/timestamped_data/saving_and_loading.md +++ /dev/null @@ -1,14 +0,0 @@ -Saving and loading -================== - -[TimeStampedBuffer](../../argaze.md/#argaze.DataStructures.TimeStampedBuffer) instance can be saved as and loaded from JSON file format. 
- -```python - -# Save -ts_data.to_json('./data.json') - -# Load -ts_data = DataStructures.TimeStampedBuffer.from_json('./data.json') - -``` diff --git a/mkdocs.yml b/mkdocs.yml index 784c9e2..d00d6e7 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -34,25 +34,6 @@ nav: - user_guide/aruco_markers_pipeline/advanced_topics/scripting.md - user_guide/aruco_markers_pipeline/advanced_topics/optic_parameters_calibration.md - user_guide/aruco_markers_pipeline/advanced_topics/aruco_detector_configuration.md - -# - Areas Of Interest: -# - user_guide/areas_of_interest/introduction.md -# - user_guide/areas_of_interest/aoi_scene_description.md -# - user_guide/areas_of_interest/aoi_scene_projection.md -# - user_guide/areas_of_interest/vision_cone_filtering.md -# - user_guide/areas_of_interest/aoi_matching.md -# - user_guide/areas_of_interest/heatmap.md -# - Gaze Features: -# - user_guide/gaze_features/introduction.md -# - user_guide/gaze_features/gaze_position.md -# - user_guide/gaze_features/gaze_movement.md -# - user_guide/gaze_features/scan_path.md -# - Timestamped data: -# - user_guide/timestamped_data/introduction.md -# - user_guide/timestamped_data/ordered_dictionary.md -# - user_guide/timestamped_data/saving_and_loading.md -# - user_guide/timestamped_data/data_synchronisation.md -# - user_guide/timestamped_data/pandas_dataframe_conversion.md - utils: - user_guide/utils/ready-made_scripts.md - user_guide/utils/demonstrations_scripts.md -- cgit v1.1 From 46560abe16ea95fdfcc3afb1b5fb702aa04e9f1b Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Wed, 27 Sep 2023 23:22:21 +0200 Subject: Updating aruco marker position. --- docs/img/aruco_markers_description.png | Bin 16478 -> 16455 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/docs/img/aruco_markers_description.png b/docs/img/aruco_markers_description.png index b840fcd..d7c139c 100644 Binary files a/docs/img/aruco_markers_description.png and b/docs/img/aruco_markers_description.png differ -- cgit v1.1 From 0ab16a1a93ba62e791489f453d4ec20b7b5655bb Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Wed, 27 Sep 2023 23:22:47 +0200 Subject: Using Screen name instead of MyScreen --- docs/user_guide/aruco_markers_pipeline/aoi_3d_frame.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/user_guide/aruco_markers_pipeline/aoi_3d_frame.md b/docs/user_guide/aruco_markers_pipeline/aoi_3d_frame.md index 032e2b6..8affe91 100644 --- a/docs/user_guide/aruco_markers_pipeline/aoi_3d_frame.md +++ b/docs/user_guide/aruco_markers_pipeline/aoi_3d_frame.md @@ -67,7 +67,7 @@ Now, let's understand the meaning of each JSON entry. An [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) instance can contains multiples [ArFrames](../../argaze.md/#argaze.ArFeatures.ArFrame) stored by name. -### MyScreen +### Screen The name of a 3D AOI **and** of an [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame). Basically useful for visualisation purpose. -- cgit v1.1 From c12e429190b4f63064c81edfa08fb00b8ed8a28c Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Thu, 28 Sep 2023 23:31:33 +0200 Subject: Improving documentation details. 
---
 docs/img/aruco_camera_aoi_frame.png                | Bin 41104 -> 49405 bytes
 docs/img/aruco_camera_aoi_projection.png           | Bin 34892 -> 37858 bytes
 docs/img/scene.png                                 | Bin 32079 -> 41117 bytes
 .../advanced_topics/scripting.md                   |  4 +--
 .../aruco_markers_pipeline/aoi_3d_description.md   |  4 +--
 .../aruco_markers_pipeline/aoi_3d_frame.md         |  2 +-
 .../aruco_markers_pipeline/aoi_3d_projection.md    |  4 +--
 .../aruco_markers_description.md                   |  5 +--
 .../configuration_and_execution.md                 | 14 ++++-----
 .../aruco_markers_pipeline/introduction.md         |  4 +--
 .../gaze_analysis_pipeline/aoi_2d_description.md   |  2 +-
 .../gaze_analysis_pipeline/aoi_analysis.md         |  4 +--
 .../gaze_analysis_pipeline/background.md           |  4 +--
 docs/user_guide/gaze_analysis_pipeline/heatmap.md  |  8 ++---
 .../gaze_analysis_pipeline/introduction.md         |  2 +-
 docs/user_guide/gaze_analysis_pipeline/logging.md  |  4 +--
 .../gaze_analysis_pipeline/visualisation.md        | 34 ++++++++++-----------
 docs/user_guide/utils/ready-made_scripts.md        |  2 +-
 mkdocs.yml                                         |  2 +-
 19 files changed, 47 insertions(+), 52 deletions(-)

diff --git a/docs/img/aruco_camera_aoi_frame.png b/docs/img/aruco_camera_aoi_frame.png
index 944f9ff..f21cc8d 100644
Binary files a/docs/img/aruco_camera_aoi_frame.png and b/docs/img/aruco_camera_aoi_frame.png differ
diff --git a/docs/img/aruco_camera_aoi_projection.png b/docs/img/aruco_camera_aoi_projection.png
index ec708db..df1ec4c 100644
Binary files a/docs/img/aruco_camera_aoi_projection.png and b/docs/img/aruco_camera_aoi_projection.png differ
diff --git a/docs/img/scene.png b/docs/img/scene.png
index 251c7bf..e7edd88 100644
Binary files a/docs/img/scene.png and b/docs/img/scene.png differ
diff --git a/docs/user_guide/aruco_markers_pipeline/advanced_topics/scripting.md b/docs/user_guide/aruco_markers_pipeline/advanced_topics/scripting.md
index 0b2ef52..529bff8 100644
--- a/docs/user_guide/aruco_markers_pipeline/advanced_topics/scripting.md
+++ b/docs/user_guide/aruco_markers_pipeline/advanced_topics/scripting.md
@@ -72,7 +72,7 @@ for name, aruco_scene in aruco_camera.scenes.items():

 ## Pipeline execution outputs

-[ArUcoCamera.watch](../../../argaze.md/#argaze.ArFeatures.ArCamera.watch) method returns many data about pipeline execution.
+[ArUcoCamera.watch](../../../argaze.md/#argaze.ArFeatures.ArCamera.watch) method returns data about pipeline execution.

 ```python
 # Assuming that images are available
 ...:

     # Watch image with ArUco camera
     detection_time, exception = aruco_camera.watch(image)

-    # Do something with pipeline detection times
+    # Do something with pipeline detection time
     ...

     # Do something with pipeline exception
diff --git a/docs/user_guide/aruco_markers_pipeline/aoi_3d_description.md b/docs/user_guide/aruco_markers_pipeline/aoi_3d_description.md
index 502f905..b02bc9e 100644
--- a/docs/user_guide/aruco_markers_pipeline/aoi_3d_description.md
+++ b/docs/user_guide/aruco_markers_pipeline/aoi_3d_description.md
@@ -1,7 +1,7 @@
 Describe 3D AOI
 ===============

-Once [ArUco markers are placed into a scene](aruco_markers_description.md), [areas of interest (AOI)](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.AreaOfInterest) need to be described into the same 3D referential.
+Now that [scene pose is estimated](pose_estimation.md) thanks to [ArUco markers description](aruco_markers_description.md), [areas of interest (AOI)](../../argaze.md/#argaze.AreaOfInterest.AOIFeatures.AreaOfInterest) need to be described into the same 3D referential.
In the example scene, the screen and the sheet are considered as areas of interest. @@ -26,14 +26,12 @@ v 14.200000 -3.000000 28.350000 v 35.200000 -3.000000 28.350000 v 14.200000 -3.000000 -1.35 v 35.200000 -3.000000 -1.35 -s off f 1 2 4 3 o Screen v 2.750000 2.900000 -0.500000 v 49.250000 2.900000 -0.500000 v 2.750000 29.100000 -0.500000 v 49.250000 29.100000 -0.500000 -s off f 5 6 8 7 ``` diff --git a/docs/user_guide/aruco_markers_pipeline/aoi_3d_frame.md b/docs/user_guide/aruco_markers_pipeline/aoi_3d_frame.md index 8affe91..f1ae1f6 100644 --- a/docs/user_guide/aruco_markers_pipeline/aoi_3d_frame.md +++ b/docs/user_guide/aruco_markers_pipeline/aoi_3d_frame.md @@ -69,7 +69,7 @@ An [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) instance can co ### Screen -The name of a 3D AOI **and** of an [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame). Basically useful for visualisation purpose. +The name of a 3D AOI **and** an [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame). Basically useful for visualisation purpose. !!! warning "AOI / Frame names policy" diff --git a/docs/user_guide/aruco_markers_pipeline/aoi_3d_projection.md b/docs/user_guide/aruco_markers_pipeline/aoi_3d_projection.md index 0d58d9a..8c7310b 100644 --- a/docs/user_guide/aruco_markers_pipeline/aoi_3d_projection.md +++ b/docs/user_guide/aruco_markers_pipeline/aoi_3d_projection.md @@ -96,7 +96,7 @@ Now, let's understand the meaning of each JSON entry. ### *layers* -An [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) instance can contains multiples [ArLayers](../../argaze.md/#argaze.ArFeatures.ArLayer) stored by name. +An [ArUcoCamera](../../argaze.md/#argaze.ArFeatures.ArFrame) instance can contains multiples [ArLayers](../../argaze.md/#argaze.ArFeatures.ArLayer) stored by name. ### MyLayer @@ -110,7 +110,7 @@ The name of an [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer). Basically [ArUcoScene](../../argaze.md/#argaze.ArUcoMarkers.ArUcoScene) layers are projected into their dedicated [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) layers when calling the [ArUcoCamera.watch](../../argaze.md/#argaze.ArFeatures.ArCamera.watch) method. -## Add AOI analysis features to ArUcoCamera +## Add AOI analysis features to ArUcoCamera layer When a scene layer is projected into a camera layer, it means that the 3D scene's AOI are transformed into 2D camera's AOI. diff --git a/docs/user_guide/aruco_markers_pipeline/aruco_markers_description.md b/docs/user_guide/aruco_markers_pipeline/aruco_markers_description.md index 3addcab..8104345 100644 --- a/docs/user_guide/aruco_markers_pipeline/aruco_markers_description.md +++ b/docs/user_guide/aruco_markers_pipeline/aruco_markers_description.md @@ -3,7 +3,7 @@ Set up ArUco markers First of all, ArUco markers needs to be printed and placed into the scene. -Here is an example scene where markers are surrounding a workspace with a screen and a sheet on the table. +Here is an example scene where markers are surrounding a workspace with a screen and a sheet on the table (considering the sheet stays static for the moment). 
![Scene](../../img/scene.png) @@ -66,7 +66,6 @@ v 5.000000 0.000000 0.000000 v 0.000000 5.000000 0.000000 v 5.000000 5.000000 0.000000 vn 0.0000 0.0000 1.0000 -s off f 1//1 2//1 4//1 3//1 o DICT_APRILTAG_16h5#1_Marker v -0.855050 24.000002 4.349232 @@ -74,7 +73,6 @@ v 0.855050 24.000002 -0.349231 v -0.855050 29.000002 4.349232 v 0.855050 29.000002 -0.349231 vn 0.9397 0.0000 0.3420 -s off f 5//2 6//2 8//2 7//2 o DICT_APRILTAG_16h5#2_Marker v 44.000000 0.000000 9.500000 @@ -82,7 +80,6 @@ v 49.000000 0.000000 9.500000 v 44.000000 -0.000000 4.500000 v 49.000000 -0.000000 4.500000 vn 0.0000 1.0000 -0.0000 -s off f 9//3 10//3 12//3 11//3 ``` diff --git a/docs/user_guide/aruco_markers_pipeline/configuration_and_execution.md b/docs/user_guide/aruco_markers_pipeline/configuration_and_execution.md index 2205ed2..3bded3a 100644 --- a/docs/user_guide/aruco_markers_pipeline/configuration_and_execution.md +++ b/docs/user_guide/aruco_markers_pipeline/configuration_and_execution.md @@ -29,6 +29,12 @@ Here is a simple JSON [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCa }, "image_parameters": { "background_weight": 1, + "draw_detected_markers": { + "color": [0, 255, 0], + "draw_axes": { + "thickness": 3 + } + }, "draw_gaze_positions": { "color": [0, 255, 255], "size": 2 @@ -40,12 +46,6 @@ Here is a simple JSON [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCa }, "draw_saccades": { "line_color": [255, 0, 255] - }, - "draw_detected_markers": { - "color": [0, 255, 0], - "draw_axes": { - "thickness": 3 - } } } } @@ -135,4 +135,4 @@ Particularly, timestamped gaze positions can be passed one by one to [ArUcoCamer At this point, the [ArUcoCamera.watch](../../argaze.md/#argaze.ArFeatures.ArCamera.watch) method only detects ArUco markers and the [ArUcoCamera.look](../../argaze.md/#argaze.ArFeatures.ArCamera.look) method only process gaze movement identification without any AOI support as no scene description is provided into the JSON configuration file. - Read the next chapters to learn [how to estimate scene pose](pose_estimation.md) and [how to project 3D AOI](aoi_3d_projection.md). \ No newline at end of file + Read the next chapters to learn [how to estimate scene pose](pose_estimation.md), [how to describe 3D scene's AOI](aoi_3d_description.md) and [how to project them into camera frame](aoi_3d_projection.md). \ No newline at end of file diff --git a/docs/user_guide/aruco_markers_pipeline/introduction.md b/docs/user_guide/aruco_markers_pipeline/introduction.md index 26294f7..a83da9a 100644 --- a/docs/user_guide/aruco_markers_pipeline/introduction.md +++ b/docs/user_guide/aruco_markers_pipeline/introduction.md @@ -9,16 +9,16 @@ The OpenCV library provides a module to detect fiducial markers into a picture a The ArGaze [ArUcoMarkers submodule](../../argaze.md/#argaze.ArUcoMarkers) eases markers creation, markers detection and 3D scene pose estimation through a set of high level classes. -First, let's look at the schema below: it gives an overview of the main notions involved in the following chapters. 
+ To build your own ArUco markers pipeline, you need to know: * [How to setup ArUco markers into a scene](aruco_markers_description.md), -* [How to describe scene's AOI](aoi_3d_description.md), * [How to load and execute ArUco markers pipeline](configuration_and_execution.md), * [How to estimate scene pose](pose_estimation.md), +* [How to describe scene's AOI](aoi_3d_description.md), * [How to project 3D AOI into camera frame](aoi_3d_projection.md), * [How to define a 3D AOI as a frame](aoi_3d_frame.md) diff --git a/docs/user_guide/gaze_analysis_pipeline/aoi_2d_description.md b/docs/user_guide/gaze_analysis_pipeline/aoi_2d_description.md index ad8ee74..4b7ed69 100644 --- a/docs/user_guide/gaze_analysis_pipeline/aoi_2d_description.md +++ b/docs/user_guide/gaze_analysis_pipeline/aoi_2d_description.md @@ -29,7 +29,7 @@ Here are common SVG file features needed to describe AOI: * *id* attribute indicates AOI name. * *path* element describes any polygon using only [M, L and Z path intructions](https://www.w3.org/TR/SVG2/paths.html#PathData) -* *rect* and *circle* allow respectively to describe rectangular and circle AOI. +* *rect*, *circle* and *ellipse* allow respectively to describe rectangular, circular and elliptic AOI. ### Edit JSON file description diff --git a/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md b/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md index b282f80..3fd15db 100644 --- a/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md +++ b/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md @@ -1,13 +1,13 @@ Enable AOI analysis =================== -Once [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) is [configured](configuration_and_execution.md) and [2D AOI are described](aoi_2d_description.md), gaze movement can be matched with AOI to build an AOI scan path before analyze it. +Once [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) is [configured](configuration_and_execution.md) and [2D AOI are described](aoi_2d_description.md), fixation can be matched with AOI to build an AOI scan path before analyze it. ![Layer](../../img/ar_layer.png) ## Add ArLayer to ArFrame JSON configuration file -The [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) class defines a space where to make matching of gaze movements with AOI and inside which those matchings need to be analyzed. +The [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) class defines a space where to make matching of fixations with AOI and inside which those matchings need to be analyzed. Here is an extract from the JSON [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) configuration file with a sample where one layer is added: diff --git a/docs/user_guide/gaze_analysis_pipeline/background.md b/docs/user_guide/gaze_analysis_pipeline/background.md index ee27495..a61abdc 100644 --- a/docs/user_guide/gaze_analysis_pipeline/background.md +++ b/docs/user_guide/gaze_analysis_pipeline/background.md @@ -30,10 +30,10 @@ Here is an extract from the JSON ArFrame configuration file where a background p Now, let's understand the meaning of each JSON entry. -### Background +### *background* The path to an image file on disk. -### Background weight +### *background_weight* The weight of background overlay in [ArFrame.image](../../argaze.md/#argaze.ArFeatures.ArFrame.image) between 0 and 1. 
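Since *background_weight* is a plain blending factor, its effect can be sketched with a few lines of OpenCV; this is a conceptual illustration of the weighting, not the actual ArGaze drawing code.

``` python
import cv2
import numpy

# Two same-sized images standing for the background and the pipeline visualisation
background = numpy.zeros((1080, 1920, 3), dtype=numpy.uint8)
visualisation = numpy.zeros((1080, 1920, 3), dtype=numpy.uint8)

# A background_weight of 0.5 would overlay the background half-transparently
image = cv2.addWeighted(background, 0.5, visualisation, 1., 0)
```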
diff --git a/docs/user_guide/gaze_analysis_pipeline/heatmap.md b/docs/user_guide/gaze_analysis_pipeline/heatmap.md index 5310d64..6d9ad18 100644 --- a/docs/user_guide/gaze_analysis_pipeline/heatmap.md +++ b/docs/user_guide/gaze_analysis_pipeline/heatmap.md @@ -33,21 +33,21 @@ Here is an extract from the JSON ArFrame configuration file where heatmap is ena Now, let's understand the meaning of each JSON entry. -### Size +### *size* The heatmap image size in pixel. Higher size implies higher CPU load. -### Sigma +### *sigma* The gaussian point spreading to draw at each gaze position. ![Point spread](../../img/point_spread.png) -### Buffer +### *buffer* The size of point spread images buffer (0 means no buffering) to visualize only last N gaze positions. -### Heatmap weight +### *heatmap_weight* The weight of heatmap overlay in [ArFrame.image](../../argaze.md/#argaze.ArFeatures.ArFrame.image) between 0 and 1. diff --git a/docs/user_guide/gaze_analysis_pipeline/introduction.md b/docs/user_guide/gaze_analysis_pipeline/introduction.md index 76a146c..339dd90 100644 --- a/docs/user_guide/gaze_analysis_pipeline/introduction.md +++ b/docs/user_guide/gaze_analysis_pipeline/introduction.md @@ -13,7 +13,7 @@ To build your own gaze analysis pipeline, you need to know: * [How to load and execute gaze analysis pipeline](configuration_and_execution.md), * [How to describe frame's AOI](aoi_2d_description.md), * [How to enable AOI analysis](aoi_analysis.md), -* [How to visualize ArFrame and ArLayers](visualisation.md), +* [How to visualize pipeline steps outputs](visualisation.md), * [How to log resulted gaze analysis](logging.md), * [How to make heatmap image](heatmap.md). * [How to add a background image](background.md). diff --git a/docs/user_guide/gaze_analysis_pipeline/logging.md b/docs/user_guide/gaze_analysis_pipeline/logging.md index 1dea712..055a535 100644 --- a/docs/user_guide/gaze_analysis_pipeline/logging.md +++ b/docs/user_guide/gaze_analysis_pipeline/logging.md @@ -7,7 +7,7 @@ Log gaze analysis [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) and [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer) have a log attribute to enable analysis logging. -Here is an extract from the JSON ArFrame configuration file where logging is enabled for the ArFrame and for one ArLayer: +Here is an extract from the JSON ArFrame configuration file where logging is enabled for the [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) and for one [ArLayer](../../argaze.md/#argaze.ArFeatures.ArLayer): ```json { @@ -91,7 +91,7 @@ Assuming that [ArGaze.GazeAnalysis.NGram](../../argaze.md/#argaze.GazeAnalysis.N |timestamped|ngrams_count| |:----------|:-----------| |5687 |"{3: {}, 4: {}, 5: {}}"| -|6208 |"{3: {('upper_left_corner', 'lower_left_corner', 'lower_right_corner'): 1}, 4: {}, 5: {}}"| +|6208 |"{3: {('LeftPanel', 'GeoSector', 'CircularWidget'): 1}, 4: {}, 5: {}}"| |... |... | diff --git a/docs/user_guide/gaze_analysis_pipeline/visualisation.md b/docs/user_guide/gaze_analysis_pipeline/visualisation.md index cf6fa41..5f06fac 100644 --- a/docs/user_guide/gaze_analysis_pipeline/visualisation.md +++ b/docs/user_guide/gaze_analysis_pipeline/visualisation.md @@ -17,6 +17,22 @@ Here is an extract from the JSON ArFrame configuration file with a sample where "size": [1920, 1080], ... 
"image_parameters": { + "draw_gaze_positions": { + "color": [0, 255, 255], + "size": 2 + }, + "draw_fixations": { + "deviation_circle_color": [255, 255, 255], + "duration_border_color": [127, 0, 127], + "duration_factor": 1e-2, + "draw_positions": { + "position_color": [0, 255, 255], + "line_color": [0, 0, 0] + } + }, + "draw_saccades": { + "line_color": [255, 0, 255] + }, "draw_scan_path": { "draw_fixations": { "deviation_circle_color": [255, 0, 255], @@ -55,22 +71,6 @@ Here is an extract from the JSON ArFrame configuration file with a sample where "looked_aoi_name_offset": [0, -10] } } - }, - "draw_fixations": { - "deviation_circle_color": [255, 255, 255], - "duration_border_color": [127, 0, 127], - "duration_factor": 1e-2, - "draw_positions": { - "position_color": [0, 255, 255], - "line_color": [0, 0, 0] - } - }, - "draw_saccades": { - "line_color": [255, 0, 255] - }, - "draw_gaze_positions": { - "color": [0, 255, 255], - "size": 2 } } } @@ -92,7 +92,7 @@ import cv2 # Assuming that timestamped gaze positions have been processed by ArFrame.look method ... -# Export heatmap image +# Export ArFrame image cv2.imwrite('./ar_frame.png', ar_frame.image()) ``` diff --git a/docs/user_guide/utils/ready-made_scripts.md b/docs/user_guide/utils/ready-made_scripts.md index bc8b277..c82a332 100644 --- a/docs/user_guide/utils/ready-made_scripts.md +++ b/docs/user_guide/utils/ready-made_scripts.md @@ -11,7 +11,7 @@ Collection of command-line scripts to provide useful features. ## ArUco scene exporter -Load a MOVIE with ArUco markers inside and select image into it, detect ArUco markers belonging to DICT_APRILTAG_16h5 dictionary with 5cm size into the selected image thanks to given OPTIC_PARAMETERS and DETECTOR_PARAMETERS then, export detected ArUco markers scene as .obj file into an *./src/argaze/utils/_export/scenes* folder. +Load a MOVIE with ArUco markers inside and select image into it, detect ArUco markers belonging to DICT_APRILTAG_16h5 dictionary with 5cm size into the selected image thanks to given OPTIC_PARAMETERS and DETECTOR_PARAMETERS files then, export detected ArUco markers scene as .obj file into an *./src/argaze/utils/_export/scenes* folder. ```shell python ./src/argaze/utils/aruco_markers_scene_export.py MOVIE DICT_APRILTAG_16h5 5 OPTIC_PARAMETERS DETECTOR_PARAMETERS -o ./src/argaze/utils/_export/scenes diff --git a/mkdocs.yml b/mkdocs.yml index d00d6e7..f988ed0 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -25,9 +25,9 @@ nav: - ArUco markers pipeline: - user_guide/aruco_markers_pipeline/introduction.md - user_guide/aruco_markers_pipeline/aruco_markers_description.md - - user_guide/aruco_markers_pipeline/aoi_3d_description.md - user_guide/aruco_markers_pipeline/configuration_and_execution.md - user_guide/aruco_markers_pipeline/pose_estimation.md + - user_guide/aruco_markers_pipeline/aoi_3d_description.md - user_guide/aruco_markers_pipeline/aoi_3d_projection.md - user_guide/aruco_markers_pipeline/aoi_3d_frame.md - Advanced Topics: -- cgit v1.1 From 34c69b2370598476cffb4ec063b8cab7f201b143 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Thu, 28 Sep 2023 23:31:47 +0200 Subject: Fixing code annotations. 
--- src/argaze/ArFeatures.py | 17 +++++------ src/argaze/ArUcoMarkers/ArUcoCamera.py | 15 +++++----- src/argaze/ArUcoMarkers/ArUcoDetector.py | 22 ++++++++------ src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py | 43 ++++++++++++++-------------- 4 files changed, 50 insertions(+), 47 deletions(-) diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index edeac6b..84eae12 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -478,7 +478,7 @@ class ArLayer(): Parameters: draw_aoi_scene: AreaOfInterest.AOI2DScene.draw parameters (if None, no aoi scene is drawn) - draw_aoi_matching: AOIMatcher.draw parameters (which depends of the loaded aoi matcher module, if None, no aoi matching is drawn) + draw_aoi_matching: AOIMatcher.draw parameters (which depends of the loaded aoi matcher module, if None, no aoi matching is drawn) """ # Use draw_parameters attribute if no parameters @@ -1069,15 +1069,10 @@ class ArScene(): Define abstract Augmented Reality scene with ArLayers and ArFrames inside. Parameters: - name: name of the scene - layers: dictionary of ArLayers to project once the pose is estimated: see [project][argaze.ArFeatures.ArScene.project] function below. - frames: dictionary to ArFrames to project once the pose is estimated: see [project][argaze.ArFeatures.ArScene.project] function below. - angle_tolerance: Optional angle error tolerance to validate marker pose in degree used into [estimate_pose][argaze.ArFeatures.ArScene.estimate_pose] function. - distance_tolerance: Optional distance error tolerance to validate marker pose in centimeter used into [estimate_pose][argaze.ArFeatures.ArScene.estimate_pose] function. """ name: str @@ -1413,8 +1408,8 @@ class ArCamera(ArFrame): """Detect AR features from image and project scenes into camera frame. Returns: - - detection_time: AR features detection time in ms - - exceptions: dictionary with exception raised per scene + detection time: AR features detection time in ms. + exception: dictionary with exception raised per scene. """ raise NotImplementedError('watch() method not implemented') @@ -1422,7 +1417,8 @@ class ArCamera(ArFrame): def look(self, timestamp: int|float, gaze_position: GazeFeatures.GazePosition): """Project timestamped gaze position into each scene frames. - !!! warning watch method needs to be called first. + !!! warning + watch method needs to be called first. """ # Can't use camera frame when it is locked @@ -1471,7 +1467,8 @@ class ArCamera(ArFrame): def map(self): """Project camera frame background into scene frames background. - .. warning:: watch method needs to be called first. + !!! warning + watch method needs to be called first. """ # Can't use camera frame when it is locked diff --git a/src/argaze/ArUcoMarkers/ArUcoCamera.py b/src/argaze/ArUcoMarkers/ArUcoCamera.py index 4c3f042..4f00a3a 100644 --- a/src/argaze/ArUcoMarkers/ArUcoCamera.py +++ b/src/argaze/ArUcoMarkers/ArUcoCamera.py @@ -37,6 +37,7 @@ class ArUcoCamera(ArFeatures.ArCamera): """ Define an ArCamera based on ArUco marker detection. + Parameters: aruco_detector: ArUco marker detector """ @@ -144,9 +145,9 @@ class ArUcoCamera(ArFeatures.ArCamera): """Detect environment aruco markers from image and project scenes into camera frame. Returns: - - detection_time: aruco marker detection time in ms - - exceptions: dictionary with exception raised per scene - """ + detection time: aruco marker detection time in ms. + exception: dictionary with exception raised per scene. 
+ """ # Detect aruco markers detection_time = self.aruco_detector.detect_markers(image) @@ -215,10 +216,10 @@ class ArUcoCamera(ArFeatures.ArCamera): """Get frame image with ArUco detection visualisation. Parameters: - draw_detected_markers: ArucoMarker.draw parameters (if None, no marker drawn) - draw_scenes: ArUcoScene.draw parameters (if None, no scene drawn) - draw_optic_parameters_grid: OpticParameter.draw parameters (if None, no grid drawn) - kwargs: ArCamera.image parameters + draw_detected_markers: ArucoMarker.draw parameters (if None, no marker drawn) + draw_scenes: ArUcoScene.draw parameters (if None, no scene drawn) + draw_optic_parameters_grid: OpticParameter.draw parameters (if None, no grid drawn) + kwargs: ArCamera.image parameters """ # Can't use camera frame when it is locked diff --git a/src/argaze/ArUcoMarkers/ArUcoDetector.py b/src/argaze/ArUcoMarkers/ArUcoDetector.py index 305bee2..490b75b 100644 --- a/src/argaze/ArUcoMarkers/ArUcoDetector.py +++ b/src/argaze/ArUcoMarkers/ArUcoDetector.py @@ -38,7 +38,8 @@ ArUcoDetectorType = TypeVar('ArUcoDetector', bound="ArUcoDetector") class DetectorParameters(): """Wrapper class around ArUco marker detector parameters. - .. note:: More details on [opencv page](https://docs.opencv.org/4.x/d1/dcd/structcv_1_1aruco_1_1DetectorParameters.html) + !!! note + More details on [opencv page](https://docs.opencv.org/4.x/d1/dcd/structcv_1_1aruco_1_1DetectorParameters.html) """ __parameters = aruco.DetectorParameters() @@ -107,7 +108,8 @@ class DetectorParameters(): """Formated detector parameters string representation. Parameters: - spec: 'modified' to get only modified parameters.""" + spec: 'modified' to get only modified parameters. + """ output = '' @@ -257,11 +259,11 @@ class ArUcoDetector(): def detect_markers(self, image: numpy.array) -> float: """Detect all ArUco markers into an image. - .. danger:: DON'T MIRROR IMAGE - It makes the markers detection to fail. + !!! danger "DON'T MIRROR IMAGE" + It makes the markers detection to fail. Returns: - - detection time: marker detection time in ms + detection time: marker detection time in ms. """ # Reset detected markers data @@ -369,8 +371,8 @@ class ArUcoDetector(): def detect_board(self, image: numpy.array, board, expected_markers_number): """Detect ArUco markers board in image setting up the number of detected markers needed to agree detection. - .. danger:: DON'T MIRROR IMAGE - It makes the markers detection to fail. + !!! danger "DON'T MIRROR IMAGE" + It makes the markers detection to fail. """ # detect markers from gray picture @@ -406,9 +408,11 @@ class ArUcoDetector(): @property def detection_metrics(self) -> Tuple[int, dict]: """Get marker detection metrics. + Returns: - number of detect function call - dict with number of detection for each marker identifier""" + number of detect function call + dict with number of detection for each marker identifier + """ return self.__detection_count, Counter(self.__detected_ids) diff --git a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py index 5b6c69d..4a43965 100644 --- a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py +++ b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py @@ -79,29 +79,31 @@ def make_euler_rotation_vector(R): @dataclass(frozen=True) class Place(): - """Define a place as a pose and a marker.""" + """Define a place as a pose and a marker. - translation: numpy.array - """Position in group referential.""" + Parameters: + translation: position in group referential. 
+ rotation: rotation in group referential. + marker: ArUco marker linked to the place. + """ + translation: numpy.array rotation: numpy.array - """Rotation in group referential.""" - marker: dict - """ArUco marker linked to the place.""" @dataclass class ArUcoMarkersGroup(): - """Handle group of ArUco markers as one unique spatial entity and estimate its pose.""" + """Handle group of ArUco markers as one unique spatial entity and estimate its pose. - marker_size: float = field(default=0.) - """Expected size of all markers in the group.""" + Parameters: + marker_size: expected size of all markers in the group. + dictionary: expected dictionary of all markers in the group. + places: expected markers place. + """ + marker_size: float = field(default=0.) dictionary: ArUcoMarkersDictionary.ArUcoMarkersDictionary = field(default_factory=ArUcoMarkersDictionary.ArUcoMarkersDictionary) - """Expected dictionary of all markers in the group.""" - places: dict = field(default_factory=dict) - """Expected markers place""" def __post_init__(self): """Init group pose and places pose.""" @@ -166,13 +168,13 @@ class ArUcoMarkersGroup(): """Load ArUco markers group from .obj file. !!! note - Expected object (o) name format: #_Marker + Expected object (o) name format: #_Marker !!! note - All markers have to belong to the same dictionary. + All markers have to belong to the same dictionary. !!! note - Marker normal vectors (vn) expected. + Marker normal vectors (vn) expected. """ @@ -360,8 +362,8 @@ class ArUcoMarkersGroup(): """Sort markers belonging to the group from given detected markers dict (cf ArUcoDetector.detect_markers()). Returns: - dict of markers belonging to this group - dict of remaining markers not belonging to this group + dict of markers belonging to this group + dict of remaining markers not belonging to this group """ group_markers = {} @@ -434,9 +436,9 @@ class ArUcoMarkersGroup(): """Evaluate if given markers configuration match related places configuration. Returns: - dict of consistent markers - dict of unconsistent markers - dict of identified distance or angle unconsistencies and out-of-bounds values + dict of consistent markers + dict of unconsistent markers + dict of identified distance or angle unconsistencies and out-of-bounds values """ consistent_markers = {} @@ -684,7 +686,6 @@ class ArUcoMarkersGroup(): """Draw group axes and places. Parameters: - draw_axes: draw_axes parameters (if None, no axes drawn) draw_places: draw_places parameters (if None, no places drawn) draw_places_axes: draw_places_axes parameters (if None, no places axes drawn) -- cgit v1.1 From 134542a822ea1ff6a7778fcff1cb460ee13cf4a2 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 3 Oct 2023 11:23:43 +0200 Subject: Fixing SVG file loading. --- src/argaze/ArFeatures.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index 84eae12..43acf55 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -183,7 +183,7 @@ class ArLayer(): # SVG file format for 2D dimension only if file_format == 'svg': - new_aoi_scene = AOIFeatures.AOI2DScene.from_svg(filepath) + new_aoi_scene = AOI2DScene.AOI2DScene.from_svg(filepath) # OBJ file format for 3D dimension only elif file_format == 'obj': -- cgit v1.1 From 8abbb34fb00e63f29d00361b8cc3a65c12aea0cd Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Wed, 4 Oct 2023 13:38:28 +0200 Subject: Adding useAruco3Detection parameter in documentation. 
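For context on the commit below: `useAruco3Detection` is an attribute of OpenCV's `aruco.DetectorParameters`, enabling a faster detection path suited to video streams. Since the project's `DetectorParameters` class wraps `aruco.DetectorParameters()` (as shown earlier in this series), each JSON entry presumably maps onto the OpenCV attribute of the same name; a standalone sketch of that mapping, under this assumption:

```python
from cv2 import aruco

# JSON extract documented by the commit below
parameters = {
    "cornerRefinementMethod": 3,
    "aprilTagQuadSigma": 2,
    "aprilTagDeglitch": 1,
    "useAruco3Detection": 1
}

detector_parameters = aruco.DetectorParameters()

for name, value in parameters.items():

    # Assumption: one-to-one mapping between JSON keys and OpenCV attribute names
    setattr(detector_parameters, name, value)

print(detector_parameters.useAruco3Detection)
```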
--- .../advanced_topics/aruco_detector_configuration.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/user_guide/aruco_markers_pipeline/advanced_topics/aruco_detector_configuration.md b/docs/user_guide/aruco_markers_pipeline/advanced_topics/aruco_detector_configuration.md index 98b0841..f5b66c6 100644 --- a/docs/user_guide/aruco_markers_pipeline/advanced_topics/aruco_detector_configuration.md +++ b/docs/user_guide/aruco_markers_pipeline/advanced_topics/aruco_detector_configuration.md @@ -19,7 +19,8 @@ Here is an extract from the JSON [ArUcoCamera](../../../argaze.md/#argaze.ArUcoM "parameters": { "cornerRefinementMethod": 3, "aprilTagQuadSigma": 2, - "aprilTagDeglitch": 1 + "aprilTagDeglitch": 1, + "useAruco3Detection": 1 } }, ... -- cgit v1.1 From 59d47fa2ea032c5d47812e9b7b5e80cec344f59a Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Wed, 4 Oct 2023 19:37:04 +0200 Subject: minor --- src/argaze/GazeFeatures.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/argaze/GazeFeatures.py b/src/argaze/GazeFeatures.py index 523bf2c..bd1a3da 100644 --- a/src/argaze/GazeFeatures.py +++ b/src/argaze/GazeFeatures.py @@ -847,8 +847,8 @@ class AOIScanPath(list): super().__init__() self.duration_max = duration_max - self.expected_aoi = expected_aoi + self.__duration = 0 @property -- cgit v1.1 From c55fccdb1e8c26ea08c1cb36fa9178cfbc89dba8 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Wed, 4 Oct 2023 19:37:36 +0200 Subject: Adding aoi fixation distribution to Basic module. --- src/argaze/GazeAnalysis/Basic.py | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/src/argaze/GazeAnalysis/Basic.py b/src/argaze/GazeAnalysis/Basic.py index 7b41731..dc7b4fd 100644 --- a/src/argaze/GazeAnalysis/Basic.py +++ b/src/argaze/GazeAnalysis/Basic.py @@ -79,12 +79,27 @@ class AOIScanPathAnalyzer(GazeFeatures.AOIScanPathAnalyzer): self.__steps_number = len(aoi_scan_path) sum_fixation_durations = 0 + self.__sum_aoi_fixation_durations = {} for aoi_scan_step in aoi_scan_path: sum_fixation_durations += aoi_scan_step.fixation_duration + try: + + self.__sum_aoi_fixation_durations[aoi_scan_step.aoi] = self.__sum_aoi_fixation_durations[aoi_scan_step.aoi] + aoi_scan_step.fixation_duration + + except KeyError: + + self.__sum_aoi_fixation_durations[aoi_scan_step.aoi] = aoi_scan_step.fixation_duration + self.__step_fixation_durations_average = sum_fixation_durations / self.__steps_number + + self.__aoi_fixation_distribution = {} + + for aoi_name, sum_aoi_fixation_duration in self.__sum_aoi_fixation_durations.items(): + + self.__aoi_fixation_distribution[aoi_name] = sum_aoi_fixation_duration / sum_fixation_durations @property def path_duration(self) -> float: @@ -102,4 +117,10 @@ class AOIScanPathAnalyzer(GazeFeatures.AOIScanPathAnalyzer): def step_fixation_durations_average(self) -> float: """AOI scan path step fixation durations average.""" - return self.__step_fixation_durations_average \ No newline at end of file + return self.__step_fixation_durations_average + + @property + def aoi_fixation_distribution(self) -> dict: + """percentage of time spent on each AOI.""" + + return self.__aoi_fixation_distribution \ No newline at end of file -- cgit v1.1 From ff2c7b8db71755576048e5c0ee9ec59a581c07fa Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Wed, 4 Oct 2023 19:41:01 +0200 Subject: Excluding AOI frame from aoi matching and from expected aoi. 
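The commit below removes scene frames from each camera layer's expected AOI list and tells the AOI matcher to ignore them, so a frame is no longer matched as a regular AOI of its surrounding layer. Reduced to plain Python, with AOI names partly borrowed from the logging example earlier in this series ('TabletScreen' is a hypothetical AOI also defined as a frame):

```python
# AOI declared by a scene layer, one of which is also defined as a frame
scene_layer_aoi = ['LeftPanel', 'GeoSector', 'CircularWidget', 'TabletScreen']
scene_frames = ['TabletScreen']

expected_aoi = [name for name in scene_layer_aoi if name not in scene_frames]
excluded_aoi = [name for name in scene_layer_aoi if name in scene_frames]

assert expected_aoi == ['LeftPanel', 'GeoSector', 'CircularWidget']
assert excluded_aoi == ['TabletScreen']
```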
--- src/argaze/ArFeatures.py | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index 43acf55..545bc8b 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -1330,25 +1330,42 @@ class ArCamera(ArFrame): scene.parent = self # Setup expected aoi of each layer aoi scan path with the aoi of corresponding scene layer + # Edit aoi matcher exclude attribute to ignore frame aoi for layer_name, layer in self.layers.items(): if layer.aoi_scan_path is not None: - all_aoi_list = [] + expected_aoi_list = [] + exclude_aoi_list = [] for scene_name, scene in self.scenes.items(): + # Append scene layer aoi to corresponding expected camera layer aoi try: scene_layer = scene.layers[layer_name] - all_aoi_list.extend(list(scene_layer.aoi_scene.keys())) + expected_aoi_list.extend(list(scene_layer.aoi_scene.keys())) except KeyError: continue - layer.aoi_scan_path.expected_aoi = all_aoi_list + # Remove scene frame from expected camera layer aoi + # Exclude scene frame from camera layer aoi matching + for frame_name, frame in scene.frames.items(): + + try: + + expected_aoi_list.remove(frame_name) + exclude_aoi_list.append(frame_name) + + except ValueError: + + continue + + layer.aoi_scan_path.expected_aoi = expected_aoi_list + layer.aoi_matcher.exclude = exclude_aoi_list # Init a lock to share scene projections into camera frame between multiple threads self._frame_lock = threading.Lock() -- cgit v1.1 From cdf4e23d2876b7a43b5a3712467d503723fa7a52 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Thu, 5 Oct 2023 11:38:13 +0200 Subject: removing ignored gaze position table. --- src/argaze/ArFeatures.py | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index 545bc8b..b9b51d0 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -1369,9 +1369,6 @@ class ArCamera(ArFrame): # Init a lock to share scene projections into camera frame between multiple threads self._frame_lock = threading.Lock() - - # Define public timestamp buffer to store ignored gaze positions - self.ignored_gaze_positions = GazeFeatures.TimeStampedGazePositions() def __str__(self) -> str: """ @@ -1438,14 +1435,11 @@ class ArCamera(ArFrame): watch method needs to be called first. """ - # Can't use camera frame when it is locked - if self._frame_lock.locked(): - - # TODO: Store ignored timestamped gaze positions for further projections - # PB: This would imply to also store frame projections !!! - self.ignored_gaze_positions[timestamp] = gaze_position + # Can't use camera frame while it is locked + # TODO? Do we need a timeout parameter here? + while self._frame_lock.locked(): - return None, None + time.sleep(1e-6) # Lock camera frame exploitation self._frame_lock.acquire() -- cgit v1.1 From 64df8beaf90d9f0bbaf0b1b51dae225f86c6a4c4 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Thu, 5 Oct 2023 13:48:33 +0200 Subject: Working on gaze processing time assessment. 
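The commit below drops the error-prone summation of per-module execution times in favour of a single `time.perf_counter()` bracket around each `look()` call. The timing pattern, isolated from the pipeline (the helper and its workload are illustrative):

```python
import time

def timed_call(function, *args) -> tuple:
    """Run function(*args) and return its result along with the execution time in ms."""

    start = time.perf_counter()
    result = function(*args)

    return result, (time.perf_counter() - start) * 1e3

# Usage with an arbitrary workload
result, execution_time = timed_call(sum, range(1_000_000))
print(f'total execution time: {execution_time:.3f} ms')
```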
--- src/argaze/ArFeatures.py | 68 +++++++++++++++++++++++------------------------- 1 file changed, 32 insertions(+), 36 deletions(-) diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index b9b51d0..02da0fe 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -374,6 +374,9 @@ class ArLayer(): # Lock layer exploitation self.__look_lock.acquire() + # Store look execution start date + look_start = time.perf_counter() + # Update current gaze movement self.__gaze_movement = gaze_movement @@ -452,23 +455,13 @@ class ArLayer(): looked_aoi = None aoi_scan_path_analysis = {} exception = e + + # Assess total execution time in ms + execution_times['total'] = (time.perf_counter() - look_start) * 1e3 # Unlock layer exploitation self.__look_lock.release() - # Sum all execution times - total_execution_time = 0 - - if execution_times['aoi_matcher']: - - total_execution_time += execution_times['aoi_matcher'] - - for _, aoi_scan_path_analysis_time in execution_times['aoi_scan_step_analyzers'].items(): - - total_execution_time += aoi_scan_path_analysis_time - - execution_times['total'] = total_execution_time - # Return look data return looked_aoi, aoi_scan_path_analysis, execution_times, exception @@ -832,6 +825,9 @@ class ArFrame(): # Lock frame exploitation self.__look_lock.acquire() + # Store look execution start date + look_start = time.perf_counter() + # Update current gaze position self.__gaze_position = gaze_position @@ -950,30 +946,12 @@ class ArFrame(): scan_step_analysis = {} layer_analysis = {} exception = e - - # Unlock frame exploitation - self.__look_lock.release() - - # Sum all execution times - total_execution_time = 0 - - if execution_times['gaze_movement_identifier']: - total_execution_time += execution_times['gaze_movement_identifier'] + # Assess total execution time in ms + execution_times['total'] = (time.perf_counter() - look_start) * 1e3 - for _, scan_step_analysis_time in execution_times['scan_step_analyzers'].items(): - - total_execution_time += scan_step_analysis_time - - if execution_times['heatmap']: - - total_execution_time += execution_times['heatmap'] - - for _, layer_execution_times in execution_times['layers'].items(): - - total_execution_time += layer_execution_times['total'] - - execution_times['total'] = total_execution_time + # Unlock frame exploitation + self.__look_lock.release() # Return look data return identified_gaze_movement, scan_step_analysis, layer_analysis, execution_times, exception @@ -1431,15 +1409,33 @@ class ArCamera(ArFrame): def look(self, timestamp: int|float, gaze_position: GazeFeatures.GazePosition): """Project timestamped gaze position into each scene frames. + Parameters: + timestamp: gaze position time stamp (unit does'nt matter) + gaze_position: GazePosition object + timeout: maximal waiting time in ms + !!! warning watch method needs to be called first. """ # Can't use camera frame while it is locked - # TODO? Do we need a timeout parameter here? + wait_start = time.perf_counter() + waiting_time = 0 + while self._frame_lock.locked(): time.sleep(1e-6) + waiting_time = (time.perf_counter() - wait_start) * 1e3 + + # TODO? return waiting time? + + # TODO? add timeout parameter? 
+ #if waiting_time > timeout: + # return None, None + + # DEBUG + if waiting_time > 0: + print(f'ArCamera: waiting {waiting_time:.3f} ms before to process gaze position at {timestamp} time.') # Lock camera frame exploitation self._frame_lock.acquire() -- cgit v1.1 From 8bb3cec466ace640c27b41106cac7f6a09dfcdbd Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Thu, 5 Oct 2023 22:07:12 +0200 Subject: Returning projection time. --- .../aruco_markers_pipeline/advanced_topics/scripting.md | 8 ++++++-- src/argaze/ArUcoMarkers/ArUcoCamera.py | 12 ++++++++++-- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/docs/user_guide/aruco_markers_pipeline/advanced_topics/scripting.md b/docs/user_guide/aruco_markers_pipeline/advanced_topics/scripting.md index 529bff8..892d6dd 100644 --- a/docs/user_guide/aruco_markers_pipeline/advanced_topics/scripting.md +++ b/docs/user_guide/aruco_markers_pipeline/advanced_topics/scripting.md @@ -79,9 +79,9 @@ for name, aruco_scene in aruco_camera.scenes.items(): ...: # Watch image with ArUco camera - detection_time, exception = aruco_camera.watch(image) + detection_time, projection_time, exception = aruco_camera.watch(image) - # Do something with pipeline detection time + # Do something with pipeline times ... # Do something with pipeline exception @@ -95,6 +95,10 @@ Let's understand the meaning of each returned data. ArUco marker detection time in ms. +### *projection_time* + +Scenes projection time in ms. + ### *exception* A [python Exception](https://docs.python.org/3/tutorial/errors.html#exceptions) object raised during pipeline execution. diff --git a/src/argaze/ArUcoMarkers/ArUcoCamera.py b/src/argaze/ArUcoMarkers/ArUcoCamera.py index 4f00a3a..33f5b37 100644 --- a/src/argaze/ArUcoMarkers/ArUcoCamera.py +++ b/src/argaze/ArUcoMarkers/ArUcoCamera.py @@ -11,6 +11,7 @@ from typing import TypeVar, Tuple from dataclasses import dataclass, field import json import os +import time from argaze import ArFeatures, DataStructures from argaze.ArUcoMarkers import ArUcoMarkersDictionary, ArUcoDetector, ArUcoOpticCalibrator, ArUcoScene @@ -146,6 +147,7 @@ class ArUcoCamera(ArFeatures.ArCamera): Returns: detection time: aruco marker detection time in ms. + projection time: scenes projection time in ms. exception: dictionary with exception raised per scene. """ @@ -155,6 +157,9 @@ class ArUcoCamera(ArFeatures.ArCamera): # Lock camera frame exploitation self._frame_lock.acquire() + # Store projection execution start date + projection_start = time.perf_counter() + # Fill camera frame background with image self.background = image @@ -206,11 +211,14 @@ class ArUcoCamera(ArFeatures.ArCamera): exceptions[scene_name] = e + # Assess projection time in ms + projection_time = (time.perf_counter() - projection_start) * 1e3 + # Unlock camera frame exploitation self._frame_lock.release() - # Return dection time and exceptions - return detection_time, exceptions + # Return detection time, projection time and exceptions + return detection_time, projection_time, exceptions def __image(self, draw_detected_markers: dict = None, draw_scenes: dict = None, draw_optic_parameters_grid: dict = None, **kwargs: dict) -> numpy.array: """Get frame image with ArUco detection visualisation. -- cgit v1.1 From 8640b63b5b607ed0e197cb63428ae94b0baa98a7 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Thu, 5 Oct 2023 22:07:49 +0200 Subject: Using better detection parameters. Changing grid color. 
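For reference on the parameter change below: in OpenCV's aruco module, `cornerRefinementMethod` value 1 selects `CORNER_REFINE_SUBPIX` while 3 selects `CORNER_REFINE_APRILTAG`, which better suits the `DICT_APRILTAG_16h5` markers used by the demo (enum values worth double-checking against the installed OpenCV version):

```python
from cv2 import aruco

# The integer values used in the JSON configuration below
print(aruco.CORNER_REFINE_SUBPIX)    # expected: 1 (previous setting)
print(aruco.CORNER_REFINE_APRILTAG)  # expected: 3 (new setting)
```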
--- src/argaze/utils/demo_data/demo_aruco_markers_setup.json | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/argaze/utils/demo_data/demo_aruco_markers_setup.json b/src/argaze/utils/demo_data/demo_aruco_markers_setup.json index c881452..7a4f6d1 100644 --- a/src/argaze/utils/demo_data/demo_aruco_markers_setup.json +++ b/src/argaze/utils/demo_data/demo_aruco_markers_setup.json @@ -5,9 +5,10 @@ "dictionary": "DICT_APRILTAG_16h5", "marker_size": 5, "parameters": { - "cornerRefinementMethod": 1, + "cornerRefinementMethod": 3, "aprilTagQuadSigma": 2, - "aprilTagDeglitch": 1 + "aprilTagDeglitch": 1, + "useAruco3Detection": 1 } }, "layers": { @@ -40,7 +41,7 @@ "height": 72, "z": 100, "point_size": 1, - "point_color": [0, 0, 255] + "point_color": [127, 127, 127] }, "draw_scenes": { "ArScene Demo": { -- cgit v1.1 From 1e39f54a7222ed53c7c514be555e06aa5e7372b7 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Thu, 5 Oct 2023 22:08:18 +0200 Subject: Improving time assessment. --- src/argaze/utils/demo_aruco_markers_run.py | 71 +++++++++++++++++++++++++----- 1 file changed, 61 insertions(+), 10 deletions(-) diff --git a/src/argaze/utils/demo_aruco_markers_run.py b/src/argaze/utils/demo_aruco_markers_run.py index 6dc081d..6c22695 100644 --- a/src/argaze/utils/demo_aruco_markers_run.py +++ b/src/argaze/utils/demo_aruco_markers_run.py @@ -14,6 +14,7 @@ import time from argaze import ArFeatures, GazeFeatures from argaze.ArUcoMarkers import ArUcoCamera +from argaze.utils import UtilsFeatures import cv2 import numpy @@ -40,9 +41,29 @@ def main(): # Init timestamp start_time = time.time() + # Prepare gaze analysis assessment + call_chrono = UtilsFeatures.TimeProbe() + call_chrono.start() + + gaze_positions_frequency = 0 + gaze_analysis_time = 0 + # Fake gaze position with mouse pointer def on_mouse_event(event, x, y, flags, param): + nonlocal gaze_positions_frequency + nonlocal gaze_analysis_time + + # Assess gaze analysis + lap_time, nb_laps, elapsed_time = call_chrono.lap() + + if elapsed_time > 1e3: + + gaze_positions_frequency = nb_laps + call_chrono.restart() + + gaze_analysis_time = 0 + # Edit millisecond timestamp timestamp = int((time.time() - start_time) * 1e3) @@ -54,12 +75,20 @@ def main(): gaze_movement, scan_step_analysis, layer_analysis, execution_times, exception = look_data - # Do something with look data - # ... 
+ # Assess gaze analysis + gaze_analysis_time += execution_times['total'] # Attach mouse callback to window cv2.setMouseCallback(aruco_camera.name, on_mouse_event) + # Prepare video fps assessment + video_fps = 0 + video_chrono = UtilsFeatures.TimeProbe() + video_chrono.start() + + # Prepare visualisation time assessment + visualisation_time = 0 + # Enable camera video capture into separate thread video_capture = cv2.VideoCapture(int(args.source) if args.source.isdecimal() else args.source) @@ -69,30 +98,48 @@ def main(): # Capture images while video_capture.isOpened(): + # Assess capture time + capture_start = time.time() + # Read video image success, video_image = video_capture.read() + # Assess capture time + capture_time = int((time.time() - capture_start) * 1e3) + if success: + # Assess video fps + lap_time, nb_laps, elapsed_time = video_chrono.lap() + + if elapsed_time > 1e3: + + video_fps = nb_laps + video_chrono.restart() + # Detect and project AR features - detection_time, exceptions = aruco_camera.watch(video_image) + detection_time, projection_time, exceptions = aruco_camera.watch(video_image) + + # Assess visualisation time + visualisation_start = time.time() # Get ArUcoCamera frame image aruco_camera_image = aruco_camera.image() - # Write detection fps - cv2.rectangle(aruco_camera_image, (0, 0), (420, 50), (63, 63, 63), -1) - cv2.putText(aruco_camera_image, f'Detection fps: {1e3/detection_time:.1f}', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA) + # Write time info + cv2.rectangle(aruco_camera_image, (0, 0), (aruco_camera.size[0], 100), (63, 63, 63), -1) + cv2.putText(aruco_camera_image, f'{video_fps} FPS | Capture {capture_time}ms | Detection {int(detection_time)}ms | Projection {int(projection_time)}ms | Visualisation {visualisation_time}ms', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA) + cv2.putText(aruco_camera_image, f'{gaze_positions_frequency} gaze positions/s | Gaze analysis {gaze_analysis_time:.2f}ms', (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA) # Handle exceptions for i, (scene_name, e) in enumerate(exceptions.items()): # Write errors - cv2.rectangle(aruco_camera_image, (0, (i+1)*50), (720, (i+2)*50), (127, 127, 127), -1) - cv2.putText(aruco_camera_image, f'{scene_name} error: {e}', (20, (i+1)*90), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA) + cv2.rectangle(aruco_camera_image, (0, (i+1)*100), (aruco_camera.size[0], (i+2)*80), (127, 127, 127), -1) + cv2.putText(aruco_camera_image, f'{scene_name} error: {e}', (20, (i+1)*140), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA) # Write hint - cv2.putText(aruco_camera_image, 'Mouve mouse pointer over gray rectangle area', (450, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA) + cv2.putText(aruco_camera_image, 'Mouve mouse pointer over gray rectangle area', (20, aruco_camera.size[1]-40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA) # Display ArUcoCamera frame image cv2.imshow(aruco_camera.name, aruco_camera_image) @@ -104,11 +151,15 @@ def main(): cv2.imshow(f'{scene_frame.parent.name}:{scene_frame.name}', scene_frame.image()) # Stop by pressing 'Esc' key - if cv2.waitKey(10) == 27: + # NOTE: on MacOS, cv2.waitKey(1) waits ~40ms + if cv2.waitKey(1) == 27: # Close camera video capture video_capture.release() + # Assess visualisation time + visualisation_time = int((time.time() - visualisation_start) * 1e3) + # Stop image display cv2.destroyAllWindows() -- cgit v1.1 From 
07d8e2535a8d902d5cb731d8343ebc349c198d65 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Fri, 6 Oct 2023 00:15:42 +0200 Subject: Assessing visualisation time even when no image is read. --- src/argaze/utils/demo_aruco_markers_run.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/argaze/utils/demo_aruco_markers_run.py b/src/argaze/utils/demo_aruco_markers_run.py index 6c22695..5e1ac2e 100644 --- a/src/argaze/utils/demo_aruco_markers_run.py +++ b/src/argaze/utils/demo_aruco_markers_run.py @@ -150,6 +150,11 @@ def main(): # Display scene frame cv2.imshow(f'{scene_frame.parent.name}:{scene_frame.name}', scene_frame.image()) + else: + + # Assess visualisation time + visualisation_start = time.time() + # Stop by pressing 'Esc' key # NOTE: on MacOS, cv2.waitKey(1) waits ~40ms if cv2.waitKey(1) == 27: -- cgit v1.1 From 992b84ea72e1d20b395ab8d3d50abbd494c1a749 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 10 Oct 2023 13:51:11 +0200 Subject: Fixing KCoefficient formula. --- src/argaze/GazeAnalysis/KCoefficient.py | 29 ++++++++++++++++++++--------- 1 file changed, 20 insertions(+), 9 deletions(-) diff --git a/src/argaze/GazeAnalysis/KCoefficient.py b/src/argaze/GazeAnalysis/KCoefficient.py index 80fe1fd..c50bc3a 100644 --- a/src/argaze/GazeAnalysis/KCoefficient.py +++ b/src/argaze/GazeAnalysis/KCoefficient.py @@ -52,19 +52,24 @@ class ScanPathAnalyzer(GazeFeatures.ScanPathAnalyzer): duration_std = numpy.std(durations) amplitude_std = numpy.std(amplitudes) - Ks = [] - for scan_step in scan_path: + if duration_std > 0. and amplitude_std > 0.: + + Ks = [] + for scan_step in scan_path: + + Ks.append((abs(scan_step.duration - duration_mean) / duration_std) - (abs(scan_step.last_saccade.amplitude - amplitude_mean) / amplitude_std)) + + self.__K = numpy.array(Ks).mean() - Ks.append(((scan_step.duration - duration_mean) / duration_std) - ((scan_step.last_saccade.amplitude - amplitude_mean) / amplitude_std)) + else: - self.__K = numpy.array(Ks).mean() + self.__K = 0. @property def K(self) -> float: """K coefficient.""" return self.__K - @dataclass class AOIScanPathAnalyzer(GazeFeatures.AOIScanPathAnalyzer): @@ -104,12 +109,18 @@ class AOIScanPathAnalyzer(GazeFeatures.AOIScanPathAnalyzer): duration_std = numpy.std(durations) amplitude_std = numpy.std(amplitudes) - Ks = [] - for aoi_scan_step in aoi_scan_path: + if duration_std > 0. and amplitude_std > 0.: + + Ks = [] + for aoi_scan_step in aoi_scan_path: + + Ks.append((abs(aoi_scan_step.duration - duration_mean) / duration_std) - (abs(aoi_scan_step.last_saccade.amplitude - amplitude_mean) / amplitude_std)) + + self.__K = numpy.array(Ks).mean() - Ks.append(((aoi_scan_step.duration - duration_mean) / duration_std) - ((aoi_scan_step.last_saccade.amplitude - amplitude_mean) / amplitude_std)) + else: - self.__K = numpy.array(Ks).mean() + self.__K = 0. @property def K(self) -> float: -- cgit v1.1 From d7107ed868229b9665ee7432dcdc1da90c97c75a Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 10 Oct 2023 16:25:56 +0200 Subject: Replacing ExploitExplore by ExploreExploit. 
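Before moving on, note what the K coefficient fix above computes: for each scan step with duration d_i and last saccade amplitude a_i, given the means and standard deviations over the whole path, both analyzers now average absolute standardized deviations (and return K = 0 whenever either standard deviation is zero):

```latex
K = \frac{1}{n} \sum_{i=1}^{n} \left( \frac{\lvert d_i - \mu_d \rvert}{\sigma_d} - \frac{\lvert a_i - \mu_a \rvert}{\sigma_a} \right)
```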
--- .../configuration_and_execution.md | 4 +- .../pipeline_modules/scan_path_analyzers.md | 6 +- .../GazeAnalysis/ExploitExploreRatio.py | 6 +- src/argaze/GazeAnalysis/ExploitExploreRatio.py | 75 ---------------------- src/argaze/GazeAnalysis/ExploreExploitRatio.py | 70 ++++++++++++++++++++ src/argaze/GazeAnalysis/__init__.py | 2 +- .../utils/demo_data/demo_gaze_analysis_setup.json | 2 +- src/argaze/utils/demo_gaze_analysis_run.py | 6 +- 8 files changed, 83 insertions(+), 88 deletions(-) delete mode 100644 src/argaze/GazeAnalysis/ExploitExploreRatio.py create mode 100644 src/argaze/GazeAnalysis/ExploreExploitRatio.py diff --git a/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md b/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md index 3b21cbd..c40039c 100644 --- a/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md +++ b/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md @@ -26,7 +26,7 @@ Here is a simple JSON [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) conf }, "scan_path_analyzers": { "Basic": {}, - "ExploitExploreRatio": { + "ExploreExploitRatio": { "short_fixation_duration_threshold": 0 } } @@ -90,7 +90,7 @@ Finally, the last [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) pipeline Each analysis algorithm can be selected by instantiating a particular [ScanPathAnalyzer from GazeAnalysis submodule](pipeline_modules/scan_path_analyzers.md) or [from another python package](advanced_topics/module_loading.md). -In the example file, the choosen analysis algorithms are the [Basic](../../argaze.md/#argaze.GazeAnalysis.Basic) module and the [ExploitExploreRatio](../../argaze.md/#argaze.GazeAnalysis.ExploitExploreRatio) module which has one specific *short_fixation_duration_threshold* attribute. +In the example file, the choosen analysis algorithms are the [Basic](../../argaze.md/#argaze.GazeAnalysis.Basic) module and the [ExploreExploitRatio](../../argaze.md/#argaze.GazeAnalysis.ExploreExploitRatio) module which has one specific *short_fixation_duration_threshold* attribute. 
## Pipeline execution diff --git a/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/scan_path_analyzers.md b/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/scan_path_analyzers.md index afba844..f9f757a 100644 --- a/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/scan_path_analyzers.md +++ b/docs/user_guide/gaze_analysis_pipeline/pipeline_modules/scan_path_analyzers.md @@ -13,15 +13,15 @@ Here are JSON samples to include a chosen module inside [ArFrame configuration]( [See in code reference](../../../argaze.md/#argaze.GazeAnalysis.Basic.ScanPathAnalyzer) -## Exploit/Explore ratio +## Explore/Exploit ratio ```json -"ExploitExploreRatio": { +"ExploreExploitRatio": { "short_fixation_duration_threshold": 0 } ``` -[See in code reference](../../../argaze.md/#argaze.GazeAnalysis.ExploitExploreRatio.ScanPathAnalyzer) +[See in code reference](../../../argaze.md/#argaze.GazeAnalysis.ExploreExploitRatio.ScanPathAnalyzer) ## K coefficient diff --git a/src/argaze.test/GazeAnalysis/ExploitExploreRatio.py b/src/argaze.test/GazeAnalysis/ExploitExploreRatio.py index 0e6b74a..7b323d4 100644 --- a/src/argaze.test/GazeAnalysis/ExploitExploreRatio.py +++ b/src/argaze.test/GazeAnalysis/ExploitExploreRatio.py @@ -10,7 +10,7 @@ __license__ = "BSD" import unittest from argaze import GazeFeatures -from argaze.GazeAnalysis import ExploitExploreRatio +from argaze.GazeAnalysis import ExploreExploitRatio from argaze.utils import UtilsFeatures GazeFeaturesTest = UtilsFeatures.importFromTestPackage('GazeFeatures') @@ -21,7 +21,7 @@ class TestScanPathAnalyzer(unittest.TestCase): def test_analyze(self): """Test analyze method.""" - xxr_analyzer = ExploitExploreRatio.ScanPathAnalyzer() + xxr_analyzer = ExploreExploitRatio.ScanPathAnalyzer() scan_path = GazeFeaturesTest.build_scan_path(10) @@ -31,7 +31,7 @@ class TestScanPathAnalyzer(unittest.TestCase): xxr_analyzer.analyze(scan_path) # Check exploit explore ratio: it should greater than 1 because of build_scan_path - self.assertGreaterEqual(xxr_analyzer.exploit_explore_ratio, 1.) + self.assertGreaterEqual(xxr_analyzer.explore_exploit_ratio, 1.) if __name__ == '__main__': diff --git a/src/argaze/GazeAnalysis/ExploitExploreRatio.py b/src/argaze/GazeAnalysis/ExploitExploreRatio.py deleted file mode 100644 index f35561f..0000000 --- a/src/argaze/GazeAnalysis/ExploitExploreRatio.py +++ /dev/null @@ -1,75 +0,0 @@ -#!/usr/bin/env python - -"""Exploit/Explore ratio module. -""" - -__author__ = "Théo de la Hogue" -__credits__ = [] -__copyright__ = "Copyright 2023, Ecole Nationale de l'Aviation Civile (ENAC)" -__license__ = "BSD" - -from dataclasses import dataclass, field - -from argaze import GazeFeatures - -import numpy - -@dataclass -class ScanPathAnalyzer(GazeFeatures.ScanPathAnalyzer): - """Implementation of exploit vs explore ratio algorithm as described in: - - **Goldberg J. H., Kotval X. P. (1999).** - *Computer interface evaluation using eye movements: methods and constructs.* - International Journal of Industrial Ergonomics (631–645). - [https://doi.org/10.1016/S0169-8141(98)00068-7](https://doi.org/10.1016/S0169-8141\\(98\\)00068-7) - - **Dehais F., Peysakhovich V., Scannella S., Fongue J., Gateau T. (2015).** - *Automation surprise in aviation: Real-time solutions.* - Proceedings of the 33rd annual ACM conference on Human Factors in Computing Systems (2525–2534). - [https://doi.org/10.1145/2702123.2702521](https://doi.org/10.1145/2702123.2702521) - """ - - short_fixation_duration_threshold: float = field(default=0.) 
- """Time below which a fixation is considered to be short and so as exploratory.""" - - def __post_init__(self): - - super().__init__() - - self.__exploit_explore_ratio = 0. - - def analyze(self, scan_path: GazeFeatures.ScanPathType): - """Analyze scan path.""" - - assert(len(scan_path) > 1) - - short_fixations_durations = [] - long_fixations_durations = [] - saccades_durations = [] - - for scan_step in scan_path: - - if scan_step.first_fixation.duration > self.short_fixation_duration_threshold: - - long_fixations_durations.append(scan_step.first_fixation.duration) - - else: - - short_fixations_durations.append(scan_step.first_fixation.duration) - - saccades_durations.append(scan_step.last_saccade.duration) - - short_fixations_duration = numpy.array(short_fixations_durations).sum() - long_fixations_duration = numpy.array(long_fixations_durations).sum() - saccades_duration = numpy.array(saccades_durations).sum() - - assert(saccades_duration + short_fixations_duration > 0) - - self.__exploit_explore_ratio = long_fixations_duration / (saccades_duration + short_fixations_duration) - - @property - def exploit_explore_ratio(self) -> float: - """Exploit/Explore ratio.""" - - return self.__exploit_explore_ratio - \ No newline at end of file diff --git a/src/argaze/GazeAnalysis/ExploreExploitRatio.py b/src/argaze/GazeAnalysis/ExploreExploitRatio.py new file mode 100644 index 0000000..b4550e7 --- /dev/null +++ b/src/argaze/GazeAnalysis/ExploreExploitRatio.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python + +"""Explore/Explore ratio module. +""" + +__author__ = "Théo de la Hogue" +__credits__ = [] +__copyright__ = "Copyright 2023, Ecole Nationale de l'Aviation Civile (ENAC)" +__license__ = "BSD" + +from dataclasses import dataclass, field + +from argaze import GazeFeatures + +import numpy + +@dataclass +class ScanPathAnalyzer(GazeFeatures.ScanPathAnalyzer): + """Implementation of explore vs exploit ratio algorithm as described in: + + **Dehais F., Peysakhovich V., Scannella S., Fongue J., Gateau T. (2015).** + *Automation surprise in aviation: Real-time solutions.* + Proceedings of the 33rd annual ACM conference on Human Factors in Computing Systems (2525–2534). + [https://doi.org/10.1145/2702123.2702521](https://doi.org/10.1145/2702123.2702521) + """ + + short_fixation_duration_threshold: float = field(default=0.) + """Time below which a fixation is considered to be short and so as exploratory.""" + + def __post_init__(self): + + super().__init__() + + self.__explore_exploit_ratio = 0. 
+ + def analyze(self, scan_path: GazeFeatures.ScanPathType): + """Analyze scan path.""" + + assert(len(scan_path) > 1) + + short_fixations_durations = [] + long_fixations_durations = [] + saccades_durations = [] + + for scan_step in scan_path: + + if scan_step.first_fixation.duration > self.short_fixation_duration_threshold: + + long_fixations_durations.append(scan_step.first_fixation.duration) + + else: + + short_fixations_durations.append(scan_step.first_fixation.duration) + + saccades_durations.append(scan_step.last_saccade.duration) + + short_fixations_duration = numpy.array(short_fixations_durations).sum() + long_fixations_duration = numpy.array(long_fixations_durations).sum() + saccades_duration = numpy.array(saccades_durations).sum() + + assert(long_fixations_duration > 0) + + self.__explore_exploit_ratio = (saccades_duration + short_fixations_duration) / long_fixations_duration + + @property + def explore_exploit_ratio(self) -> float: + """Explore/Exploit ratio.""" + + return self.__explore_exploit_ratio + \ No newline at end of file diff --git a/src/argaze/GazeAnalysis/__init__.py b/src/argaze/GazeAnalysis/__init__.py index 164de74..62e0823 100644 --- a/src/argaze/GazeAnalysis/__init__.py +++ b/src/argaze/GazeAnalysis/__init__.py @@ -1,4 +1,4 @@ """ Various gaze movement identification, AOI matching and scan path analysis algorithms. """ -__all__ = ['Basic', 'DispersionThresholdIdentification', 'VelocityThresholdIdentification', 'TransitionMatrix', 'KCoefficient', 'LempelZivComplexity', 'NGram', 'Entropy', 'NearestNeighborIndex', 'ExploitExploreRatio'] \ No newline at end of file +__all__ = ['Basic', 'DispersionThresholdIdentification', 'VelocityThresholdIdentification', 'TransitionMatrix', 'KCoefficient', 'LempelZivComplexity', 'NGram', 'Entropy', 'NearestNeighborIndex', 'ExploreExploitRatio'] \ No newline at end of file diff --git a/src/argaze/utils/demo_data/demo_gaze_analysis_setup.json b/src/argaze/utils/demo_data/demo_gaze_analysis_setup.json index fe5d197..f921662 100644 --- a/src/argaze/utils/demo_data/demo_gaze_analysis_setup.json +++ b/src/argaze/utils/demo_data/demo_gaze_analysis_setup.json @@ -18,7 +18,7 @@ "NearestNeighborIndex": { "size": [1920, 1149] }, - "ExploitExploreRatio": { + "ExploreExploitRatio": { "short_fixation_duration_threshold": 0 } }, diff --git a/src/argaze/utils/demo_gaze_analysis_run.py b/src/argaze/utils/demo_gaze_analysis_run.py index 789657b..9856d90 100644 --- a/src/argaze/utils/demo_gaze_analysis_run.py +++ b/src/argaze/utils/demo_gaze_analysis_run.py @@ -206,12 +206,12 @@ def main(): except KeyError: pass - # Display Exploit/Explore ratio analysis if loaded + # Display Explore/Exploit ratio analysis if loaded try: - xxr_analyser = ar_frame.scan_path_analyzers["argaze.GazeAnalysis.ExploitExploreRatio"] + xxr_analyser = ar_frame.scan_path_analyzers["argaze.GazeAnalysis.ExploreExploitRatio"] - cv2.putText(frame_image, f'Exploit explore ratio: {xxr_analyser.exploit_explore_ratio:.3f}', (20, ar_frame.size[1]-360), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA) + cv2.putText(frame_image, f'Explore/Exploit ratio: {xxr_analyser.explore_exploit_ratio:.3f}', (20, ar_frame.size[1]-360), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA) except KeyError: -- cgit v1.1 From b8ea27be0fdaba40c49b93a2e84756fb00c6cde5 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 10 Oct 2023 18:09:02 +0200 Subject: Adding visual HFOV and VFOV parameter to ArCamera. Using them into ArScene project method and ArUcoCamera class. 
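Since the new `visual_hfov` and `visual_vfov` fields added below are plain `ArCamera` dataclass attributes, they should be settable from the camera JSON configuration like any other entry (an assumption: the loading code is not touched by this commit). Note that only the horizontal angle is used for clipping at this stage, per the `TODO` left in `project()`. A hypothetical configuration extract:

```python
# Hypothetical ArUcoCamera configuration extract (key names from this commit's dataclass fields)
camera_config = {
    "name": "ArUcoCamera Demo",
    "size": [1280, 720],
    "visual_hfov": 160,  # degrees; 0 (the default) disables clipping
    "visual_vfov": 0     # degrees; declared but not used by project() yet
}
```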
--- src/argaze/ArFeatures.py | 10 ++++++++-- src/argaze/ArUcoMarkers/ArUcoCamera.py | 2 +- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index 02da0fe..1b24957 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -1239,13 +1239,14 @@ class ArScene(): raise NotImplementedError('estimate_pose() method not implemented') - def project(self, tvec: numpy.array, rvec: numpy.array, visual_hfov: float = 0.) -> Tuple[str, AOI2DScene.AOI2DScene]: - """Project layers according estimated pose and optional horizontal field of view clipping angle. + def project(self, tvec: numpy.array, rvec: numpy.array, visual_hfov: float = 0., visual_vfov: float = 0.) -> Tuple[str, AOI2DScene.AOI2DScene]: + """Project layers according estimated pose and optional field of view clipping angles. Parameters: tvec: translation vector rvec: rotation vector visual_hfov: horizontal field of view clipping angle + visual_vfov: vertical field of view clipping angle Returns: layer_name: name of projected layer @@ -1255,6 +1256,7 @@ class ArScene(): for name, layer in self.layers.items(): # Clip AOI out of the visual horizontal field of view (optional) + # TODO: use HFOV and VFOV and don't use vision_cone method if visual_hfov > 0: # Transform layer aoi scene into camera referential @@ -1293,9 +1295,13 @@ class ArCamera(ArFrame): Parameters: scenes: all scenes to project into camera frame + visual_hfov: Optional angle in degree to clip scenes projection according visual horizontal field of view (HFOV). + visual_vfov: Optional angle in degree to clip scenes projection according visual vertical field of view (VFOV). """ scenes: dict = field(default_factory=dict) + visual_hfov: float = field(default=0.) + visual_vfov: float = field(default=0.) def __post_init__(self): diff --git a/src/argaze/ArUcoMarkers/ArUcoCamera.py b/src/argaze/ArUcoMarkers/ArUcoCamera.py index 33f5b37..f39c516 100644 --- a/src/argaze/ArUcoMarkers/ArUcoCamera.py +++ b/src/argaze/ArUcoMarkers/ArUcoCamera.py @@ -196,7 +196,7 @@ class ArUcoCamera(ArFeatures.ArCamera): tvec, rmat, _, _ = scene.estimate_pose(self.aruco_detector.detected_markers) # Project scene into camera frame according estimated pose - for layer_name, layer_projection in scene.project(tvec, rmat): + for layer_name, layer_projection in scene.project(tvec, rmat, self.visual_hfov, self.visual_vfov): try: -- cgit v1.1 From 46b2ac7ea7fb83d520d3fe5deee46d629d6dc9d0 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Wed, 11 Oct 2023 07:34:33 +0200 Subject: renaming test file. 
--- .../GazeAnalysis/ExploitExploreRatio.py | 38 ---------------------- .../GazeAnalysis/ExploreExploitRatio.py | 38 ++++++++++++++++++++++ 2 files changed, 38 insertions(+), 38 deletions(-) delete mode 100644 src/argaze.test/GazeAnalysis/ExploitExploreRatio.py create mode 100644 src/argaze.test/GazeAnalysis/ExploreExploitRatio.py diff --git a/src/argaze.test/GazeAnalysis/ExploitExploreRatio.py b/src/argaze.test/GazeAnalysis/ExploitExploreRatio.py deleted file mode 100644 index 7b323d4..0000000 --- a/src/argaze.test/GazeAnalysis/ExploitExploreRatio.py +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env python - -""" """ - -__author__ = "Théo de la Hogue" -__credits__ = [] -__copyright__ = "Copyright 2023, Ecole Nationale de l'Aviation Civile (ENAC)" -__license__ = "BSD" - -import unittest - -from argaze import GazeFeatures -from argaze.GazeAnalysis import ExploreExploitRatio -from argaze.utils import UtilsFeatures - -GazeFeaturesTest = UtilsFeatures.importFromTestPackage('GazeFeatures') - -class TestScanPathAnalyzer(unittest.TestCase): - """Test ScanPathAnalyzer class.""" - - def test_analyze(self): - """Test analyze method.""" - - xxr_analyzer = ExploreExploitRatio.ScanPathAnalyzer() - - scan_path = GazeFeaturesTest.build_scan_path(10) - - # Check scan path - self.assertEqual(len(scan_path), 10) - - xxr_analyzer.analyze(scan_path) - - # Check exploit explore ratio: it should greater than 1 because of build_scan_path - self.assertGreaterEqual(xxr_analyzer.explore_exploit_ratio, 1.) - -if __name__ == '__main__': - - unittest.main() \ No newline at end of file diff --git a/src/argaze.test/GazeAnalysis/ExploreExploitRatio.py b/src/argaze.test/GazeAnalysis/ExploreExploitRatio.py new file mode 100644 index 0000000..7b323d4 --- /dev/null +++ b/src/argaze.test/GazeAnalysis/ExploreExploitRatio.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python + +""" """ + +__author__ = "Théo de la Hogue" +__credits__ = [] +__copyright__ = "Copyright 2023, Ecole Nationale de l'Aviation Civile (ENAC)" +__license__ = "BSD" + +import unittest + +from argaze import GazeFeatures +from argaze.GazeAnalysis import ExploreExploitRatio +from argaze.utils import UtilsFeatures + +GazeFeaturesTest = UtilsFeatures.importFromTestPackage('GazeFeatures') + +class TestScanPathAnalyzer(unittest.TestCase): + """Test ScanPathAnalyzer class.""" + + def test_analyze(self): + """Test analyze method.""" + + xxr_analyzer = ExploreExploitRatio.ScanPathAnalyzer() + + scan_path = GazeFeaturesTest.build_scan_path(10) + + # Check scan path + self.assertEqual(len(scan_path), 10) + + xxr_analyzer.analyze(scan_path) + + # Check exploit explore ratio: it should greater than 1 because of build_scan_path + self.assertGreaterEqual(xxr_analyzer.explore_exploit_ratio, 1.) + +if __name__ == '__main__': + + unittest.main() \ No newline at end of file -- cgit v1.1 From 0d75c4adcb98426a2bc60b019bced9ae78dcf811 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Wed, 11 Oct 2023 07:35:12 +0200 Subject: Removing annotation. --- src/argaze/ArFeatures.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index 1b24957..b4c2658 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -1418,7 +1418,6 @@ class ArCamera(ArFrame): Parameters: timestamp: gaze position time stamp (unit does'nt matter) gaze_position: GazePosition object - timeout: maximal waiting time in ms !!! warning watch method needs to be called first. 
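The next commit aligns the `watch()` return annotation with the three values the method actually returns since the projection time commit. A stand-in with the corrected signature (the stub body and values are illustrative):

```python
from typing import Tuple

def watch_stub(image) -> Tuple[float, float, dict]:
    """Stand-in mirroring the corrected ArUcoCamera.watch signature:
    detection time (ms), projection time (ms) and exceptions per scene."""

    return 12.3, 4.5, {}

detection_time, projection_time, exceptions = watch_stub(None)
```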
-- cgit v1.1 From f849add84404d6cfa3be5b5d29b62ceb67622f89 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Wed, 11 Oct 2023 07:35:48 +0200 Subject: Fixing bad output annotation. --- src/argaze/ArUcoMarkers/ArUcoCamera.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/argaze/ArUcoMarkers/ArUcoCamera.py b/src/argaze/ArUcoMarkers/ArUcoCamera.py index f39c516..b850dde 100644 --- a/src/argaze/ArUcoMarkers/ArUcoCamera.py +++ b/src/argaze/ArUcoMarkers/ArUcoCamera.py @@ -142,7 +142,7 @@ class ArUcoCamera(ArFeatures.ArCamera): return ArUcoCamera.from_dict(aruco_camera_data, working_directory) - def watch(self, image: numpy.array) -> Tuple[float, dict]: + def watch(self, image: numpy.array) -> Tuple[float, float, dict]: """Detect environment aruco markers from image and project scenes into camera frame. Returns: -- cgit v1.1 From cbf0dc4a328763970aa41e8d5081473cca7411a9 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Wed, 11 Oct 2023 07:36:14 +0200 Subject: Improving documentation. --- docs/user_guide/aruco_markers_pipeline/introduction.md | 8 ++++---- docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md | 2 +- .../gaze_analysis_pipeline/configuration_and_execution.md | 2 +- docs/user_guide/gaze_analysis_pipeline/introduction.md | 8 ++++---- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/docs/user_guide/aruco_markers_pipeline/introduction.md b/docs/user_guide/aruco_markers_pipeline/introduction.md index a83da9a..37ab055 100644 --- a/docs/user_guide/aruco_markers_pipeline/introduction.md +++ b/docs/user_guide/aruco_markers_pipeline/introduction.md @@ -20,10 +20,10 @@ To build your own ArUco markers pipeline, you need to know: * [How to estimate scene pose](pose_estimation.md), * [How to describe scene's AOI](aoi_3d_description.md), * [How to project 3D AOI into camera frame](aoi_3d_projection.md), -* [How to define a 3D AOI as a frame](aoi_3d_frame.md) +* [How to define a 3D AOI as a frame](aoi_3d_frame.md). More advanced features are also explained like: -* [How to script ArUco markers pipeline](advanced_topics/scripting.md) -* [How to calibrate optic parameters](advanced_topics/optic_parameters_calibration.md) -* [How to improve ArUco markers detection](advanced_topics/aruco_detector_configuration.md) +* [How to script ArUco markers pipeline](advanced_topics/scripting.md), +* [How to calibrate optic parameters](advanced_topics/optic_parameters_calibration.md), +* [How to improve ArUco markers detection](advanced_topics/aruco_detector_configuration.md). diff --git a/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md b/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md index 3fd15db..66fa12f 100644 --- a/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md +++ b/docs/user_guide/gaze_analysis_pipeline/aoi_analysis.md @@ -1,7 +1,7 @@ Enable AOI analysis =================== -Once [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) is [configured](configuration_and_execution.md) and [2D AOI are described](aoi_2d_description.md), fixation can be matched with AOI to build an AOI scan path before analyze it. +Once [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) is [configured](configuration_and_execution.md) and [2D AOI are described](aoi_2d_description.md), fixation can be matched with AOI to build an AOI scan path before analyzing it. 
![Layer](../../img/ar_layer.png)
diff --git a/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md b/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md
index c40039c..71d3c33 100644
--- a/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md
+++ b/docs/user_guide/gaze_analysis_pipeline/configuration_and_execution.md
@@ -107,4 +107,4 @@ Timestamped gaze positions have to be passed one by one to [ArFrame.look](../../
 
     At this point, the [ArFrame.look](../../argaze.md/#argaze.ArFeatures.ArFrame.look) method only process gaze movement identification and scan path analysis without any AOI neither any logging or visualisation supports.
 
-    Read the next chapters to learn how to [describe frame's AOI](aoi_2d_description.md), [add AOI analysis](aoi_analysis.md), [log gaze analysis](logging.md) and [visualize pipeline steps](visualisation.md).
\ No newline at end of file
+    Read the next chapters to learn how to [describe AOI](aoi_2d_description.md), [add AOI analysis](aoi_analysis.md), [log gaze analysis](logging.md) and [visualize pipeline steps](visualisation.md).
\ No newline at end of file
diff --git a/docs/user_guide/gaze_analysis_pipeline/introduction.md b/docs/user_guide/gaze_analysis_pipeline/introduction.md
index 339dd90..65cc53a 100644
--- a/docs/user_guide/gaze_analysis_pipeline/introduction.md
+++ b/docs/user_guide/gaze_analysis_pipeline/introduction.md
@@ -11,14 +11,14 @@ To build your own gaze analysis pipeline, you need to know:
 
 * [How to edit timestamped gaze positions](timestamped_gaze_positions_edition.md),
 * [How to load and execute gaze analysis pipeline](configuration_and_execution.md),
-* [How to describe frame's AOI](aoi_2d_description.md),
+* [How to describe AOI](aoi_2d_description.md),
 * [How to enable AOI analysis](aoi_analysis.md),
 * [How to visualize pipeline steps outputs](visualisation.md),
 * [How to log resulted gaze analysis](logging.md),
-* [How to make heatmap image](heatmap.md).
+* [How to make heatmap image](heatmap.md),
 * [How to add a background image](background.md).
 
 More advanced features are also explained like:
 
-* [How to script gaze analysis pipeline](advanced_topics/scripting.md)
-* [How to load module from another package](advanced_topics/module_loading.md)
+* [How to script gaze analysis pipeline](advanced_topics/scripting.md),
+* [How to load module from another package](advanced_topics/module_loading.md).
--
cgit v1.1

From 749adde269420ae7e84849bf72aa087256d10ee7 Mon Sep 17 00:00:00 2001
From: Théo de la Hogue
Date: Wed, 11 Oct 2023 11:46:38 +0200
Subject: Commenting debug print.

---
 src/argaze/ArFeatures.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py
index b4c2658..a1c7349 100644
--- a/src/argaze/ArFeatures.py
+++ b/src/argaze/ArFeatures.py
@@ -1439,8 +1439,8 @@ class ArCamera(ArFrame):
             #   return None, None
 
         # DEBUG
-        if waiting_time > 0:
-            print(f'ArCamera: waiting {waiting_time:.3f} ms before to process gaze position at {timestamp} time.')
+        #if waiting_time > 0:
+        #   print(f'ArCamera: waiting {waiting_time:.3f} ms before to process gaze position at {timestamp} time.')
 
         # Lock camera frame exploitation
         self._frame_lock.acquire()
--
cgit v1.1

From 46b3fb454275d7431e8ea894c887179c1704c84c Mon Sep 17 00:00:00 2001
From: Théo de la Hogue
Date: Wed, 11 Oct 2023 15:17:46 +0200
Subject: Removing useless private attributes from ArUcoDetector class.
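The documentation pages updated above describe loading a pipeline configuration and passing timestamped gaze positions one by one to ArFrame.look. A minimal usage sketch, assuming a valid JSON configuration file and the look(timestamp, gaze_position) signature used throughout this series (the file name and gaze samples below are made up):

    from argaze import ArFeatures, GazeFeatures

    # Hypothetical configuration file describing an ArFrame
    ar_frame = ArFeatures.ArFrame.from_json('./frame_setup.json')

    # Feed timestamped gaze positions one by one
    for timestamp, x, y in [(0, 120, 240), (20, 122, 238), (40, 125, 235)]:

        ar_frame.look(timestamp, GazeFeatures.GazePosition((x, y)))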
--- src/argaze/ArUcoMarkers/ArUcoDetector.py | 46 +++++++++++++++----------------- 1 file changed, 21 insertions(+), 25 deletions(-) diff --git a/src/argaze/ArUcoMarkers/ArUcoDetector.py b/src/argaze/ArUcoMarkers/ArUcoDetector.py index 490b75b..3ef7fa6 100644 --- a/src/argaze/ArUcoMarkers/ArUcoDetector.py +++ b/src/argaze/ArUcoMarkers/ArUcoDetector.py @@ -149,8 +149,6 @@ class ArUcoDetector(): # Init detected markers data self.__detected_markers = {} - self.__detected_markers_corners = [] - self.__detected_markers_ids = [] # Init detected board data self.__board = None @@ -267,31 +265,31 @@ class ArUcoDetector(): """ # Reset detected markers data - self.__detected_markers, self.__detected_markers_corners, self.__detected_markers_ids = {}, [], [] + self.__detected_markers, detected_markers_corners, detected_markers_ids = {}, [], [] # Store marker detection start date detection_start = time.perf_counter() # Detect markers into gray picture - self.__detected_markers_corners, self.__detected_markers_ids, _ = aruco.detectMarkers(cv.cvtColor(image, cv.COLOR_BGR2GRAY), self.dictionary.markers, parameters = self.parameters.internal) + detected_markers_corners, detected_markers_ids, _ = aruco.detectMarkers(cv.cvtColor(image, cv.COLOR_BGR2GRAY), self.dictionary.markers, parameters = self.parameters.internal) # Assess marker detection time in ms detection_time = (time.perf_counter() - detection_start) * 1e3 # Is there detected markers ? - if len(self.__detected_markers_corners) > 0: + if len(detected_markers_corners) > 0: # Transform markers ids array into list - self.__detected_markers_ids = self.__detected_markers_ids.T[0] + detected_markers_ids = detected_markers_ids.T[0] # Gather detected markers data and update metrics self.__detection_count += 1 - for i, marker_id in enumerate(self.__detected_markers_ids): + for i, marker_id in enumerate(detected_markers_ids): marker = ArUcoMarker.ArUcoMarker(self.dictionary, marker_id, self.marker_size) - marker.corners = self.__detected_markers_corners[i] + marker.corners = detected_markers_corners[i] # No pose estimation: call estimate_markers_pose to get one marker.translation = numpy.empty([0]) @@ -300,6 +298,7 @@ class ArUcoDetector(): self.__detected_markers[marker_id] = marker + # Update metrics self.__detected_ids.append(marker_id) return detection_time @@ -308,26 +307,23 @@ class ArUcoDetector(): """Estimate pose of current detected markers or of given markers id list.""" # Is there detected markers ? - if len(self.__detected_markers_corners) > 0: + if len(self.__detected_markers) > 0: - # Is there a marker selection ? 
- if len(markers_ids) > 0: + # Select all markers by default + if len(markers_ids) == 0: - selected_markers_corners = tuple() - selected_markers_ids = [] + markers_ids = self.__detected_markers.keys() - for i, marker_id in enumerate(self.__detected_markers_ids): + # Prepare data for aruco.estimatePoseSingleMarkers function + selected_markers_corners = tuple() + selected_markers_ids = [] - if marker_id in markers_ids: - - selected_markers_corners += (self.__detected_markers_corners[i],) - selected_markers_ids.append(marker_id) + for marker_id, marker in self.__detected_markers.items(): - # Otherwise, estimate pose of all markers - else: + if marker_id in markers_ids: - selected_markers_corners = self.__detected_markers_corners - selected_markers_ids = self.__detected_markers_ids + selected_markers_corners += (marker.corners,) + selected_markers_ids.append(marker_id) # Estimate pose of selected markers if len(selected_markers_corners) > 0: @@ -377,13 +373,13 @@ class ArUcoDetector(): # detect markers from gray picture gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY) - self.__detected_markers_corners, self.__detected_markers_ids, _ = aruco.detectMarkers(gray, self.dictionary.markers, parameters = self.parameters.internal) + detected_markers_corners, detected_markers_ids, _ = aruco.detectMarkers(gray, self.dictionary.markers, parameters = self.parameters.internal) # if all board markers are detected - if len(self.__detected_markers_corners) == expected_markers_number: + if len(detected_markers_corners) == expected_markers_number: self.__board = board - self.__board_corners_number, self.__board_corners, self.__board_corners_ids = aruco.interpolateCornersCharuco(self.__detected_markers_corners, self.__detected_markers_ids, gray, self.__board.model) + self.__board_corners_number, self.__board_corners, self.__board_corners_ids = aruco.interpolateCornersCharuco(detected_markers_corners, detected_markers_ids, gray, self.__board.model) else: -- cgit v1.1 From 9e3a8e45e11a508817ae553604932171378678b2 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Wed, 11 Oct 2023 16:06:54 +0200 Subject: Smoothing marker corners if required to stabilize pose estimation. --- src/argaze/ArUcoMarkers/ArUcoDetector.py | 44 +++++++++++++++++++++++++------- 1 file changed, 35 insertions(+), 9 deletions(-) diff --git a/src/argaze/ArUcoMarkers/ArUcoDetector.py b/src/argaze/ArUcoMarkers/ArUcoDetector.py index 3ef7fa6..9e40561 100644 --- a/src/argaze/ArUcoMarkers/ArUcoDetector.py +++ b/src/argaze/ArUcoMarkers/ArUcoDetector.py @@ -131,24 +131,27 @@ class DetectorParameters(): @dataclass class ArUcoDetector(): - """ArUco markers detector.""" + """ArUco markers detector. + + Parameters: + dictionary: ArUco markers dictionary to detect. + marker_size: Size of ArUco markers to detect in centimeter. + optic_parameters: Optic parameters to use for ArUco detection into image. + parameters: ArUco detector parameters. + smooth_marker_corners: Enable marker corners smoothing to stabilize pose estimation. + """ dictionary: ArUcoMarkersDictionary.ArUcoMarkersDictionary = field(default_factory=ArUcoMarkersDictionary.ArUcoMarkersDictionary) - """ArUco markers dictionary to detect.""" - marker_size: float = field(default=0.) 
- """Size of ArUco markers to detect in centimeter.""" - optic_parameters: ArUcoOpticCalibrator.OpticParameters = field(default_factory=ArUcoOpticCalibrator.OpticParameters) - """Optic parameters to use for ArUco detection into image.""" - parameters: DetectorParameters = field(default_factory=DetectorParameters) - """ArUco detector parameters.""" + smooth_marker_corners: bool = field(default=False) def __post_init__(self): # Init detected markers data self.__detected_markers = {} + self.__last_detected_markers = {} # Init detected board data self.__board = None @@ -264,6 +267,9 @@ class ArUcoDetector(): detection time: marker detection time in ms. """ + # Copy last detected markers + self.__last_detected_markers = self.__detected_markers + # Reset detected markers data self.__detected_markers, detected_markers_corners, detected_markers_ids = {}, [], [] @@ -289,7 +295,27 @@ class ArUcoDetector(): marker = ArUcoMarker.ArUcoMarker(self.dictionary, marker_id, self.marker_size) - marker.corners = detected_markers_corners[i] + # Smooth marker corners if required + if self.smooth_marker_corners: + + # Try to smooth corners with last detected markers corners + try: + + # Smooth corners positions if the distance between new marker and last marker is lower than half marker size + half_marker_size_px = numpy.linalg.norm(detected_markers_corners[i][0][1] - detected_markers_corners[i][0][0]) / 2 + distance_to_last = numpy.linalg.norm(detected_markers_corners[i] - self.__last_detected_markers[marker_id].corners) + smooth_factor = 0. if distance_to_last > half_marker_size_px else (half_marker_size_px - distance_to_last) / half_marker_size_px + + marker.corners = numpy.rint(self.__last_detected_markers[marker_id].corners * smooth_factor + detected_markers_corners[i] * (1 - smooth_factor)) + + # Avoid smoothing if the marker was not part of last detection + except KeyError: + + marker.corners = detected_markers_corners[i] + + else: + + marker.corners = detected_markers_corners[i] # No pose estimation: call estimate_markers_pose to get one marker.translation = numpy.empty([0]) -- cgit v1.1 From 5f902cd2f41aa84267e2e27e53229268d8e4d579 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Wed, 11 Oct 2023 22:57:50 +0200 Subject: Major pose estimation improvement using SolvePnP algorithm. 
--- src/argaze/ArUcoMarkers/ArUcoCamera.py | 5 +- src/argaze/ArUcoMarkers/ArUcoDetector.py | 64 +----- src/argaze/ArUcoMarkers/ArUcoMarker.py | 2 +- src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py | 327 +++++---------------------- src/argaze/ArUcoMarkers/ArUcoScene.py | 31 +-- 5 files changed, 66 insertions(+), 363 deletions(-) diff --git a/src/argaze/ArUcoMarkers/ArUcoCamera.py b/src/argaze/ArUcoMarkers/ArUcoCamera.py index b850dde..ed6c619 100644 --- a/src/argaze/ArUcoMarkers/ArUcoCamera.py +++ b/src/argaze/ArUcoMarkers/ArUcoCamera.py @@ -189,11 +189,8 @@ class ArUcoCamera(ArFeatures.ArCamera): try: - # Estimate scene markers poses - self.aruco_detector.estimate_markers_pose(scene.aruco_markers_group.identifiers) - # Estimate scene pose from detected scene markers - tvec, rmat, _, _ = scene.estimate_pose(self.aruco_detector.detected_markers) + tvec, rmat, _ = scene.estimate_pose(self.aruco_detector.detected_markers) # Project scene into camera frame according estimated pose for layer_name, layer_projection in scene.project(tvec, rmat, self.visual_hfov, self.visual_vfov): diff --git a/src/argaze/ArUcoMarkers/ArUcoDetector.py b/src/argaze/ArUcoMarkers/ArUcoDetector.py index 9e40561..f178a20 100644 --- a/src/argaze/ArUcoMarkers/ArUcoDetector.py +++ b/src/argaze/ArUcoMarkers/ArUcoDetector.py @@ -138,20 +138,17 @@ class ArUcoDetector(): marker_size: Size of ArUco markers to detect in centimeter. optic_parameters: Optic parameters to use for ArUco detection into image. parameters: ArUco detector parameters. - smooth_marker_corners: Enable marker corners smoothing to stabilize pose estimation. """ dictionary: ArUcoMarkersDictionary.ArUcoMarkersDictionary = field(default_factory=ArUcoMarkersDictionary.ArUcoMarkersDictionary) marker_size: float = field(default=0.) optic_parameters: ArUcoOpticCalibrator.OpticParameters = field(default_factory=ArUcoOpticCalibrator.OpticParameters) parameters: DetectorParameters = field(default_factory=DetectorParameters) - smooth_marker_corners: bool = field(default=False) def __post_init__(self): # Init detected markers data self.__detected_markers = {} - self.__last_detected_markers = {} # Init detected board data self.__board = None @@ -267,9 +264,6 @@ class ArUcoDetector(): detection time: marker detection time in ms. """ - # Copy last detected markers - self.__last_detected_markers = self.__detected_markers - # Reset detected markers data self.__detected_markers, detected_markers_corners, detected_markers_ids = {}, [], [] @@ -294,28 +288,7 @@ class ArUcoDetector(): for i, marker_id in enumerate(detected_markers_ids): marker = ArUcoMarker.ArUcoMarker(self.dictionary, marker_id, self.marker_size) - - # Smooth marker corners if required - if self.smooth_marker_corners: - - # Try to smooth corners with last detected markers corners - try: - - # Smooth corners positions if the distance between new marker and last marker is lower than half marker size - half_marker_size_px = numpy.linalg.norm(detected_markers_corners[i][0][1] - detected_markers_corners[i][0][0]) / 2 - distance_to_last = numpy.linalg.norm(detected_markers_corners[i] - self.__last_detected_markers[marker_id].corners) - smooth_factor = 0. 
if distance_to_last > half_marker_size_px else (half_marker_size_px - distance_to_last) / half_marker_size_px - - marker.corners = numpy.rint(self.__last_detected_markers[marker_id].corners * smooth_factor + detected_markers_corners[i] * (1 - smooth_factor)) - - # Avoid smoothing if the marker was not part of last detection - except KeyError: - - marker.corners = detected_markers_corners[i] - - else: - - marker.corners = detected_markers_corners[i] + marker.corners = detected_markers_corners[i][0] # No pose estimation: call estimate_markers_pose to get one marker.translation = numpy.empty([0]) @@ -329,41 +302,6 @@ class ArUcoDetector(): return detection_time - def estimate_markers_pose(self, markers_ids: list = []): - """Estimate pose of current detected markers or of given markers id list.""" - - # Is there detected markers ? - if len(self.__detected_markers) > 0: - - # Select all markers by default - if len(markers_ids) == 0: - - markers_ids = self.__detected_markers.keys() - - # Prepare data for aruco.estimatePoseSingleMarkers function - selected_markers_corners = tuple() - selected_markers_ids = [] - - for marker_id, marker in self.__detected_markers.items(): - - if marker_id in markers_ids: - - selected_markers_corners += (marker.corners,) - selected_markers_ids.append(marker_id) - - # Estimate pose of selected markers - if len(selected_markers_corners) > 0: - - markers_rvecs, markers_tvecs, markers_points = aruco.estimatePoseSingleMarkers(selected_markers_corners, self.marker_size, numpy.array(self.optic_parameters.K), numpy.array(self.optic_parameters.D)) - - for i, marker_id in enumerate(selected_markers_ids): - - marker = self.__detected_markers[marker_id] - - marker.translation = markers_tvecs[i][0] - marker.rotation, _ = cv.Rodrigues(markers_rvecs[i][0]) - marker.points = markers_points.reshape(4, 3) - @property def detected_markers(self) -> dict[ArUcoMarkerType]: """Access to detected markers dictionary.""" diff --git a/src/argaze/ArUcoMarkers/ArUcoMarker.py b/src/argaze/ArUcoMarkers/ArUcoMarker.py index 57bd8bd..f088dae 100644 --- a/src/argaze/ArUcoMarkers/ArUcoMarker.py +++ b/src/argaze/ArUcoMarkers/ArUcoMarker.py @@ -68,7 +68,7 @@ class ArUcoMarker(): # Draw marker if required if color is not None: - aruco.drawDetectedMarkers(image, [self.corners], numpy.array([self.identifier]), color) + aruco.drawDetectedMarkers(image, [numpy.array([list(self.corners)])], numpy.array([self.identifier]), color) # Draw marker axes if pose has been estimated and if required if self.translation.size == 3 and self.rotation.size == 9 and draw_axes is not None: diff --git a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py index 4a43965..df390b4 100644 --- a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py +++ b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py @@ -17,8 +17,7 @@ import re from argaze.ArUcoMarkers import ArUcoMarkersDictionary, ArUcoMarker, ArUcoOpticCalibrator import numpy -import cv2 as cv -import cv2.aruco as aruco +import cv2 T0 = numpy.array([0., 0., 0.]) """Define no translation vector.""" @@ -58,37 +57,16 @@ def is_rotation_matrix(R): return n < 1e-3 -def make_euler_rotation_vector(R): - - assert(is_rotation_matrix(R)) - - sy = math.sqrt(R[0,0] * R[0,0] + R[1,0] * R[1,0]) - - singular = sy < 1e-6 - - if not singular : - x = math.atan2(R[2,1] , R[2,2]) - y = math.atan2(-R[2,0], sy) - z = math.atan2(R[1,0], R[0,0]) - else : - x = math.atan2(-R[1,2], R[1,1]) - y = math.atan2(-R[2,0], sy) - z = 0 - - return numpy.array([numpy.rad2deg(x), 
numpy.rad2deg(y), numpy.rad2deg(z)]) - @dataclass(frozen=True) class Place(): - """Define a place as a pose and a marker. + """Define a place as list of corners position and a marker. Parameters: - translation: position in group referential. - rotation: rotation in group referential. + corners: 3D corners position in group referential. marker: ArUco marker linked to the place. """ - translation: numpy.array - rotation: numpy.array + corners: numpy.array marker: dict @dataclass @@ -146,12 +124,16 @@ class ArUcoMarkersGroup(): new_marker = ArUcoMarker.ArUcoMarker(self.dictionary, identifier, self.marker_size) - new_places[identifier] = Place(tvec, rmat, new_marker) + # Build marker corners thanks to translation vector and rotation matrix + place_corners = numpy.array([[-self.marker_size/2, self.marker_size/2, 0], [self.marker_size/2, self.marker_size/2, 0], [self.marker_size/2, -self.marker_size/2, 0], [-self.marker_size/2, -self.marker_size/2, 0]]) + place_corners = place_corners.dot(rmat) + tvec + + new_places[identifier] = Place(place_corners, new_marker) - # else places are configured using detected markers + # else places are configured using detected markers estimated points elif isinstance(data, ArUcoMarker.ArUcoMarker): - new_places[identifier] = Place(data.translation, data.rotation, data) + new_places[identifier] = Place(data.points, data) # else places are already at expected format elif (type(identifier) == int) and isinstance(data, Place): @@ -160,9 +142,6 @@ class ArUcoMarkersGroup(): self.places = new_places - # Init place consistency - self.init_places_consistency() - @classmethod def from_obj(self, obj_filepath: str) -> ArUcoMarkersGroupType: """Load ArUco markers group from .obj file. @@ -264,28 +243,16 @@ class ArUcoMarkersGroup(): # Retreive marker vertices thanks to face vertice ids for identifier, face in faces.items(): - # Gather place corners from counter clockwise ordered face vertices - corners = numpy.array([ vertices[i-1] for i in face ]) - - # Edit translation (Tp) allowing to move world axis (W) at place axis (P) - Tp = corners.mean(axis=0) + # Gather place corners in clockwise order + cw_corners = numpy.array([ vertices[i-1] for i in reversed(face) ]) # Edit place axis from corners positions - place_x_axis = corners[1:3].mean(axis=0) - Tp + place_x_axis = cw_corners[1:3].mean(axis=0) place_x_axis_norm = numpy.linalg.norm(place_x_axis) - place_x_axis = place_x_axis / place_x_axis_norm - - place_y_axis = corners[2:4].mean(axis=0) - Tp + + place_y_axis = cw_corners[2:4].mean(axis=0) place_y_axis_norm = numpy.linalg.norm(place_y_axis) - place_y_axis = place_y_axis / place_y_axis_norm - place_z_axis = normals[identifier] - - # Edit rotation (Rp) allowing to transform world axis (W) into place axis (P) - W = numpy.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]) - P = numpy.array([place_x_axis, place_y_axis, place_z_axis]) - Rp = W.dot(P.T) - # Check axis size: they should be almost equal if math.isclose(place_x_axis_norm, place_y_axis_norm, rel_tol=1e-3): @@ -302,7 +269,7 @@ class ArUcoMarkersGroup(): # Create a new place related to a new marker new_marker = ArUcoMarker.ArUcoMarker(new_dictionary, identifier, new_marker_size) - new_places[identifier] = Place(Tp, Rp, new_marker) + new_places[identifier] = Place(cw_corners, new_marker) except IOError: raise IOError(f'File not found: {obj_filepath}') @@ -337,18 +304,7 @@ class ArUcoMarkersGroup(): output += '\n\n\tPlaces:' for identifier, place in self.places.items(): output += f'\n\t\t- {identifier}:' - 
output += f'\n{place.translation}' - output += f'\n{place.rotation}' - - output += '\n\n\tAngle cache:' - for A_identifier, A_angle_cache in self.__rotation_cache.items(): - for B_identifier, angle in A_angle_cache.items(): - output += f'\n\t\t- {A_identifier}/{B_identifier}: [{angle[0]:3f} {angle[1]:3f} {angle[2]:3f}]' - - output += '\n\n\tDistance cache:' - for A_identifier, A_distance_cache in self.__translation_cache.items(): - for B_identifier, distance in A_distance_cache.items(): - output += f'\n\t\t- {A_identifier}/{B_identifier}: {distance:3f}' + output += f'\n{place.corners}' return output @@ -381,148 +337,22 @@ class ArUcoMarkersGroup(): return group_markers, remaining_markers - def init_places_consistency(self): - """Initialize places consistency to speed up further markers consistency checking.""" - - # Process expected rotation between places combinations to speed up further calculations - self.__rotation_cache = {} - for (A_identifier, A_place), (B_identifier, B_place) in itertools.combinations(self.places.items(), 2): - - A = self.places[A_identifier].rotation - B = self.places[B_identifier].rotation - - if numpy.array_equal(A, B): - - AB_rvec = [0., 0., 0.] - BA_rvec = [0., 0., 0.] - - else: - - # Calculate euler angle representation of AB and BA rotation matrix - AB_rvec = make_euler_rotation_vector(B.dot(A.T)) - BA_rvec = make_euler_rotation_vector(A.dot(B.T)) - - try: - self.__rotation_cache[A_identifier][B_identifier] = AB_rvec - except: - self.__rotation_cache[A_identifier] = {B_identifier: AB_rvec} - - try: - self.__rotation_cache[B_identifier][A_identifier] = BA_rvec - except: - self.__rotation_cache[B_identifier] = {A_identifier: BA_rvec} - - # Process translation between each places combinations to speed up further calculations - self.__translation_cache = {} - for (A_identifier, A_place), (B_identifier, B_place) in itertools.combinations(self.places.items(), 2): - - A = self.places[A_identifier].translation - B = self.places[B_identifier].translation - - # Calculate translation between A and B position - AB_tvec = numpy.linalg.norm(B - A) - - try: - self.__translation_cache[A_identifier][B_identifier] = AB_tvec - except: - self.__translation_cache[A_identifier] = {B_identifier: AB_tvec} - - try: - self.__translation_cache[B_identifier][A_identifier] = AB_tvec - except: - self.__translation_cache[B_identifier] = {A_identifier: AB_tvec} + def estimate_pose_from_markers_corners(self, markers: dict, K: numpy.array, D: numpy.array) -> Tuple[bool, numpy.array, numpy.array]: + """Estimate pose from markers corners and places corners. - def check_markers_consistency(self, group_markers: dict, angle_tolerance: float, distance_tolerance: float) -> Tuple[dict, dict, dict]: - """Evaluate if given markers configuration match related places configuration. + Parameters: + markers: detected markers to use for pose estimation. 
+ K: intrinsic camera parameters + D: camera distorsion matrix Returns: - dict of consistent markers - dict of unconsistent markers - dict of identified distance or angle unconsistencies and out-of-bounds values + success: True if the pose estimation succeeded + tvec: scene translation vector + rvec: scene rotation vector """ - consistent_markers = {} - unconsistencies = {'rotation': {}, 'translation': {}} - - for (A_identifier, A_marker), (B_identifier, B_marker) in itertools.combinations(group_markers.items(), 2): - - try: - - # Rotation matrix from A marker to B marker - AB = B_marker.rotation.dot(A_marker.rotation.T) - - # Calculate euler angle representation of AB rotation matrix - AB_rvec = make_euler_rotation_vector(AB) - expected_rvec= self.__rotation_cache[A_identifier][B_identifier] - - # Calculate distance between A marker center and B marker center - AB_tvec = numpy.linalg.norm(A_marker.translation - B_marker.translation) - expected_tvec = self.__translation_cache[A_identifier][B_identifier] - - # Check angle and distance according given tolerance then normalise marker pose - consistent_rotation = numpy.allclose(AB_rvec, expected_rvec, atol=angle_tolerance) - consistent_translation = math.isclose(AB_tvec, expected_tvec, abs_tol=distance_tolerance) - - if consistent_rotation and consistent_translation: - - if A_identifier not in consistent_markers.keys(): - - # Remember this marker is already validated - consistent_markers[A_identifier] = A_marker - - if B_identifier not in consistent_markers.keys(): - - # Remember this marker is already validated - consistent_markers[B_identifier] = B_marker - - else: - - if not consistent_rotation: - unconsistencies['rotation'][f'{A_identifier}/{B_identifier}'] = {'current': AB_rvec, 'expected': expected_rvec} - - if not consistent_translation: - unconsistencies['translation'][f'{A_identifier}/{B_identifier}'] = {'current': AB_tvec, 'expected': expected_tvec} - - except KeyError: - - raise ValueError(f'Marker {A_identifier} or {B_identifier} don\'t belong to the group.') - - # Gather unconsistent markers - unconsistent_markers = {} - - for identifier, marker in group_markers.items(): - - if identifier not in consistent_markers.keys(): - - unconsistent_markers[identifier] = marker - - return consistent_markers, unconsistent_markers, unconsistencies - - def estimate_pose_from_single_marker(self, marker: ArUcoMarker.ArUcoMarker) -> Tuple[numpy.array, numpy.array]: - """Calculate rotation and translation that move a marker to its place.""" - - # Get the place related to the given marker - try: - - place = self.places[marker.identifier] - - # Rotation matrix that transform marker to related place - self._rotation = marker.rotation.dot(place.rotation.T) - - # Translation vector that transform marker to related place - self._translation = marker.translation - place.translation.dot(place.rotation).dot(marker.rotation.T) - - return self._translation, self._rotation - - except KeyError: - - raise ValueError(f'Marker {marker.identifier} doesn\'t belong to the group.') - - def estimate_pose_from_markers(self, markers: dict) -> Tuple[numpy.array, numpy.array]: - """Calculate average rotation and translation that move markers to their related places.""" - - rotations = [] - translations = [] + markers_corners_2d = [] + places_corners_3d = [] for identifier, marker in markers.items(): @@ -530,72 +360,23 @@ class ArUcoMarkersGroup(): place = self.places[identifier] - # Rotation matrix that transform marker to related place - R = 
marker.rotation.dot(place.rotation.T) - - # Translation vector that transform marker to related place - T = marker.translation - place.translation.dot(place.rotation).dot(marker.rotation.T) + for marker_corner in marker.corners: + markers_corners_2d.append(list(marker_corner)) - rotations.append(R) - translations.append(T) + for place_corner in place.corners: + places_corners_3d.append(list(place_corner)) except KeyError: raise ValueError(f'Marker {marker.identifier} doesn\'t belong to the group.') - # Consider ArUcoMarkersGroup rotation as the mean of all marker rotations - # !!! WARNING !!! This is a bad hack : processing rotations average is a very complex problem that needs to well define the distance calculation method before. - self._rotation = numpy.mean(numpy.array(rotations), axis=0) - - # Consider ArUcoMarkersGroup translation as the mean of all marker translations - self._translation = numpy.mean(numpy.array(translations), axis=0) - - return self._translation, self._rotation - - def estimate_pose_from_axis_markers(self, origin_marker: ArUcoMarker.ArUcoMarker, horizontal_axis_marker: ArUcoMarker.ArUcoMarker, vertical_axis_marker: ArUcoMarker.ArUcoMarker) -> Tuple[numpy.array, numpy.array]: - """Calculate rotation and translation from 3 markers defining an orthogonal axis.""" - - O_marker = origin_marker - A_marker = horizontal_axis_marker - B_marker = vertical_axis_marker - - O_place = self.places[O_marker.identifier] - A_place = self.places[A_marker.identifier] - B_place = self.places[B_marker.identifier] - - # Place axis - OA = A_place.translation - O_place.translation - OA = OA / numpy.linalg.norm(OA) - - OB = B_place.translation - O_place.translation - OB = OB / numpy.linalg.norm(OB) - - # Detect and correct bad place axis orientation - X_sign = numpy.sign(OA)[0] - Y_sign = numpy.sign(OB)[1] - - P = numpy.array([OA*X_sign, OB*Y_sign, numpy.cross(OA*X_sign, OB*Y_sign)]) - - # Marker axis - OA = A_marker.translation - O_marker.translation - OA = OA / numpy.linalg.norm(OA) - - OB = B_marker.translation - O_marker.translation - OB = OB / numpy.linalg.norm(OB) - - # Detect and correct bad place axis orientation - X_sign = numpy.sign(OA)[0] - Y_sign = -numpy.sign(OB)[1] - - M = numpy.array([OA*X_sign, OB*Y_sign, numpy.cross(OA*X_sign, OB*Y_sign)]) - - # Then estimate ArUcoMarkersGroup rotation - self._rotation = P.dot(M.T) + # Solve + success, rvec, tvec = cv2.solvePnP(numpy.array(places_corners_3d), numpy.array(markers_corners_2d), numpy.array(K), numpy.array(D), flags=0) - # Consider ArUcoMarkersGroup translation as the translation of the marker at axis origin - self._translation = O_marker.translation - O_place.translation.dot(O_place.rotation).dot(M.T) + self._translation = tvec.T + self._rotation = rvec.T - return self._translation, self._rotation + return success, self._translation, self._rotation @property def translation(self) -> numpy.array: @@ -624,15 +405,15 @@ class ArUcoMarkersGroup(): try: axisPoints = numpy.float32([[length, 0, 0], [0, length, 0], [0, 0, length], [0, 0, 0]]).reshape(-1, 3) - axisPoints, _ = cv.projectPoints(axisPoints, self._rotation, self._translation, numpy.array(K), numpy.array(D)) + axisPoints, _ = cv2.projectPoints(axisPoints, self._rotation, self._translation, numpy.array(K), numpy.array(D)) axisPoints = axisPoints.astype(int) - cv.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[0].ravel()), (0, 0, 255), thickness) # X (red) - cv.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[1].ravel()), (0, 255, 0), thickness) # Y 
(green) - cv.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[2].ravel()), (255, 0, 0), thickness) # Z (blue) + cv2.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[0].ravel()), (0, 0, 255), thickness) # X (red) + cv2.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[1].ravel()), (0, 255, 0), thickness) # Y (green) + cv2.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[2].ravel()), (255, 0, 0), thickness) # Z (blue) # Ignore errors due to out of field axis: their coordinate are larger than int32 limitations. - except cv.error: + except cv2.error: pass def draw_places(self, image: numpy.array, K, D, color: tuple = None, border_size: int = 0): @@ -648,16 +429,16 @@ class ArUcoMarkersGroup(): R = self.places[identifier].rotation placePoints = (T + numpy.float32([R.dot([-l, -l, 0]), R.dot([l, -l, 0]), R.dot([l, l, 0]), R.dot([-l, l, 0])])).reshape(-1, 3) - placePoints, _ = cv.projectPoints(placePoints, self._rotation, self._translation, numpy.array(K), numpy.array(D)) + placePoints, _ = cv2.projectPoints(placePoints, self._rotation, self._translation, numpy.array(K), numpy.array(D)) placePoints = placePoints.astype(int) - cv.line(image, tuple(placePoints[0].ravel()), tuple(placePoints[1].ravel()), color, border_size) - cv.line(image, tuple(placePoints[1].ravel()), tuple(placePoints[2].ravel()), color, border_size) - cv.line(image, tuple(placePoints[2].ravel()), tuple(placePoints[3].ravel()), color, border_size) - cv.line(image, tuple(placePoints[3].ravel()), tuple(placePoints[0].ravel()), color, border_size) + cv2.line(image, tuple(placePoints[0].ravel()), tuple(placePoints[1].ravel()), color, border_size) + cv2.line(image, tuple(placePoints[1].ravel()), tuple(placePoints[2].ravel()), color, border_size) + cv2.line(image, tuple(placePoints[2].ravel()), tuple(placePoints[3].ravel()), color, border_size) + cv2.line(image, tuple(placePoints[3].ravel()), tuple(placePoints[0].ravel()), color, border_size) # Ignore errors due to out of field places: their coordinate are larger than int32 limitations. - except cv.error: + except cv2.error: pass def draw_places_axes(self, image: numpy.array, K, D, thickness: int = 0, length: float = 0): @@ -671,15 +452,15 @@ class ArUcoMarkersGroup(): R = self.places[identifier].rotation axisPoints = (T + numpy.float32([R.dot([length, 0, 0]), R.dot([0, length, 0]), R.dot([0, 0, length]), R.dot([0, 0, 0])])).reshape(-1, 3) - axisPoints, _ = cv.projectPoints(axisPoints, self._rotation, self._translation, numpy.array(K), numpy.array(D)) + axisPoints, _ = cv2.projectPoints(axisPoints, self._rotation, self._translation, numpy.array(K), numpy.array(D)) axisPoints = axisPoints.astype(int) - cv.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[0].ravel()), (0, 0, 255), thickness) # X (red) - cv.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[1].ravel()), (0, 255, 0), thickness) # Y (green) - cv.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[2].ravel()), (255, 0, 0), thickness) # Z (blue) + cv2.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[0].ravel()), (0, 0, 255), thickness) # X (red) + cv2.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[1].ravel()), (0, 255, 0), thickness) # Y (green) + cv2.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[2].ravel()), (255, 0, 0), thickness) # Z (blue) # Ignore errors due to out of field places: their coordinate are larger than int32 limitations. 
- except cv.error: + except cv2.error: pass def draw(self, image: numpy.array, K, D, draw_axes: dict = None, draw_places: dict = None, draw_places_axes: dict = None): diff --git a/src/argaze/ArUcoMarkers/ArUcoScene.py b/src/argaze/ArUcoMarkers/ArUcoScene.py index f6b303a..b8b9cfd 100644 --- a/src/argaze/ArUcoMarkers/ArUcoScene.py +++ b/src/argaze/ArUcoMarkers/ArUcoScene.py @@ -96,14 +96,13 @@ class ArUcoScene(ArFeatures.ArScene): # Create new aruco scene using temporary ar scene values return ArUcoScene(aruco_markers_group=new_aruco_markers_group, **temp_ar_scene_values) - def estimate_pose(self, detected_markers) -> Tuple[numpy.array, numpy.array, str, dict]: + def estimate_pose(self, detected_markers) -> Tuple[numpy.array, numpy.array, dict]: """Estimate scene pose from detected ArUco markers. Returns: - scene translation vector - scene rotation matrix - pose estimation strategy - dict of markers used to estimate the pose + scene translation vector + scene rotation matrix + dict of markers used to estimate the pose """ # Pose estimation fails when no marker is detected @@ -118,26 +117,14 @@ class ArUcoScene(ArFeatures.ArScene): raise ArFeatures.PoseEstimationFailed('No marker belongs to the scene') - # Estimate scene pose from unique marker transformations - elif len(scene_markers) == 1: + # Estimate pose from a markers corners + success, tvec, rmat = self.aruco_markers_group.estimate_pose_from_markers_corners(scene_markers, self.parent.aruco_detector.optic_parameters.K, self.parent.aruco_detector.optic_parameters.D) - marker_id, marker = scene_markers.popitem() - tvec, rmat = self.aruco_markers_group.estimate_pose_from_single_marker(marker) - - return tvec, rmat, 'estimate_pose_from_single_marker', {marker_id: marker} + if not success: - # Otherwise, check markers consistency - consistent_markers, unconsistent_markers, unconsistencies = self.aruco_markers_group.check_markers_consistency(scene_markers, self.angle_tolerance, self.distance_tolerance) + raise ArFeatures.PoseEstimationFailed('Can\'t estimate pose from markers corners positions') - # Pose estimation fails when no marker passes consistency checking - if len(consistent_markers) == 0: - - raise ArFeatures.PoseEstimationFailed('Unconsistent marker poses', unconsistencies) - - # Otherwise, estimate scene pose from all consistent markers pose - tvec, rmat = self.aruco_markers_group.estimate_pose_from_markers(consistent_markers) - - return tvec, rmat, 'estimate_pose_from_markers', consistent_markers + return tvec, rmat, scene_markers def draw(self, image: numpy.array, draw_aruco_markers_group: dict = None): """ -- cgit v1.1 From a1ee2b893cce70ba03fbba1c12d9d0829e3e9632 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Thu, 12 Oct 2023 21:26:27 +0200 Subject: Adding estimate_markers_pose method to ArUcoDetector. --- src/argaze/ArUcoMarkers/ArUcoDetector.py | 36 ++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/src/argaze/ArUcoMarkers/ArUcoDetector.py b/src/argaze/ArUcoMarkers/ArUcoDetector.py index f178a20..01527a1 100644 --- a/src/argaze/ArUcoMarkers/ArUcoDetector.py +++ b/src/argaze/ArUcoMarkers/ArUcoDetector.py @@ -302,6 +302,42 @@ class ArUcoDetector(): return detection_time + def estimate_markers_pose(self, markers_ids: list = []): + """Estimate pose of current detected markers or of given markers id list.""" + + # Is there detected markers ? 
+ if len(self.__detected_markers) > 0: + + # Select all markers by default + if len(markers_ids) == 0: + + markers_ids = self.__detected_markers.keys() + + # Prepare data for aruco.estimatePoseSingleMarkers function + selected_markers_corners = tuple() + selected_markers_ids = [] + + for marker_id, marker in self.__detected_markers.items(): + + if marker_id in markers_ids: + + selected_markers_corners += (marker.corners,) + selected_markers_ids.append(marker_id) + + # Estimate pose of selected markers + if len(selected_markers_corners) > 0: + + markers_rvecs, markers_tvecs, markers_points = aruco.estimatePoseSingleMarkers(selected_markers_corners, self.marker_size, numpy.array(self.optic_parameters.K), numpy.array(self.optic_parameters.D)) + + for i, marker_id in enumerate(selected_markers_ids): + + marker = self.__detected_markers[marker_id] + + marker.translation = markers_tvecs[i][0] + marker.rotation, _ = cv.Rodrigues(markers_rvecs[i][0]) + + marker.points = markers_points.reshape(4, 3).dot(marker.rotation) + marker.translation + @property def detected_markers(self) -> dict[ArUcoMarkerType]: """Access to detected markers dictionary.""" -- cgit v1.1 From 673c21df7f231f5acd05311445686d0e521d5e7a Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Thu, 12 Oct 2023 21:26:59 +0200 Subject: minor annotation change. --- src/argaze/ArUcoMarkers/ArUcoMarker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/argaze/ArUcoMarkers/ArUcoMarker.py b/src/argaze/ArUcoMarkers/ArUcoMarker.py index f088dae..0f368f6 100644 --- a/src/argaze/ArUcoMarkers/ArUcoMarker.py +++ b/src/argaze/ArUcoMarkers/ArUcoMarker.py @@ -29,7 +29,7 @@ class ArUcoMarker(): """Size of marker in centimeters.""" corners: numpy.array = field(init=False, repr=False) - """Estimated 2D corner positions in camera image referential.""" + """Estimated 2D corners position in camera image referential.""" translation: numpy.array = field(init=False, repr=False) """Estimated 3D center position in camera world referential.""" -- cgit v1.1 From f26058148061f80eb4bb3fe16d6a24c910bf8bd5 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Thu, 12 Oct 2023 21:27:31 +0200 Subject: Fixing ArUcoMarkersGroup.to_obj method. 
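Across these pose estimation commits, the group pose is solved by pairing each detected 2D marker corner with its matching 3D place corner in a single solvePnP call. A condensed sketch of that pairing, assuming K and D come from the optic parameters (the flag shown is illustrative; later commits in this series experiment with several solvePnP flags):

    import numpy
    import cv2

    def estimate_group_pose(markers_corners_2d, places_corners_3d, K, D):

        # markers_corners_2d: Nx2 detected corner positions in pixels
        # places_corners_3d: Nx3 matching corner positions in group referential
        success, rvec, tvec = cv2.solvePnP(numpy.array(places_corners_3d, dtype=numpy.float32),
                                           numpy.array(markers_corners_2d, dtype=numpy.float32),
                                           numpy.array(K), numpy.array(D),
                                           flags=cv2.SOLVEPNP_ITERATIVE)

        return success, tvec.T, rvec.T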
---
 src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py | 22 ++++++++++------------
 1 file changed, 10 insertions(+), 12 deletions(-)

diff --git a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py
index df390b4..bedd408 100644
--- a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py
+++ b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py
@@ -497,26 +497,24 @@ class ArUcoMarkersGroup():
 
             v_count = 0
 
-            for identifier, place in self.places.items():
+            for p, (identifier, place) in enumerate(self.places.items()):
 
                 file.write(f'o {self.dictionary.name}#{identifier}_Marker\n')
 
                 vertices = ''
 
-                T = place.translation
-                R = place.rotation
-
-                points = (T + numpy.float32([R.dot(place.marker.points[0]), R.dot(place.marker.points[1]), R.dot(place.marker.points[2]), R.dot(place.marker.points[3])])).reshape(-1, 3)
-
-                print(points)
-
                 # Write vertices in reverse order
-                for i in [3, 2, 1, 0]:
+                for v in [3, 2, 1, 0]:
 
-                    file.write(f'v {" ".join(map(str, points[i]))}\n')
+                    file.write(f'v {" ".join(map(str, place.corners[v]))}\n')
                     v_count += 1
-                    vertices += f' {v_count}'
+                    vertices += f' {v_count}//{p+1}'
+
+                # Write normal vector
+                nvec = numpy.cross(place.corners[-1] - place.corners[0], place.corners[1] - place.corners[0])
+                nvec = nvec / numpy.linalg.norm(nvec)
+                file.write(f'vn {nvec[0]} {nvec[1]} {nvec[2]}\n')
 
-                file.write('s off\n')
+                #file.write('s off\n')
 
                 file.write(f'f{vertices}\n')
--
cgit v1.1

From a2960562b3a66f610d8d7d8e80faedc2fff024b8 Mon Sep 17 00:00:00 2001
From: Théo de la Hogue
Date: Mon, 16 Oct 2023 12:19:05 +0200
Subject: Improving DeviationCircleCoverage module. Outputting probabilities
 instead of coverage values.

---
 src/argaze/GazeAnalysis/DeviationCircleCoverage.py | 27 +++++++++++-----------
 1 file changed, 14 insertions(+), 13 deletions(-)

diff --git a/src/argaze/GazeAnalysis/DeviationCircleCoverage.py b/src/argaze/GazeAnalysis/DeviationCircleCoverage.py
index f57d432..acc0665 100644
--- a/src/argaze/GazeAnalysis/DeviationCircleCoverage.py
+++ b/src/argaze/GazeAnalysis/DeviationCircleCoverage.py
@@ -33,8 +33,8 @@ class AOIMatcher(GazeFeatures.AOIMatcher):
 
         self.__look_count = 0
         self.__looked_aoi_data = (None, None)
+        self.__looked_probabilities = {}
         self.__circle_ratio_sum = {}
-        self.__aoi_coverages = {}
         self.__matched_gaze_movement = None
         self.__matched_region = None
 
@@ -54,7 +54,7 @@ class AOIMatcher(GazeFeatures.AOIMatcher):
                 # BAD: we use deviation_max attribute which is an attribute of DispersionThresholdIdentification.Fixation class
                 region, _, circle_ratio = aoi.circle_intersection(gaze_movement.focus, gaze_movement.deviation_max)
 
-                if name not in self.exclude and circle_ratio > 0:
+                if name not in self.exclude and circle_ratio > self.coverage_threshold:
 
                     # Sum circle ratio to update aoi coverage
                     try:
@@ -78,15 +78,15 @@ class AOIMatcher(GazeFeatures.AOIMatcher):
                     # Update looked aoi data
                     self.__looked_aoi_data = most_likely_looked_aoi_data
 
-                    # Calculate looked aoi circle ratio means
-                    self.__aoi_coverages = {}
+                    # Calculate circle ratio means as looked probabilities
+                    self.__looked_probabilities = {}
 
                     for aoi_name, circle_ratio_sum in self.__circle_ratio_sum.items():
 
                         circle_ratio_mean = circle_ratio_sum / self.__look_count
 
-                        # filter circle ration mean greater than 1
-                        self.__aoi_coverages[aoi_name] = circle_ratio_mean if circle_ratio_mean < 1 else 1
+                        # Avoid probability greater than 1
+                        self.__looked_probabilities[aoi_name] = circle_ratio_mean if circle_ratio_mean < 1 else 1
 
                     # Update matched gaze movement
                     self.__matched_gaze_movement = gaze_movement
@@ -95,9 +95,7 @@ class AOIMatcher(GazeFeatures.AOIMatcher):
                     self.__matched_region = matched_region
 
             # Return
-            if self.__aoi_coverages[most_likely_looked_aoi_data[0]] > self.coverage_threshold:
-
-                return self.__looked_aoi_data
+            return self.__looked_aoi_data
 
         elif GazeFeatures.is_saccade(gaze_movement):
 
@@ -173,8 +171,11 @@ class AOIMatcher(GazeFeatures.AOIMatcher):
         return self.__looked_aoi_data[0]
 
     @property
-    def aoi_coverages(self) -> dict:
-        """Get all aoi coverage means for current fixation.
-        It represents the ratio of fixation deviation circle surface that used to cover the aoi."""
+    def looked_probabilities(self) -> dict:
+        """Get probabilities to be looked at by the current fixation, for each AOI.
+
+        !!! note
+            AOI where the fixation deviation circle never passed the coverage threshold will be missing.
+        """
 
-        return self.__aoi_coverages
\ No newline at end of file
+        return self.__looked_probabilities
\ No newline at end of file
--
cgit v1.1

From f18c8dc95e1016f0783808fd5ab531fee62f4998 Mon Sep 17 00:00:00 2001
From: Théo de la Hogue
Date: Mon, 16 Oct 2023 16:49:58 +0200
Subject: Failing to estimate pose when only one marker belongs to the scene.

---
 src/argaze/ArUcoMarkers/ArUcoScene.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/src/argaze/ArUcoMarkers/ArUcoScene.py b/src/argaze/ArUcoMarkers/ArUcoScene.py
index b8b9cfd..51dd88c 100644
--- a/src/argaze/ArUcoMarkers/ArUcoScene.py
+++ b/src/argaze/ArUcoMarkers/ArUcoScene.py
@@ -117,6 +117,11 @@ class ArUcoScene(ArFeatures.ArScene):
 
             raise ArFeatures.PoseEstimationFailed('No marker belongs to the scene')
 
+        # Pose estimation fails if only one marker belongs to the scene
+        if len(scene_markers) == 1:
+
+            raise ArFeatures.PoseEstimationFailed('Only one marker belongs to the scene')
+
         # Estimate pose from a markers corners
         success, tvec, rmat = self.aruco_markers_group.estimate_pose_from_markers_corners(scene_markers, self.parent.aruco_detector.optic_parameters.K, self.parent.aruco_detector.optic_parameters.D)
 
--
cgit v1.1

From 0835382b4b12652e23dcebe2456431c3e625dc3a Mon Sep 17 00:00:00 2001
From: Théo de la Hogue
Date: Mon, 16 Oct 2023 19:32:15 +0200
Subject: Fixing marker size guessing.

---
 src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py
index bedd408..d30a730 100644
--- a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py
+++ b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py
@@ -247,16 +247,16 @@ class ArUcoMarkersGroup():
             cw_corners = numpy.array([ vertices[i-1] for i in reversed(face) ])
 
             # Edit place axis from corners positions
-            place_x_axis = cw_corners[1:3].mean(axis=0)
+            place_x_axis = cw_corners[2] - cw_corners[3]
             place_x_axis_norm = numpy.linalg.norm(place_x_axis)
 
-            place_y_axis = cw_corners[2:4].mean(axis=0)
+            place_y_axis = cw_corners[0] - cw_corners[3]
             place_y_axis_norm = numpy.linalg.norm(place_y_axis)
 
             # Check axis size: they should be almost equal
             if math.isclose(place_x_axis_norm, place_y_axis_norm, rel_tol=1e-3):
 
-                current_marker_size = place_x_axis_norm*2
+                current_marker_size = place_x_axis_norm
 
                 # Check that all markers size are almost equal
                 if new_marker_size > 0:
--
cgit v1.1

From 4c9abeb88568d3725f47ecd1a47ddf74767157e2 Mon Sep 17 00:00:00 2001
From: Théo de la Hogue
Date: Mon, 16 Oct 2023 22:37:03 +0200
Subject: Using SolvePnP with flag 3.
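The DeviationCircleCoverage rework above reduces each AOI's looked probability to a clamped mean of the accumulated circle ratios. That computation, extracted as a short sketch:

    def looked_probabilities(circle_ratio_sums, look_count):

        # Mean circle ratio per AOI, clamped to 1 so it reads as a probability
        return {name: min(ratio_sum / look_count, 1.) for name, ratio_sum in circle_ratio_sums.items()}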
---
 src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py
index d30a730..dddead4 100644
--- a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py
+++ b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py
@@ -371,7 +371,7 @@ class ArUcoMarkersGroup():
             raise ValueError(f'Marker {marker.identifier} doesn\'t belong to the group.')
 
         # Solve
-        success, rvec, tvec = cv2.solvePnP(numpy.array(places_corners_3d), numpy.array(markers_corners_2d), numpy.array(K), numpy.array(D), flags=0)
+        success, rvec, tvec = cv2.solvePnP(numpy.array(places_corners_3d), numpy.array(markers_corners_2d), numpy.array(K), numpy.array(D), flags=3)
 
         self._translation = tvec.T
         self._rotation = rvec.T
--
cgit v1.1

From 9c6f43140bbe9a387e74a725843914439bd5c1fc Mon Sep 17 00:00:00 2001
From: Théo de la Hogue
Date: Tue, 17 Oct 2023 08:47:41 +0200
Subject: Adding annotation.

---
 src/argaze/ArUcoMarkers/ArUcoDetector.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/argaze/ArUcoMarkers/ArUcoDetector.py b/src/argaze/ArUcoMarkers/ArUcoDetector.py
index 01527a1..51f8366 100644
--- a/src/argaze/ArUcoMarkers/ArUcoDetector.py
+++ b/src/argaze/ArUcoMarkers/ArUcoDetector.py
@@ -260,6 +260,9 @@ class ArUcoDetector():
         !!! danger "DON'T MIRROR IMAGE"
             It makes the markers detection to fail.
 
+        !!! danger "DON'T UNDISTORT IMAGE"
+            Camera intrinsic parameters and distortion coefficients are used later during pose estimation.
+
         Returns:
             detection time: marker detection time in ms.
--
cgit v1.1

From 3934f14dbab4206d091f07dd29bf3ad3dfb9b787 Mon Sep 17 00:00:00 2001
From: Théo de la Hogue
Date: Tue, 17 Oct 2023 09:14:11 +0200
Subject: Adding useAruco3Detection as a possible parameter.

---
 src/argaze/ArUcoMarkers/ArUcoDetector.py     |  3 ++-
 src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py | 11 +++++++++--
 2 files changed, 11 insertions(+), 3 deletions(-)

diff --git a/src/argaze/ArUcoMarkers/ArUcoDetector.py b/src/argaze/ArUcoMarkers/ArUcoDetector.py
index 51f8366..e62a42e 100644
--- a/src/argaze/ArUcoMarkers/ArUcoDetector.py
+++ b/src/argaze/ArUcoMarkers/ArUcoDetector.py
@@ -72,7 +72,8 @@ class DetectorParameters():
         'minOtsuStdDev',
         'perspectiveRemoveIgnoredMarginPerCell',
         'perspectiveRemovePixelPerCell',
-        'polygonalApproxAccuracyRate'
+        'polygonalApproxAccuracyRate',
+        'useAruco3Detection'
     ]
 
     def __init__(self, **kwargs):
 
diff --git a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py
index dddead4..8600690 100644
--- a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py
+++ b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py
@@ -370,8 +370,15 @@ class ArUcoMarkersGroup():
 
             raise ValueError(f'Marker {marker.identifier} doesn\'t belong to the group.')
 
-        # Solve
-        success, rvec, tvec = cv2.solvePnP(numpy.array(places_corners_3d), numpy.array(markers_corners_2d), numpy.array(K), numpy.array(D), flags=3)
+        # SolvePnP using cv2.SOLVEPNP_SQPNP flag
+        # TODO: it also works with cv2.SOLVEPNP_EPNP flag so we need to test which is faster.
+        # About SolvePnP flags: https://docs.opencv.org/4.x/d5/d1f/calib3d_solvePnP.html
+        success, rvec, tvec = cv2.solvePnP(numpy.array(places_corners_3d), numpy.array(markers_corners_2d), numpy.array(K), numpy.array(D), flags=cv2.SOLVEPNP_SQPNP)
+
+        # Refine pose estimation using Gauss-Newton optimisation
+        #if success :
+
+            #rvec, tvec = cv2.solvePnPRefineVVS(numpy.array(places_corners_3d), numpy.array(markers_corners_2d), numpy.array(K), numpy.array(D), rvec, tvec)
 
         self._translation = tvec.T
         self._rotation = rvec.T
--
cgit v1.1

From 77914e2aa25623a237a58b7c80f712129cbb2b55 Mon Sep 17 00:00:00 2001
From: Théo de la Hogue
Date: Tue, 17 Oct 2023 09:15:51 +0200
Subject: Uncommenting pose estimation optimisation.

---
 src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py
index 8600690..edae927 100644
--- a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py
+++ b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py
@@ -376,9 +376,9 @@ class ArUcoMarkersGroup():
         success, rvec, tvec = cv2.solvePnP(numpy.array(places_corners_3d), numpy.array(markers_corners_2d), numpy.array(K), numpy.array(D), flags=cv2.SOLVEPNP_SQPNP)
 
         # Refine pose estimation using Gauss-Newton optimisation
-        #if success :
+        if success :
 
-            #rvec, tvec = cv2.solvePnPRefineVVS(numpy.array(places_corners_3d), numpy.array(markers_corners_2d), numpy.array(K), numpy.array(D), rvec, tvec)
+            rvec, tvec = cv2.solvePnPRefineVVS(numpy.array(places_corners_3d), numpy.array(markers_corners_2d), numpy.array(K), numpy.array(D), rvec, tvec)
 
         self._translation = tvec.T
         self._rotation = rvec.T
--
cgit v1.1

From 1d46c5816ba603105dfaa1b5a79f3a167fdc99d8 Mon Sep 17 00:00:00 2001
From: Théo de la Hogue
Date: Tue, 17 Oct 2023 12:56:41 +0200
Subject: Adding GazePositionCalibrator class. Adding LinearRegression module.

---
 setup.py                                    |  2 +-
 src/argaze/GazeAnalysis/LinearRegression.py | 80 +++++++++++++++++++++++++++++
 src/argaze/GazeAnalysis/__init__.py         |  2 +-
 src/argaze/GazeFeatures.py                  | 56 ++++++++++++++++++++
 4 files changed, 138 insertions(+), 2 deletions(-)
 create mode 100644 src/argaze/GazeAnalysis/LinearRegression.py

diff --git a/setup.py b/setup.py
index 358c19e..706f414 100644
--- a/setup.py
+++ b/setup.py
@@ -35,7 +35,7 @@ setup(
     packages=find_packages(where='src'),
     python_requires='>=3.11',
 
-    install_requires=['opencv-python>=4.7.0', 'opencv-contrib-python>=4.7.0', 'numpy', 'pandas', 'matplotlib', 'shapely', 'lempel_ziv_complexity', 'scipy'],
+    install_requires=['opencv-python>=4.7.0', 'opencv-contrib-python>=4.7.0', 'numpy', 'pandas', 'matplotlib', 'shapely', 'lempel_ziv_complexity', 'scipy', 'scikit-learn'],
 
     project_urls={
         'Bug Reports': 'https://git.recherche.enac.fr/projects/argaze/issues',
 
diff --git a/src/argaze/GazeAnalysis/LinearRegression.py b/src/argaze/GazeAnalysis/LinearRegression.py
new file mode 100644
index 0000000..5a92048
--- /dev/null
+++ b/src/argaze/GazeAnalysis/LinearRegression.py
@@ -0,0 +1,80 @@
+#!/usr/bin/env python
+
+"""Module for gaze position calibration based on linear regression.
+""" + +__author__ = "Théo de la Hogue" +__credits__ = [] +__copyright__ = "Copyright 2023, Ecole Nationale de l'Aviation Civile (ENAC)" +__license__ = "BSD" + +from typing import TypeVar, Tuple +from dataclasses import dataclass, field + +from argaze import GazeFeatures + +from sklearn.linear_model import LinearRegression +import numpy +import cv2 + +GazePositionType = TypeVar('GazePositionType', bound="GazePositionType") +# Type definition for type annotation convenience + +@dataclass +class GazePositionCalibrator(GazeFeatures.GazePositionCalibrator): + """Calibration algorithm based on linear regression.""" + + coefficients: numpy.array = field(default_factory=lambda : numpy.array([[1., 0.], [0., 1.]])) + """Linear regression coefficients""" + + intercept: numpy.array = field(default_factory=lambda : numpy.array([0., 0.])) + """Linear regression intercept value""" + + def __post_init__(self): + """Init calibration data.""" + + self.reset() + + def store(self, timestamp: int|float, observed_gaze_position: GazeFeatures.GazePosition, expected_gaze_position: GazeFeatures.GazePosition): + """Store observed and expected gaze positions.""" + + self.__observed_positions.append(observed_gaze_position.value) + self.__expected_positions.append(expected_gaze_position.value) + + def reset(self): + """Reset observed and expected gaze positions.""" + + self.__observed_positions = [] + self.__expected_positions = [] + self.__linear_regression = None + + def calibrate(self) -> float: + """Process calibration from observed and expected gaze positions. + + Returns: + score: the score of linear regression + """ + + self.__linear_regression = LinearRegression().fit(self.__observed_positions, self.__expected_positions) + + return self.__linear_regression.score(self.__observed_positions, self.__expected_positions) + + def apply(self, gaze_position: GazeFeatures.GazePosition) -> GazePositionType: + """Apply calibration onto observed gaze position.""" + + return GazeFeatures.GazePosition(self.__linear_regression.predict(numpy.array([gaze_position.value]))[0], precision=gaze_position.precision) + + def draw(self, image: numpy.array): + """Draw calibration into image. + + Parameters: + image: where to draw + """ + + raise NotImplementedError('draw() method not implemented') + + @property + def ready(self) -> bool: + """Is the calibrator ready?""" + + return self.__linear_regression is not None \ No newline at end of file diff --git a/src/argaze/GazeAnalysis/__init__.py b/src/argaze/GazeAnalysis/__init__.py index 62e0823..c110eb1 100644 --- a/src/argaze/GazeAnalysis/__init__.py +++ b/src/argaze/GazeAnalysis/__init__.py @@ -1,4 +1,4 @@ """ Various gaze movement identification, AOI matching and scan path analysis algorithms. 
""" -__all__ = ['Basic', 'DispersionThresholdIdentification', 'VelocityThresholdIdentification', 'TransitionMatrix', 'KCoefficient', 'LempelZivComplexity', 'NGram', 'Entropy', 'NearestNeighborIndex', 'ExploreExploitRatio'] \ No newline at end of file +__all__ = ['Basic', 'DispersionThresholdIdentification', 'VelocityThresholdIdentification', 'TransitionMatrix', 'KCoefficient', 'LempelZivComplexity', 'NGram', 'Entropy', 'NearestNeighborIndex', 'ExploreExploitRatio', 'LinearRegression'] \ No newline at end of file diff --git a/src/argaze/GazeFeatures.py b/src/argaze/GazeFeatures.py index bd1a3da..b918256 100644 --- a/src/argaze/GazeFeatures.py +++ b/src/argaze/GazeFeatures.py @@ -201,6 +201,62 @@ class TimeStampedGazePositions(DataStructures.TimeStampedBuffer): return TimeStampedGazePositions(df.to_dict('index')) +@dataclass +class GazePositionCalibrator(): + """Abstract class to define what should provide a gaze position calibrator algorithm.""" + + def store(self, timestamp: int|float, observed_gaze_position: GazePosition, expected_gaze_position: GazePosition): + """Store observed and expected gaze positions. + + Parameters: + timestamp: time of observed gaze position + observed_gaze_position: where gaze position actually is + expected_gaze_position: where gaze position should be + """ + + raise NotImplementedError('calibrate() method not implemented') + + def reset(self): + """Reset observed and expected gaze positions.""" + + raise NotImplementedError('reset() method not implemented') + + def calibrate(self) -> Any: + """Process calibration from observed and expected gaze positions. + + Returns: + calibration outputs: any data returned to assess calibration + """ + + raise NotImplementedError('terminate() method not implemented') + + def apply(self, observed_gaze_position: GazePosition) -> GazePositionType: + """Apply calibration onto observed gaze position. + + Parameters: + observed_gaze_position: where gaze position actually is + + Returns: + expected_gaze_position: where gaze position should be + """ + + raise NotImplementedError('process() method not implemented') + + def draw(self, image: numpy.array): + """Draw calibration into image. + + Parameters: + image: where to draw + """ + + raise NotImplementedError('draw() method not implemented') + + @property + def ready(self) -> bool: + """Is the calibrator ready?""" + + raise NotImplementedError('ready getter not implemented') + GazeMovementType = TypeVar('GazeMovement', bound="GazeMovement") # Type definition for type annotation convenience -- cgit v1.1 From 322fa8af22f8880d58506fc18f4205ac4d3f937a Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 17 Oct 2023 15:58:55 +0200 Subject: adding gaze_position_calibrator to ArFrame. --- src/argaze/ArFeatures.py | 40 +++++++++++++++++++++++++---- src/argaze/GazeAnalysis/LinearRegression.py | 27 ++++++++++++++----- src/argaze/GazeFeatures.py | 19 +++++++++----- 3 files changed, 69 insertions(+), 17 deletions(-) diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index a1c7349..cb1b2f6 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -523,7 +523,8 @@ class ArFrame(): Parameters: name: name of the frame - size: defines the dimension of the rectangular area where gaze positions are projected. 
+        size: defines the dimension of the rectangular area where gaze positions are projected
+        gaze_position_calibrator: gaze position calibration algorithm
         gaze_movement_identifier: gaze movement identification algorithm
         filter_in_progress_identification: ignore in progress gaze movement identification
         scan_path: scan path object
@@ -537,6 +538,7 @@
 
     name: str
     size: tuple[int] = field(default=(1, 1))
+    gaze_position_calibrator: GazeFeatures.GazePositionCalibrator = field(default_factory=GazeFeatures.GazePositionCalibrator)
     gaze_movement_identifier: GazeFeatures.GazeMovementIdentifier = field(default_factory=GazeFeatures.GazeMovementIdentifier)
     filter_in_progress_identification: bool = field(default=True)
     scan_path: GazeFeatures.ScanPath = field(default_factory=GazeFeatures.ScanPath)
@@ -600,6 +602,24 @@
 
         new_frame_size = (0, 0)
 
+        # Load gaze position calibrator
+        try:
+
+            gaze_position_calibrator_value = frame_data.pop('gaze_position_calibrator')
+
+            gaze_position_calibrator_module_path, gaze_position_calibrator_parameters = gaze_position_calibrator_value.popitem()
+
+            # Prepend argaze.GazeAnalysis path when a single name is provided
+            if len(gaze_position_calibrator_module_path.split('.')) == 1:
+                gaze_position_calibrator_module_path = f'argaze.GazeAnalysis.{gaze_position_calibrator_module_path}'
+
+            gaze_position_calibrator_module = importlib.import_module(gaze_position_calibrator_module_path)
+            new_gaze_position_calibrator = gaze_position_calibrator_module.GazePositionCalibrator(**gaze_position_calibrator_parameters)
+
+        except KeyError:
+
+            new_gaze_position_calibrator = None
+
         # Load gaze movement identifier
         try:
@@ -756,6 +776,7 @@
 
         # Create frame
         return ArFrame(new_frame_name, \
                        new_frame_size, \
+                       new_gaze_position_calibrator, \
                        new_gaze_movement_identifier, \
                        filter_in_progress_identification, \
                        new_scan_path, \
@@ -815,6 +836,7 @@
             gaze_position: gaze position to project
 
         Returns:
+            current_gaze_position: calibrated gaze position if gaze_position_calibrator is instantiated; otherwise, the given gaze position.
            identified_gaze_movement: identified gaze movement from incoming consecutive timestamped gaze positions if gaze_movement_identifier is instantiated. Current gaze movement if filter_in_progress_identification is False.
            scan_path_analysis: scan path analysis at each new scan step if scan_path is instantiated.
            layers_analysis: aoi scan path analysis at each new aoi scan step for each instantiated layer's aoi scan path.
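To illustrate the loader added in the hunks above: a frame dictionary can now declare its calibrator under a 'gaze_position_calibrator' key, whose single entry is expanded to an argaze.GazeAnalysis module path. A minimal sketch follows; the frame name and all values are invented for the example, and only the key names come from the patch:

```python
from argaze import ArFeatures

# Hypothetical frame configuration: the 'LinearRegression' key is expanded to
# 'argaze.GazeAnalysis.LinearRegression' by the loading code above, and its
# parameters are passed to that module's GazePositionCalibrator constructor.
frame_data = {
    'name': 'demo_frame',
    'size': [1920, 1080],
    'gaze_position_calibrator': {
        'LinearRegression': {
            'coefficients': [[1., 0.], [0., 1.]],
            'intercept': [0., 0.]
        }
    }
}

# Second argument is the working directory used to resolve relative paths
frame = ArFeatures.ArFrame.from_dict(frame_data, '.')
```

The same dictionary shape is presumably what a JSON configuration file consumed by ArFrame.from_json would contain.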
@@ -828,9 +850,6 @@
 
         # Store look execution start date
         look_start = time.perf_counter()
 
-        # Update current gaze position
-        self.__gaze_position = gaze_position
-
         # No gaze movement identified by default
         identified_gaze_movement = GazeFeatures.UnvalidGazeMovement()
 
@@ -853,6 +872,16 @@
 
         try:
 
+            # Apply gaze position calibration
+            if self.gaze_position_calibrator is not None:
+
+                self.__gaze_position = self.gaze_position_calibrator.apply(gaze_position)
+
+            # Or update gaze position at least
+            else:
+
+                self.__gaze_position = gaze_position
+
             # Identify gaze movement
             if self.gaze_movement_identifier is not None:
 
@@ -942,6 +971,7 @@
 
             print('Warning: the following error occurs in ArFrame.look method:', e)
 
+            self.__gaze_position = GazeFeatures.UnvalidGazePosition()
             identified_gaze_movement = GazeFeatures.UnvalidGazeMovement()
             scan_step_analysis = {}
             layer_analysis = {}
@@ -954,7 +984,7 @@
 
         self.__look_lock.release()
 
         # Return look data
-        return identified_gaze_movement, scan_step_analysis, layer_analysis, execution_times, exception
+        return self.__gaze_position, identified_gaze_movement, scan_step_analysis, layer_analysis, execution_times, exception
 
diff --git a/src/argaze/GazeAnalysis/LinearRegression.py b/src/argaze/GazeAnalysis/LinearRegression.py
index 5a92048..de7725d 100644
--- a/src/argaze/GazeAnalysis/LinearRegression.py
+++ b/src/argaze/GazeAnalysis/LinearRegression.py
@@ -31,9 +31,11 @@ class GazePositionCalibrator(GazeFeatures.GazePositionCalibrator):
     """Linear regression intercept value"""
 
     def __post_init__(self):
-        """Init calibration data."""
+        """Init calibration."""
 
-        self.reset()
+        self.__linear_regression = LinearRegression()
+        self.__linear_regression.coef_ = numpy.array(self.coefficients)
+        self.__linear_regression.intercept_ = numpy.array(self.intercept)
 
     def store(self, timestamp: int|float, observed_gaze_position: GazeFeatures.GazePosition, expected_gaze_position: GazeFeatures.GazePosition):
         """Store observed and expected gaze positions."""
@@ -57,12 +59,25 @@ class GazePositionCalibrator(GazeFeatures.GazePositionCalibrator):
 
         self.__linear_regression = LinearRegression().fit(self.__observed_positions, self.__expected_positions)
 
+        # Update frozen coefficients attribute
+        object.__setattr__(self, 'coefficients', self.__linear_regression.coef_)
+
+        # Update frozen intercept attribute
+        object.__setattr__(self, 'intercept', self.__linear_regression.intercept_)
+
+        # Return linear regression score
         return self.__linear_regression.score(self.__observed_positions, self.__expected_positions)
 
     def apply(self, gaze_position: GazeFeatures.GazePosition) -> GazePositionType:
         """Apply calibration onto observed gaze position."""
 
-        return GazeFeatures.GazePosition(self.__linear_regression.predict(numpy.array([gaze_position.value]))[0], precision=gaze_position.precision)
+        if not self.calibrating:
+
+            return GazeFeatures.GazePosition(self.__linear_regression.predict(numpy.array([gaze_position.value]))[0], precision=gaze_position.precision)
+
+        else:
+
+            return gaze_position
 
     def draw(self, image: numpy.array):
         """Draw calibration into image.
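To make the store/calibrate/apply cycle above concrete, here is a minimal usage sketch of the calibrator as it stands at this point in the series (the sample coordinates are invented):

```python
from argaze import GazeFeatures
from argaze.GazeAnalysis import LinearRegression

calibrator = LinearRegression.GazePositionCalibrator()

# reset() is what initializes the observed/expected buffers (and puts the
# calibrator back into its calibrating state) now that __post_init__ no longer calls it
calibrator.reset()

# Store observed/expected gaze position pairs collected during a calibration session
calibrator.store(0., GazeFeatures.GazePosition((12, 8)), GazeFeatures.GazePosition((10, 10)))
calibrator.store(1., GazeFeatures.GazePosition((205, 98)), GazeFeatures.GazePosition((200, 100)))
calibrator.store(2., GazeFeatures.GazePosition((398, 305)), GazeFeatures.GazePosition((400, 300)))

# Fit the linear regression over the stored pairs and get its score
score = calibrator.calibrate()

# Once fitted, apply() predicts the expected position from an observed one
corrected = calibrator.apply(GazeFeatures.GazePosition((12, 8)))
```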
@@ -74,7 +89,7 @@ class GazePositionCalibrator(GazeFeatures.GazePositionCalibrator):
         raise NotImplementedError('draw() method not implemented')
 
     @property
-    def ready(self) -> bool:
-        """Is the calibrator ready?"""
+    def calibrating(self) -> bool:
+        """Is the calibration running?"""
 
-        return self.__linear_regression is not None
\ No newline at end of file
+        return self.__linear_regression is None
\ No newline at end of file

diff --git a/src/argaze/GazeFeatures.py b/src/argaze/GazeFeatures.py
index b918256..eddd01d 100644
--- a/src/argaze/GazeFeatures.py
+++ b/src/argaze/GazeFeatures.py
@@ -201,6 +201,13 @@ class TimeStampedGazePositions(DataStructures.TimeStampedBuffer):
 
     return TimeStampedGazePositions(df.to_dict('index'))
 
+class GazePositionCalibrationFailed(Exception):
+    """Exception raised by GazePositionCalibrator."""
+
+    def __init__(self, message):
+
+        super().__init__(message)
+
 @dataclass
 class GazePositionCalibrator():
     """Abstract class to define what should provide a gaze position calibrator algorithm."""
@@ -237,10 +244,10 @@ class GazePositionCalibrator():
             observed_gaze_position: where gaze position actually is
 
         Returns:
-            expected_gaze_position: where gaze position should be
+            expected_gaze_position: where gaze position should be if the calibrator is ready; otherwise, the observed gaze position
         """
 
-        raise NotImplementedError('process() method not implemented')
+        raise NotImplementedError('apply() method not implemented')
 
     def draw(self, image: numpy.array):
         """Draw calibration into image.
@@ -252,8 +259,8 @@ class GazePositionCalibrator():
 
         Parameters:
             image: where to draw
         """
 
         raise NotImplementedError('draw() method not implemented')
 
     @property
-    def ready(self) -> bool:
-        """Is the calibrator ready?"""
+    def calibrating(self) -> bool:
+        """Is the calibration running?"""
 
         raise NotImplementedError('ready getter not implemented')
 
@@ -601,7 +608,7 @@
 ScanStepType = TypeVar('ScanStep', bound="ScanStep")
 # Type definition for type annotation convenience
 
 class ScanStepError(Exception):
-    """Exception raised at ScanStepError creation if a aoi scan step doesn't start by a fixation or doesn't end by a saccade."""
+    """Exception raised at ScanStep creation if a scan step doesn't start with a fixation or doesn't end with a saccade."""
 
     def __init__(self, message):
 
@@ -811,7 +818,7 @@
 AOIScanStepType = TypeVar('AOIScanStep', bound="AOIScanStep")
 # Type definition for type annotation convenience
 
 class AOIScanStepError(Exception):
-    """Exception raised at AOIScanStepError creation if a aoi scan step doesn't start by a fixation or doesn't end by a saccade."""
+    """Exception raised at AOIScanStep creation if an aoi scan step doesn't start with a fixation or doesn't end with a saccade."""
 
     def __init__(self, message, aoi=''):
 
--
cgit v1.1

From 86264748b88700ae7a197bfee7004c5114b17225 Mon Sep 17 00:00:00 2001
From: Théo de la Hogue
Date: Tue, 17 Oct 2023 17:55:03 +0200
Subject: Fixing JsonEncoder to handle classes with numpy attributes.

---
 src/argaze/DataStructures.py | 25 +++++++++++++++++++++++--
 1 file changed, 23 insertions(+), 2 deletions(-)

diff --git a/src/argaze/DataStructures.py b/src/argaze/DataStructures.py
index 08a7d2c..b5101b2 100644
--- a/src/argaze/DataStructures.py
+++ b/src/argaze/DataStructures.py
@@ -45,6 +45,15 @@ def as_dict(dataclass_object) -> dict:
     # Copy fields values
     return {name: vars(dataclass_object)[name] for name in fields_names}
 
+def module_path(obj):
+    """
+    Get object module path.
+ + Returns: + module path + """ + return obj.__class__.__module__ + class JsonEncoder(json.JSONEncoder): """Specific ArGaze JSON Encoder.""" @@ -55,10 +64,10 @@ class JsonEncoder(json.JSONEncoder): if isinstance(obj, numpy.integer): return int(obj) - if isinstance(obj, numpy.floating): + elif isinstance(obj, numpy.floating): return float(obj) - if isinstance(obj, numpy.ndarray): + elif isinstance(obj, numpy.ndarray): return obj.tolist() # default case @@ -73,7 +82,19 @@ class JsonEncoder(json.JSONEncoder): public_dict = {} for k, v in vars(obj).items(): + if not k.startswith('_'): + + # numpy cases + if isinstance(v, numpy.integer): + v = int(v) + + elif isinstance(v, numpy.floating): + v = float(v) + + elif isinstance(v, numpy.ndarray): + v = v.tolist() + public_dict[k] = v return public_dict -- cgit v1.1 From 45af88fcb056ca0d5fd1be49972cef9b0f275fad Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 17 Oct 2023 17:57:08 +0200 Subject: Loading and saving GazePositionCalibrator instance from JSON file. --- src/argaze/ArFeatures.py | 19 +++++++++++++------ src/argaze/GazeFeatures.py | 45 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 58 insertions(+), 6 deletions(-) diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index cb1b2f6..2e278ea 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -607,14 +607,21 @@ class ArFrame(): gaze_position_calibrator_value = frame_data.pop('gaze_position_calibrator') - gaze_position_calibrator_module_path, gaze_position_calibrator_parameters = gaze_position_calibrator_value.popitem() + # str: relative path to file + if type(gaze_position_calibrator_value) == str: - # Prepend argaze.GazeAnalysis path when a single name is provided - if len(gaze_position_calibrator_module_path.split('.')) == 1: - gaze_position_calibrator_module_path = f'argaze.GazeAnalysis.{gaze_position_calibrator_module_path}' + filepath = os.path.join(working_directory, gaze_position_calibrator_value) + file_format = filepath.split('.')[-1] + + # JSON file format + if file_format == 'json': + + new_gaze_position_calibrator = GazeFeatures.GazePositionCalibrator.from_json(filepath) + + # dict: + else: - gaze_position_calibrator_module = importlib.import_module(gaze_position_calibrator_module_path) - new_gaze_position_calibrator = gaze_position_calibrator_module.GazePositionCalibrator(**gaze_position_calibrator_parameters) + new_gaze_position_calibrator = GazePositionCalibrator.from_dict(gaze_position_calibrator_value) except KeyError: diff --git a/src/argaze/GazeFeatures.py b/src/argaze/GazeFeatures.py index eddd01d..46e9f17 100644 --- a/src/argaze/GazeFeatures.py +++ b/src/argaze/GazeFeatures.py @@ -12,6 +12,7 @@ from dataclasses import dataclass, field import math import ast import json +import importlib from inspect import getmembers from argaze import DataStructures @@ -208,10 +209,54 @@ class GazePositionCalibrationFailed(Exception): super().__init__(message) +GazePositionCalibratorType = TypeVar('GazePositionCalibrator', bound="GazePositionCalibrator") +# Type definition for type annotation convenience + @dataclass class GazePositionCalibrator(): """Abstract class to define what should provide a gaze position calibrator algorithm.""" + @classmethod + def from_dict(self, calibrator_data: dict) -> GazePositionCalibratorType: + """Load gaze position calibrator from dictionary. 
+ + Parameters: + calibrator_data: dictionary with class name and attributes to load + """ + gaze_position_calibrator_module_path, gaze_position_calibrator_parameters = calibrator_data.popitem() + + # Prepend argaze.GazeAnalysis path when a single name is provided + if len(gaze_position_calibrator_module_path.split('.')) == 1: + gaze_position_calibrator_module_path = f'argaze.GazeAnalysis.{gaze_position_calibrator_module_path}' + + gaze_position_calibrator_module = importlib.import_module(gaze_position_calibrator_module_path) + return gaze_position_calibrator_module.GazePositionCalibrator(**gaze_position_calibrator_parameters) + + @classmethod + def from_json(self, json_filepath: str) -> GazePositionCalibratorType: + """Load calibrator from .json file.""" + + # Remember file path to ease rewriting + self.__json_filepath = json_filepath + + # Open file + with open(self.__json_filepath) as calibration_file: + + return GazePositionCalibrator.from_dict(json.load(calibration_file)) + + def to_json(self, json_filepath: str = None): + """Save calibrator into .json file.""" + + # Remember file path to ease rewriting + if json_filepath is not None: + + self.__json_filepath = json_filepath + + # Open file + with open(self.__json_filepath, 'w', encoding='utf-8') as calibration_file: + + json.dump({DataStructures.module_path(self):DataStructures.JsonEncoder().default(self)}, calibration_file, ensure_ascii=False, indent=4) + def store(self, timestamp: int|float, observed_gaze_position: GazePosition, expected_gaze_position: GazePosition): """Store observed and expected gaze positions. -- cgit v1.1 From 07fb4ce51650e9b0edaf1b9ebc01c1b9589c9a54 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 17 Oct 2023 18:40:18 +0200 Subject: Implementing LinearRegression drawing function. --- src/argaze/GazeAnalysis/LinearRegression.py | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/src/argaze/GazeAnalysis/LinearRegression.py b/src/argaze/GazeAnalysis/LinearRegression.py index de7725d..0e10b87 100644 --- a/src/argaze/GazeAnalysis/LinearRegression.py +++ b/src/argaze/GazeAnalysis/LinearRegression.py @@ -79,14 +79,26 @@ class GazePositionCalibrator(GazeFeatures.GazePositionCalibrator): return gaze_position - def draw(self, image: numpy.array): - """Draw calibration into image. + def draw(self, image: numpy.array, size: tuple, resolution: tuple, line_color: tuple = (0, 0, 0), thickness: int = 1): + """Draw calibration field.""" + + width, height = size - Parameters: - image: where to draw - """ + if width * height > 0: + + rx, ry = resolution + lx = numpy.linspace(0, width, rx) + ly = numpy.linspace(0, height, ry) + xv, yv = numpy.meshgrid(lx, ly, indexing='ij') + + for i in range(rx): + + for j in range(ry): + + start = (xv[i][j], yv[i][j]) + end = self.apply(GazeFeatures.GazePosition(start)).value - raise NotImplementedError('draw() method not implemented') + cv2.line(image, (int(start[0]), int(start[1])), (int(end[0]), int(end[1])), line_color, thickness) @property def calibrating(self) -> bool: -- cgit v1.1 From 4575911ec8a04d7d228109acbcb58b448b72fb3b Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Tue, 17 Oct 2023 18:49:34 +0200 Subject: Adding draw_gaze_position_calibrator as image_parameters ArFrame option. 
--- src/argaze/ArFeatures.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index 2e278ea..13b952d 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -993,13 +993,14 @@ class ArFrame(): # Return look data return self.__gaze_position, identified_gaze_movement, scan_step_analysis, layer_analysis, execution_times, exception - def __image(self, background_weight: float = None, heatmap_weight: float = None, draw_scan_path: dict = None, draw_layers: dict = None, draw_gaze_positions: dict = None, draw_fixations: dict = None, draw_saccades: dict = None) -> numpy.array: + def __image(self, background_weight: float = None, heatmap_weight: float = None, draw_gaze_position_calibrator: dict = None, draw_scan_path: dict = None, draw_layers: dict = None, draw_gaze_positions: dict = None, draw_fixations: dict = None, draw_saccades: dict = None) -> numpy.array: """ Get background image with overlaid visualisations. Parameters: background_weight: weight of background overlay heatmap_weight: weight of heatmap overlay + draw_gaze_position_calibrator: [GazeFeatures.GazePositionCalibrator.draw](argaze.md/#argaze.GazeFeatures.GazePositionCalibrator.draw) parameters (if None, nothing is drawn) draw_scan_path: [GazeFeatures.ScanPath.draw](argaze.md/#argaze.GazeFeatures.ScanPath.draw) parameters (if None, no scan path is drawn) draw_layers: dictionary of [ArLayer.draw](argaze.md/#argaze.ArFeatures.ArLayer.draw) parameters per layer (if None, no layer is drawn) draw_gaze_positions: [GazeFeatures.GazePosition.draw](argaze.md/#argaze.GazeFeatures.GazePosition.draw) parameters (if None, no gaze position is drawn) @@ -1032,6 +1033,11 @@ class ArFrame(): image = numpy.full((self.size[1], self.size[0], 3), 0).astype(numpy.uint8) + # Draw gaze position calibrator + if draw_gaze_position_calibrator is not None: + + self.gaze_position_calibrator.draw(image, size=self.size, **draw_gaze_position_calibrator) + # Draw scan path if required if draw_scan_path is not None and self.scan_path is not None: -- cgit v1.1 From 531775bd115c49fc15674fa9b53cb157b29ffaa8 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Wed, 18 Oct 2023 12:24:32 +0200 Subject: Fixing GazePositionCalibrator dict loading. --- src/argaze/ArFeatures.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index 13b952d..5b5d418 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -621,7 +621,7 @@ class ArFrame(): # dict: else: - new_gaze_position_calibrator = GazePositionCalibrator.from_dict(gaze_position_calibrator_value) + new_gaze_position_calibrator = GazeFeatures.GazePositionCalibrator.from_dict(gaze_position_calibrator_value) except KeyError: -- cgit v1.1 From d9dc8fc6f542c1ba46cba9d66a741890f946474a Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Mon, 23 Oct 2023 14:34:10 +0200 Subject: Removing draw_places_axes image parameter. Fixing aruco markers group drawing. 
--- .../aruco_markers_pipeline/pose_estimation.md | 4 --- src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py | 35 ++-------------------- 2 files changed, 2 insertions(+), 37 deletions(-) diff --git a/docs/user_guide/aruco_markers_pipeline/pose_estimation.md b/docs/user_guide/aruco_markers_pipeline/pose_estimation.md index 6027039..6b58b24 100644 --- a/docs/user_guide/aruco_markers_pipeline/pose_estimation.md +++ b/docs/user_guide/aruco_markers_pipeline/pose_estimation.md @@ -51,10 +51,6 @@ Here is an extract from the JSON [ArUcoCamera](../../argaze.md/#argaze.ArUcoMark "draw_places": { "color": [0, 0, 0], "border_size": 1 - }, - "draw_places_axes": { - "thickness": 1, - "length": 2.5 } } } diff --git a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py index edae927..b4aedbd 100644 --- a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py +++ b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py @@ -432,11 +432,7 @@ class ArUcoMarkersGroup(): try: - T = self.places[identifier].translation - R = self.places[identifier].rotation - - placePoints = (T + numpy.float32([R.dot([-l, -l, 0]), R.dot([l, -l, 0]), R.dot([l, l, 0]), R.dot([-l, l, 0])])).reshape(-1, 3) - placePoints, _ = cv2.projectPoints(placePoints, self._rotation, self._translation, numpy.array(K), numpy.array(D)) + placePoints, _ = cv2.projectPoints(place.corners, self._rotation, self._translation, numpy.array(K), numpy.array(D)) placePoints = placePoints.astype(int) cv2.line(image, tuple(placePoints[0].ravel()), tuple(placePoints[1].ravel()), color, border_size) @@ -448,29 +444,7 @@ class ArUcoMarkersGroup(): except cv2.error: pass - def draw_places_axes(self, image: numpy.array, K, D, thickness: int = 0, length: float = 0): - """Draw group place axes.""" - - for identifier, place in self.places.items(): - - try: - - T = self.places[identifier].translation - R = self.places[identifier].rotation - - axisPoints = (T + numpy.float32([R.dot([length, 0, 0]), R.dot([0, length, 0]), R.dot([0, 0, length]), R.dot([0, 0, 0])])).reshape(-1, 3) - axisPoints, _ = cv2.projectPoints(axisPoints, self._rotation, self._translation, numpy.array(K), numpy.array(D)) - axisPoints = axisPoints.astype(int) - - cv2.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[0].ravel()), (0, 0, 255), thickness) # X (red) - cv2.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[1].ravel()), (0, 255, 0), thickness) # Y (green) - cv2.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[2].ravel()), (255, 0, 0), thickness) # Z (blue) - - # Ignore errors due to out of field places: their coordinate are larger than int32 limitations. - except cv2.error: - pass - - def draw(self, image: numpy.array, K, D, draw_axes: dict = None, draw_places: dict = None, draw_places_axes: dict = None): + def draw(self, image: numpy.array, K, D, draw_axes: dict = None, draw_places: dict = None): """Draw group axes and places. Parameters: @@ -489,11 +463,6 @@ class ArUcoMarkersGroup(): self.draw_places(image, K, D, **draw_places) - # Draw places axes if required - if draw_places_axes is not None: - - self.draw_places_axes(image, K, D, **draw_places_axes) - def to_obj(self, obj_filepath): """Save group to .obj file.""" -- cgit v1.1 From ecfbc519e518948078b0831ccd0fa8bb18e439cb Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Mon, 23 Oct 2023 15:00:17 +0200 Subject: Removing parsing of normals for ArUcoMarkersGroup. 
--- .../aruco_markers_description.md | 4 ---- src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py | 19 ++----------------- 2 files changed, 2 insertions(+), 21 deletions(-) diff --git a/docs/user_guide/aruco_markers_pipeline/aruco_markers_description.md b/docs/user_guide/aruco_markers_pipeline/aruco_markers_description.md index 8104345..6380f88 100644 --- a/docs/user_guide/aruco_markers_pipeline/aruco_markers_description.md +++ b/docs/user_guide/aruco_markers_pipeline/aruco_markers_description.md @@ -65,21 +65,18 @@ v 0.000000 0.000000 0.000000 v 5.000000 0.000000 0.000000 v 0.000000 5.000000 0.000000 v 5.000000 5.000000 0.000000 -vn 0.0000 0.0000 1.0000 f 1//1 2//1 4//1 3//1 o DICT_APRILTAG_16h5#1_Marker v -0.855050 24.000002 4.349232 v 0.855050 24.000002 -0.349231 v -0.855050 29.000002 4.349232 v 0.855050 29.000002 -0.349231 -vn 0.9397 0.0000 0.3420 f 5//2 6//2 8//2 7//2 o DICT_APRILTAG_16h5#2_Marker v 44.000000 0.000000 9.500000 v 49.000000 0.000000 9.500000 v 44.000000 -0.000000 4.500000 v 49.000000 -0.000000 4.500000 -vn 0.0000 1.0000 -0.0000 f 9//3 10//3 12//3 11//3 ``` @@ -87,7 +84,6 @@ Here are common OBJ file features needed to describe ArUco markers places: * Object lines (starting with *o* key) indicate markers dictionary and id by following this format: **DICTIONARY**#**ID**\_Marker. * Vertice lines (starting with *v* key) indicate markers corners. The marker size will be automatically deducted from the geometry. -* Plane normals (starting with *vn* key) need to be exported for further pose estimation. * Face (starting with *f* key) link vertices and normals indexes together. !!! warning diff --git a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py index b4aedbd..5cacf09 100644 --- a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py +++ b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py @@ -152,9 +152,6 @@ class ArUcoMarkersGroup(): !!! note All markers have to belong to the same dictionary. - !!! note - Marker normal vectors (vn) expected. 
- """ new_marker_size = 0 @@ -165,8 +162,7 @@ class ArUcoMarkersGroup(): OBJ_RX_DICT = { 'object': re.compile(r'o (.*)#([0-9]+)_(.*)\n'), 'vertice': re.compile(r'v ([+-]?[0-9]*[.]?[0-9]+) ([+-]?[0-9]*[.]?[0-9]+) ([+-]?[0-9]*[.]?[0-9]+)\n'), - 'normal': re.compile(r'vn ([+-]?[0-9]*[.]?[0-9]+) ([+-]?[0-9]*[.]?[0-9]+) ([+-]?[0-9]*[.]?[0-9]+)\n'), - 'face': re.compile(r'f ([0-9]+)//([0-9]+) ([0-9]+)//([0-9]+) ([0-9]+)//([0-9]+) ([0-9]+)//([0-9]+)\n'), + 'face': re.compile(r'f ([0-9]+) ([0-9]+) ([0-9]+) ([0-9]+)\n'), 'comment': re.compile(r'#(.*)\n') # keep comment regex after object regex because the # is used in object string too } @@ -186,7 +182,6 @@ class ArUcoMarkersGroup(): identifier = None vertices = [] - normals = {} faces = {} # Open the file and read through it line by line @@ -225,15 +220,10 @@ class ArUcoMarkersGroup(): vertices.append(tuple([float(match.group(1)), float(match.group(2)), float(match.group(3))])) - # Extract normal to calculate rotation matrix - elif key == 'normal': - - normals[identifier] = tuple([float(match.group(1)), float(match.group(2)), float(match.group(3))]) - # Extract vertice ids elif key == 'face': - faces[identifier] = [int(match.group(1)), int(match.group(3)), int(match.group(5)), int(match.group(7))] + faces[identifier] = [int(match.group(1)), int(match.group(2)), int(match.group(3)), int(match.group(4))] # Go to next line line = file.readline() @@ -487,10 +477,5 @@ class ArUcoMarkersGroup(): vertices += f' {v_count}//{p+1}' - # Write normal vector - nvec = numpy.cross(place.corners[-1] - place.corners[0], place.corners[1] - place.corners[0]) - nvec = nvec / numpy.linalg.norm(nvec) - file.write(f'vn {nvec[0]} {nvec[1]} {nvec[2]}\n') - #file.write('s off\n') file.write(f'f{vertices}\n') -- cgit v1.1 From 3a83442c2da865b6307748a2f26c48fa1bb3fbc8 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Mon, 23 Oct 2023 15:06:20 +0200 Subject: Fixing documentation annotation. --- src/argaze/ArFeatures.py | 2 +- src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py | 1 - src/argaze/DataStructures.py | 2 +- 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py index 5b5d418..5ec6b7e 100644 --- a/src/argaze/ArFeatures.py +++ b/src/argaze/ArFeatures.py @@ -831,7 +831,7 @@ class ArFrame(): return self.__ts_logs - def look(self, timestamp: int|float, gaze_position: GazeFeatures.GazePosition = GazeFeatures.UnvalidGazePosition()) -> Tuple[GazeFeatures.GazeMovement, dict, dict, dict, Exception]: + def look(self, timestamp: int|float, gaze_position: GazeFeatures.GazePosition = GazeFeatures.UnvalidGazePosition()) -> Tuple[GazeFeatures.GazePosition, GazeFeatures.GazeMovement, dict, dict, dict, Exception]: """ Project gaze position into frame. 
diff --git a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py index 5cacf09..6ffdae2 100644 --- a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py +++ b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py @@ -440,7 +440,6 @@ class ArUcoMarkersGroup(): Parameters: draw_axes: draw_axes parameters (if None, no axes drawn) draw_places: draw_places parameters (if None, no places drawn) - draw_places_axes: draw_places_axes parameters (if None, no places axes drawn) """ # Draw axes if required diff --git a/src/argaze/DataStructures.py b/src/argaze/DataStructures.py index b5101b2..9e35dea 100644 --- a/src/argaze/DataStructures.py +++ b/src/argaze/DataStructures.py @@ -45,7 +45,7 @@ def as_dict(dataclass_object) -> dict: # Copy fields values return {name: vars(dataclass_object)[name] for name in fields_names} -def module_path(obj): +def module_path(obj) -> str: """ Get object module path. -- cgit v1.1 From 0ad125b331799aab99c138bbe71e5576a2e7271c Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Mon, 23 Oct 2023 19:39:55 +0200 Subject: Removing a useless warning. --- docs/user_guide/aruco_markers_pipeline/configuration_and_execution.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/docs/user_guide/aruco_markers_pipeline/configuration_and_execution.md b/docs/user_guide/aruco_markers_pipeline/configuration_and_execution.md index 3bded3a..60a1115 100644 --- a/docs/user_guide/aruco_markers_pipeline/configuration_and_execution.md +++ b/docs/user_guide/aruco_markers_pipeline/configuration_and_execution.md @@ -114,9 +114,6 @@ Pass each camera image to [ArUcoCamera.watch](../../argaze.md/#argaze.ArFeatures ... aruco_camera.image() ``` -!!! warning "Pose estimation error" - ArUco markers pose estimation algorithm can lead to errors due to geometric ambiguities as explain in [this article](https://ieeexplore.ieee.org/document/1717461). To discard such ambiguous cases, markers should **as less as possible be parallel to camera plan**. - ### Analyse timestamped gaze positions into camera frame As mentioned above, [ArUcoCamera](../../argaze.md/#argaze.ArUcoMarkers.ArUcoCamera) inherits from [ArFrame](../../argaze.md/#argaze.ArFeatures.ArFrame) and so, benefits from all the services described in [gaze analysis pipeline section](../gaze_analysis_pipeline/introduction.md). -- cgit v1.1 From f8b1a36c9e486ef19f62159475b9bf19a5b90a03 Mon Sep 17 00:00:00 2001 From: Théo de la Hogue Date: Mon, 23 Oct 2023 22:43:24 +0200 Subject: Fixing aruco markers group exportation. Renaming ready made script. Updating documentation. --- docs/user_guide/utils/ready-made_scripts.md | 6 +- src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py | 2 +- src/argaze/utils/aruco_markers_group_export.py | 160 ++++++++++++++++++++++ src/argaze/utils/aruco_markers_scene_export.py | 176 ------------------------- 4 files changed, 164 insertions(+), 180 deletions(-) create mode 100644 src/argaze/utils/aruco_markers_group_export.py delete mode 100644 src/argaze/utils/aruco_markers_scene_export.py diff --git a/docs/user_guide/utils/ready-made_scripts.md b/docs/user_guide/utils/ready-made_scripts.md index c82a332..55258e9 100644 --- a/docs/user_guide/utils/ready-made_scripts.md +++ b/docs/user_guide/utils/ready-made_scripts.md @@ -9,10 +9,10 @@ Collection of command-line scripts to provide useful features. !!! 
note
    *Use -h option to get command arguments documentation.*
 
-## ArUco scene exporter
+## ArUco markers group exporter
 
-Load a MOVIE with ArUco markers inside and select image into it, detect ArUco markers belonging to DICT_APRILTAG_16h5 dictionary with 5cm size into the selected image thanks to given OPTIC_PARAMETERS and DETECTOR_PARAMETERS files then, export detected ArUco markers scene as .obj file into an *./src/argaze/utils/_export/scenes* folder.
+Load a MOVIE and an ArUcoCamera CONFIGURATION to detect ArUco markers inside a selected movie frame, then export the detected ArUco markers group as a .obj file into an OUTPUT folder.
 
 ```shell
-python ./src/argaze/utils/aruco_markers_scene_export.py MOVIE DICT_APRILTAG_16h5 5 OPTIC_PARAMETERS DETECTOR_PARAMETERS -o ./src/argaze/utils/_export/scenes
+python ./src/argaze/utils/aruco_markers_group_export.py MOVIE CONFIGURATION -o OUTPUT
 ```
\ No newline at end of file

diff --git a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py
index 6ffdae2..37bceec 100644
--- a/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py
+++ b/src/argaze/ArUcoMarkers/ArUcoMarkersGroup.py
@@ -474,7 +474,7 @@ class ArUcoMarkersGroup():
                 file.write(f'v {" ".join(map(str, place.corners[v]))}\n')
                 v_count += 1
 
-            vertices += f' {v_count}//{p+1}'
+            vertices += f' {v_count}'
 
         #file.write('s off\n')
         file.write(f'f{vertices}\n')

diff --git a/src/argaze/utils/aruco_markers_group_export.py b/src/argaze/utils/aruco_markers_group_export.py
new file mode 100644
index 0000000..d948105
--- /dev/null
+++ b/src/argaze/utils/aruco_markers_group_export.py
@@ -0,0 +1,160 @@
+#!/usr/bin/env python
+
+""" """
+
+__author__ = "Théo de la Hogue"
+__credits__ = []
+__copyright__ = "Copyright 2023, Ecole Nationale de l'Aviation Civile (ENAC)"
+__license__ = "BSD"
+
+import argparse
+import time
+import itertools
+
+from argaze.ArUcoMarkers import ArUcoCamera, ArUcoMarkersGroup
+from argaze.utils import UtilsFeatures
+
+import cv2
+import numpy
+
+def main():
+    """
+    Load a MOVIE and an ArUcoCamera CONFIGURATION to detect ArUco markers inside a selected movie frame, then export the detected ArUco markers group as a .obj file into an OUTPUT folder.
+ """ + + # Manage arguments + parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0]) + parser.add_argument('movie', metavar='MOVIE', type=str, default=None, help='movie path') + parser.add_argument('configuration', metavar='CONFIGURATION', type=str, default=None, help='ArUco camera configuration') + + parser.add_argument('-s','--start', metavar='START', type=float, default=0., help='start time in second') + parser.add_argument('-o', '--output', metavar='OUTPUT', type=str, default='.', help='export folder path') + args = parser.parse_args() + + # Load movie + video_capture = cv2.VideoCapture(args.movie) + + video_fps = video_capture.get(cv2.CAP_PROP_FPS) + image_width = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)) + image_height = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)) + + # Load ArUco camera + aruco_camera = ArUcoCamera.ArUcoCamera.from_json(args.configuration) + + # Create empty ArUco scene + aruco_markers_group = None + + # Create a window + cv2.namedWindow(aruco_camera.name, cv2.WINDOW_AUTOSIZE) + + # Enable exit signal handler + exit = UtilsFeatures.ExitSignalHandler() + + # Init image selection + current_image_index = -1 + _, current_image = video_capture.read() + next_image_index = int(args.start * video_fps) + refresh = False + + while not exit.status(): + + # Select a new image and detect markers once + if next_image_index != current_image_index or refresh: + + video_capture.set(cv2.CAP_PROP_POS_FRAMES, next_image_index) + + success, video_image = video_capture.read() + + if success: + + # Refresh once + refresh = False + + current_image_index = video_capture.get(cv2.CAP_PROP_POS_FRAMES) - 1 + current_image_time = video_capture.get(cv2.CAP_PROP_POS_MSEC) + + # Detect markers + detection_time, projection_time, exceptions = aruco_camera.watch(video_image) + + # Estimate each markers pose + aruco_camera.aruco_detector.estimate_markers_pose(aruco_camera.aruco_detector.detected_markers) + + # Build aruco scene from detected markers + aruco_markers_group = ArUcoMarkersGroup.ArUcoMarkersGroup(aruco_camera.aruco_detector.marker_size, aruco_camera.aruco_detector.dictionary, aruco_camera.aruco_detector.detected_markers) + + # Get camera image + camera_image = aruco_camera.image() + + # Write detected markers + cv2.putText(camera_image, f'Detecting markers {list(aruco_camera.aruco_detector.detected_markers.keys())}', (20, aruco_camera.size[1]-40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA) + + # Write timing + cv2.putText(camera_image, f'Frame at {int(current_image_time)}ms', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA) + cv2.putText(camera_image, f'Detection {int(detection_time)}ms', (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA) + cv2.putText(camera_image, f'Projection {int(projection_time)}ms', (20, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA) + + # Write documentation + cv2.putText(camera_image, f'<- previous image', (aruco_camera.size[0]-500, aruco_camera.size[1]-120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA) + cv2.putText(camera_image, f'-> next image', (aruco_camera.size[0]-500, aruco_camera.size[1]-80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA) + cv2.putText(camera_image, f'Ctrl+s: export ArUco markers', (aruco_camera.size[0]-500, aruco_camera.size[1]-40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA) + + # Copy image + current_image = camera_image.copy() + + # Keep last image + else: + + video_image = 
current_image.copy() + + key_pressed = cv2.waitKey(10) + + #if key_pressed != -1: + # print(key_pressed) + + # Select previous image with left arrow + if key_pressed == 2: + next_image_index -= 1 + + # Select next image with right arrow + if key_pressed == 3: + next_image_index += 1 + + # Clip image index + if next_image_index < 0: + next_image_index = 0 + + # r: reload configuration + if key_pressed == 114: + + aruco_camera = ArUcoCamera.ArUcoCamera.from_json(args.configuration) + refresh = True + print('Configuration reloaded') + + # Save selected marker edition using 'Ctrl + s' + if key_pressed == 19: + + if aruco_markers_group: + + aruco_markers_group.to_obj(f'{args.output}/{int(current_image_time)}-aruco_markers_group.obj') + print(f'ArUco markers saved into {args.output}') + + else: + + print(f'No ArUco markers to export') + + # Close window using 'Esc' key + if key_pressed == 27: + break + + # Display video + cv2.imshow(aruco_camera.name, video_image) + + # Close movie capture + video_capture.release() + + # Stop image display + cv2.destroyAllWindows() + +if __name__ == '__main__': + + main() \ No newline at end of file diff --git a/src/argaze/utils/aruco_markers_scene_export.py b/src/argaze/utils/aruco_markers_scene_export.py deleted file mode 100644 index f618342..0000000 --- a/src/argaze/utils/aruco_markers_scene_export.py +++ /dev/null @@ -1,176 +0,0 @@ -#!/usr/bin/env python - -""" """ - -__author__ = "Théo de la Hogue" -__credits__ = [] -__copyright__ = "Copyright 2023, Ecole Nationale de l'Aviation Civile (ENAC)" -__license__ = "BSD" - -import argparse -import time -import itertools - -from argaze.ArUcoMarkers import ArUcoMarkersDictionary, ArUcoOpticCalibrator, ArUcoDetector, ArUcoMarkersGroup -from argaze.utils import UtilsFeatures - -import cv2 -import numpy - -def main(): - """ - Load a movie with ArUco markers inside and select image into it, detect ArUco markers belonging to a given dictionary and size into the selected image thanks to given optic parameters and detector parameters then, export detected ArUco scene as .obj file. 
- """ - - # Manage arguments - parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0]) - parser.add_argument('movie', metavar='MOVIE', type=str, default=None, help='movie path') - parser.add_argument('dictionary', metavar='DICTIONARY', type=str, default=None, help='ArUco dictionary to detect') - parser.add_argument('marker_size', metavar='MARKER_SIZE', type=int, default=3, help='marker size in cm') - parser.add_argument('optic_parameters', metavar='OPTIC_PARAMETERS', type=str, default=None, help='Optic parameters from camera calibration process') - parser.add_argument('detector_parameters', metavar='DETECTOR_PARAMETERS', type=str, default=None, help='ArUco detector parameters') - - parser.add_argument('-s','--start', metavar='START', type=float, default=0., help='start time in second') - parser.add_argument('-o', '--output', metavar='OUT', type=str, default='.', help='export scene folder path') - args = parser.parse_args() - - # Load movie - video_capture = cv2.VideoCapture(args.movie) - - video_fps = video_capture.get(cv2.CAP_PROP_FPS) - image_width = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)) - image_height = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)) - - # Load ArUco dictionary - aruco_dictionary = ArUcoMarkersDictionary.ArUcoMarkersDictionary(args.dictionary) - - # Load optic parameters - optic_parameters = ArUcoOpticCalibrator.OpticParameters.from_json(args.optic_parameters) - - # Load detector parameters - detector_parameters = ArUcoDetector.DetectorParameters.from_json(args.detector_parameters) - - # Create ArUco detector - aruco_detector = ArUcoDetector.ArUcoDetector(dictionary=aruco_dictionary, marker_size=args.marker_size, optic_parameters=optic_parameters, parameters=detector_parameters) - - # Create empty ArUco scene - aruco_markers_group = None - - # Create a window to display AR environment - window_name = "Export ArUco scene" - cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE) - - # Enable exit signal handler - exit = UtilsFeatures.ExitSignalHandler() - - # Init image selection - current_image_index = -1 - _, current_image = video_capture.read() - next_image_index = int(args.start * video_fps) - refresh = False - - # Hide help - draw_help = False - - while not exit.status(): - - # Select a new image and detect markers once - if next_image_index != current_image_index or refresh: - - video_capture.set(cv2.CAP_PROP_POS_FRAMES, next_image_index) - - success, video_image = video_capture.read() - - if success: - - # Refresh once - refresh = False - - current_image_index = video_capture.get(cv2.CAP_PROP_POS_FRAMES) - 1 - current_image_time = video_capture.get(cv2.CAP_PROP_POS_MSEC) - - # Detect markers - aruco_detector.detect_markers(video_image) - - # Estimate markers pose - aruco_detector.estimate_markers_pose() - - # Build aruco scene from detected markers - aruco_markers_group = ArUcoMarkersGroup.ArUcoMarkersGroup(args.marker_size, aruco_dictionary, aruco_detector.detected_markers) - - # Write scene detected markers - cv2.putText(video_image, f'{list(aruco_detector.detected_markers.keys())}', (20, image_height-80), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA) - - # Write timing - cv2.putText(video_image, f'Time: {int(current_image_time)} ms', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA) - - # Copy image - current_image = video_image.copy() - - # Keep last image - else: - - video_image = current_image.copy() - - # Draw detected markers - aruco_detector.draw_detected_markers(video_image, {"color": [0, 255, 
0], "draw_axes": {"thickness": 4}}) - - # Write documentation - cv2.putText(video_image, f'Press \'h\' for help', (950, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA) - - if draw_help: - - cv2.rectangle(video_image, (0, 50), (500, 300), (127, 127, 127), -1) - cv2.putText(video_image, f'> Left arrow: previous image', (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA) - cv2.putText(video_image, f'> Right arrow: next image', (20, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA) - cv2.putText(video_image, f'> Ctrl+s: export ArUco scene', (20, 160), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA) - - key_pressed = cv2.waitKey(10) - - #if key_pressed != -1: - # print(key_pressed) - - # Select previous image with left arrow - if key_pressed == 2: - next_image_index -= 1 - - # Select next image with right arrow - if key_pressed == 3: - next_image_index += 1 - - # Clip image index - if next_image_index < 0: - next_image_index = 0 - - # Switch help mode with h key - if key_pressed == 104: - draw_help = not draw_help - - # Save selected marker edition using 'Ctrl + s' - if key_pressed == 19: - - if aruco_markers_group: - - aruco_markers_group.to_obj(f'{args.output}/{int(current_image_time)}-aruco_markers_group.obj') - print(f'ArUco scene saved into {args.output}') - - else: - - print(f'No ArUco scene to export') - - # Close window using 'Esc' key - if key_pressed == 27: - break - - # Display video - cv2.imshow(window_name, video_image) - - # Close movie capture - video_capture.release() - - # Stop image display - cv2.destroyAllWindows() - -if __name__ == '__main__': - - main() \ No newline at end of file -- cgit v1.1
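As a closing illustration of the pose estimation pattern adopted earlier in this series (an SQPNP solve followed by a solvePnPRefineVVS pass), here is a standalone sketch with invented inputs; in ArGaze, the 3D points come from marker places and the 2D points from detected marker corners:

```python
import cv2
import numpy

# Invented planar square target (5 cm side) and its detected image corners in pixels
points_3d = numpy.array([[0., 0., 0.], [5., 0., 0.], [5., 5., 0.], [0., 5., 0.]])
points_2d = numpy.array([[320., 240.], [420., 238.], [424., 338.], [318., 342.]])

# Invented camera matrix K and distortion coefficients D
K = numpy.array([[800., 0., 320.], [0., 800., 240.], [0., 0., 1.]])
D = numpy.zeros(4)

# Initial pose estimate with the SQPNP solver
success, rvec, tvec = cv2.solvePnP(points_3d, points_2d, K, D, flags=cv2.SOLVEPNP_SQPNP)

if success:

    # Refine the estimate, starting from the rvec/tvec found above
    rvec, tvec = cv2.solvePnPRefineVVS(points_3d, points_2d, K, D, rvec, tvec)
```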