-rw-r--r--  docs/use_cases/simone_a320_cockpit_simulator.md  2
-rw-r--r--  docs/user_guide/ar_environment/environment_exploitation.md  10
-rw-r--r--  docs/user_guide/areas_of_interest/aoi_matching.md  2
-rw-r--r--  docs/user_guide/areas_of_interest/aoi_scene_projection.md  4
-rw-r--r--  docs/user_guide/aruco_markers/camera_calibration.md  12
-rw-r--r--  docs/user_guide/aruco_markers/markers_detection.md  10
-rw-r--r--  docs/user_guide/timestamped_data/introduction.md  2
-rw-r--r--  docs/user_guide/utils/demonstrations_scripts.md  2
-rw-r--r--  docs/user_guide/utils/ready-made_scripts.md  2
-rw-r--r--  src/argaze.test/ArUcoMarkers/ArUcoDetector.py  8
-rw-r--r--  src/argaze/ArFeatures.py  18
-rw-r--r--  src/argaze/ArUcoMarkers/ArUcoDetector.py  28
-rw-r--r--  src/argaze/ArUcoMarkers/ArUcoMarker.py  10
-rw-r--r--  src/argaze/ArUcoMarkers/ArUcoOpticCalibrator.py  8
-rw-r--r--  src/argaze/ArUcoMarkers/ArUcoScene.py  24
-rw-r--r--  src/argaze/AreaOfInterest/AOI2DScene.py  22
-rw-r--r--  src/argaze/AreaOfInterest/AOI3DScene.py  4
-rw-r--r--  src/argaze/AreaOfInterest/AOIFeatures.py  16
-rw-r--r--  src/argaze/GazeAnalysis/DispersionThresholdIdentification.py  14
-rw-r--r--  src/argaze/GazeAnalysis/VelocityThresholdIdentification.py  14
-rw-r--r--  src/argaze/GazeFeatures.py  28
-rw-r--r--  src/argaze/utils/aruco_markers_scene_export.py  72
-rw-r--r--  src/argaze/utils/camera_calibrate.py  26
-rw-r--r--  src/argaze/utils/demo_ar_features_run.py  38
-rw-r--r--  src/argaze/utils/demo_gaze_features_run.py  80
-rw-r--r--  src/argaze/utils/demo_heatmap_run.py  14
-rw-r--r--  src/argaze/utils/environment_edit.py  270
27 files changed, 370 insertions, 370 deletions
diff --git a/docs/use_cases/simone_a320_cockpit_simulator.md b/docs/use_cases/simone_a320_cockpit_simulator.md
index 7b11c01..4a4bc4f 100644
--- a/docs/use_cases/simone_a320_cockpit_simulator.md
+++ b/docs/use_cases/simone_a320_cockpit_simulator.md
@@ -19,7 +19,7 @@ The 3D scan have been loaded in a 3D editor to help in ArUco markers and AOI pos
![ArUco scene](../img/simone_aruco_scene.png) ![AOI scene](../img/simone_aoi_scene.png)
-Finally, a python script connect Tobii eyetracker glasses to ArGaze toolkit. The 3D AR environment is loaded then, ArUco markers are detected from Tobii eyetracker field camera stream allowing to estimate pilote head pose. The AOI are projected into camera frame then, gaze positions are analyzed to identify fixations and saccades to finally check if fixations matched any projected AOI.
+Finally, a Python script connects the Tobii eyetracker glasses to the ArGaze toolkit. The 3D AR environment is loaded, then ArUco markers are detected in the Tobii eyetracker field camera stream, allowing estimation of the pilot's head pose. The AOI are projected into the camera image, then gaze positions are analyzed to identify fixations and saccades, and finally to check whether fixations match any projected AOI.
![AOI and gaze projection](../img/simone_projection.png)
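
For reference, a minimal sketch of the pipeline described above. This is not the actual SimOne script: `eyetracker_stream`, `ar_environment` and `scene` stand for objects provided by the surrounding application, and only calls visible elsewhere in this changeset are used.

``` python
# Hypothetical acquisition loop over field camera images and gaze positions
for image, gaze_position in eyetracker_stream():

    # Detect ArUco markers in the field camera image
    ar_environment.aruco_detector.detect_markers(image)

    # Estimate head pose from the detected markers
    tvec, rmat, *_ = scene.estimate_pose(ar_environment.aruco_detector.detected_markers)

    # Project the AOI scene into the camera image according to the estimated pose
    aoi_scene_projection = scene.project(tvec, rmat)

    # Check which projected AOI the gaze position falls into
    for name, aoi, matching in aoi_scene_projection.raycast(gaze_position.value):

        if matching:
            print(f'{name} is looked')
```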
diff --git a/docs/user_guide/ar_environment/environment_exploitation.md b/docs/user_guide/ar_environment/environment_exploitation.md
index f07d150..a4013ea 100644
--- a/docs/user_guide/ar_environment/environment_exploitation.md
+++ b/docs/user_guide/ar_environment/environment_exploitation.md
@@ -4,8 +4,8 @@ Environment exploitation
Once loaded, [ArEnvironment](../../../argaze/#argaze.ArFeatures.ArEnvironment) assets can be exploited as illustrated below:
```python
-# Access to AR environment ArUco detector passing it a frame where to detect ArUco markers
-ar_environment.aruco_detector.detect_markers(frame)
+# Access the AR environment ArUco detector, passing it an image in which to detect ArUco markers
+ar_environment.aruco_detector.detect_markers(image)
# Access to an AR environment scene
my_first_scene = ar_environment.scenes['my first AR scene']
@@ -15,15 +15,15 @@ try:
# Try to estimate AR scene pose from detected markers
tvec, rmat, consistent_markers = my_first_scene.estimate_pose(ar_environment.aruco_detector.detected_markers)
- # Project AR scene into camera frame according estimated pose
+ # Project AR scene into camera image according to estimated pose
 # Optional visual_hfov argument is set to 160° to clip the AOI scene according to a cone of vision
aoi2D_scene = my_first_scene.project(tvec, rmat, visual_hfov=160)
# Draw estimated AR scene axis
- my_first_scene.draw_axis(frame)
+ my_first_scene.draw_axis(image)
# Draw AOI2D scene projection
- aoi2D_scene.draw(frame)
+ aoi2D_scene.draw(image)
# Do something with AOI2D scene projection
...
diff --git a/docs/user_guide/areas_of_interest/aoi_matching.md b/docs/user_guide/areas_of_interest/aoi_matching.md
index 1e18238..ff658a2 100644
--- a/docs/user_guide/areas_of_interest/aoi_matching.md
+++ b/docs/user_guide/areas_of_interest/aoi_matching.md
@@ -5,7 +5,7 @@ title: AOI matching
AOI matching
============
-Once [AOI3DScene](../../../argaze/#argaze.AreaOfInterest.AOI3DScene) is projected into a frame as [AOI2DScene](../../../argaze/#argaze.AreaOfInterest.AOI2DScene), it could be needed to know which AOI is looked.
+Once [AOI3DScene](../../../argaze/#argaze.AreaOfInterest.AOI3DScene) is projected as [AOI2DScene](../../../argaze/#argaze.AreaOfInterest.AOI2DScene), it may be needed to know which AOI is being looked at.
The [AreaOfInterest](../../../argaze/#argaze.AreaOfInterest.AOIFeatures.AreaOfInterest) class in [AOIFeatures](../../../argaze/#argaze.AreaOfInterest.AOIFeatures) provides two ways to accomplish such a task, as sketched below.
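
A minimal sketch of AOI matching at scene level, using the raycast and circlecast iterators visible in the AOI2DScene diff further below (pointer, center and radius are caller-provided placeholders):

``` python
# Pointer matching: test which AOI contains a given point
for name, aoi, matching in aoi2D_scene.raycast(pointer):

    if matching:
        print(f'{name} is looked')

# Circle matching: test which AOI overlaps a circle (e.g. a fixation area)
for name, aoi, matching_region, aoi_ratio, circle_ratio in aoi2D_scene.circlecast(center, radius):

    if aoi_ratio > 0:
        print(f'{name} is partially covered ({aoi_ratio:.0%})')
```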
diff --git a/docs/user_guide/areas_of_interest/aoi_scene_projection.md b/docs/user_guide/areas_of_interest/aoi_scene_projection.md
index ad50f6f..bdb3fe0 100644
--- a/docs/user_guide/areas_of_interest/aoi_scene_projection.md
+++ b/docs/user_guide/areas_of_interest/aoi_scene_projection.md
@@ -5,7 +5,7 @@ title: AOI scene projection
AOI scene projection
====================
-An [AOI3DScene](../../../argaze/#argaze.AreaOfInterest.AOI3DScene) can be rotated and translated according to a pose estimation before to project it onto camera frame as an [AOI2DScene](../../../argaze/#argaze.AreaOfInterest.AOI2DScene).
+An [AOI3DScene](../../../argaze/#argaze.AreaOfInterest.AOI3DScene) can be rotated and translated according to a pose estimation before projecting it onto the camera image as an [AOI2DScene](../../../argaze/#argaze.AreaOfInterest.AOI2DScene).
![AOI projection](../../img/aoi_projection.png)
@@ -18,5 +18,5 @@ An [AOI3DScene](../../../argaze/#argaze.AreaOfInterest.AOI3DScene) can be rotate
aoi2D_scene = aoi3D_scene.project(tvec, rmat, optic_parameters.K)
# Draw AOI 2D scene
-aoi2D_scene.draw(frame)
+aoi2D_scene.draw(image)
```
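
In practice, tvec and rmat come from a prior pose estimation and optic_parameters from camera calibration. A minimal sketch tying the steps together, reusing only calls shown elsewhere in this changeset (my_first_scene and ar_environment are assumed to be already loaded):

``` python
# Estimate scene pose from detected ArUco markers
tvec, rmat, *_ = my_first_scene.estimate_pose(ar_environment.aruco_detector.detected_markers)

# Project the 3D AOI scene onto the camera image plane
# (the distortion coefficients D may be omitted, as noted in AOI3DScene)
aoi2D_scene = aoi3D_scene.project(tvec, rmat, optic_parameters.K)

# Draw the resulting 2D scene
aoi2D_scene.draw(image)
```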
diff --git a/docs/user_guide/aruco_markers/camera_calibration.md b/docs/user_guide/aruco_markers/camera_calibration.md
index 7bff480..1019fc1 100644
--- a/docs/user_guide/aruco_markers/camera_calibration.md
+++ b/docs/user_guide/aruco_markers/camera_calibration.md
@@ -24,7 +24,7 @@ Then, the calibration process needs to make many different captures of an [ArUco
![Calibration step](../../img/camera_calibration_step.png)
-The sample of code below shows how to detect board corners into camera frames, store detected corners then process them to build calibration data and, finally, save it into a JSON file:
+The code sample below shows how to detect board corners in camera images, store the detected corners, then process them to build calibration data and, finally, save it into a JSON file:
``` python
from argaze.ArUcoMarkers import ArUcoMarkersDictionary, ArUcoOpticCalibrator, ArUcoBoard, ArUcoDetector
@@ -42,19 +42,19 @@ expected_aruco_board = ArUcoBoard.ArUcoBoard(7, 5, 5, 3, aruco_dictionary)
# Create ArUco detector
aruco_detector = ArUcoDetector.ArUcoDetector(dictionary=aruco_dictionary, marker_size=3)
-# Capture frames from a live Full HD video stream (1920x1080)
+# Capture images from a live Full HD video stream (1920x1080)
while video_stream.is_alive():
- frame = video_stream.read()
+ image = video_stream.read()
- # Detect all board corners in frame
- aruco_detector.detect_board(frame, expected_aruco_board, expected_aruco_board.markers_number)
+ # Detect all board corners in image
+ aruco_detector.detect_board(image, expected_aruco_board, expected_aruco_board.markers_number)
# If board corners are detected
if aruco_detector.board_corners_number > 0:
# Draw board corners to show that board tracking succeeded
- aruco_detector.draw_board(frame)
+ aruco_detector.draw_board(image)
# Append tracked board data for further calibration processing
aruco_optic_calibrator.store_calibration_data(aruco_detector.board_corners, aruco_detector.board_corners_identifier)
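
Once enough captures are stored, the calibration itself runs on the accumulated data, as in camera_calibrate.py further below. A hedged sketch of the final step (the JSON-export call name is an assumption; check OpticParameters for the actual saving API):

``` python
# Process stored calibration data to compute optic parameters
optic_parameters = aruco_optic_calibrator.calibrate(expected_aruco_board, dimensions=(1920, 1080))

if optic_parameters:

    # Hypothetical export call: verify the actual OpticParameters saving API
    optic_parameters.to_json('optic_parameters.json')
```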
diff --git a/docs/user_guide/aruco_markers/markers_detection.md b/docs/user_guide/aruco_markers/markers_detection.md
index 3851cb4..9a3bc9f 100644
--- a/docs/user_guide/aruco_markers/markers_detection.md
+++ b/docs/user_guide/aruco_markers/markers_detection.md
@@ -29,14 +29,14 @@ Here is [DetectorParameters](../../../argaze/#argaze.ArUcoMarkers.ArUcoDetector.
}
```
-The [ArUcoDetector](../../../argaze/#argaze.ArUcoMarkers.ArUcoDetector.ArUcoDetector) processes frame to detect markers and allows to draw detection results onto it:
+The [ArUcoDetector](../../../argaze/#argaze.ArUcoMarkers.ArUcoDetector.ArUcoDetector) processes an image to detect markers and can draw detection results onto it:
``` python
-# Detect markers into a frame and draw them
-aruco_detector.detect_markers(frame)
-aruco_detector.draw_detected_markers(frame)
+# Detect markers in an image and draw them
+aruco_detector.detect_markers(image)
+aruco_detector.draw_detected_markers(image)
-# Get corners position into frame related to each detected markers
+# Get corners positions in image for each detected marker
for marker_id, marker in aruco_detector.detected_markers.items():
print(f'marker {marker_id} corners: ', marker.corners)
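
Once markers are detected and optic parameters are set, their pose can also be estimated, as done in aruco_markers_scene_export.py further below:

``` python
# Estimate each detected marker pose (requires optic parameters)
aruco_detector.estimate_markers_pose()

for marker_id, marker in aruco_detector.detected_markers.items():

    # translation and rotation are filled once pose estimation succeeds
    print(f'marker {marker_id} translation: ', marker.translation)
    print(f'marker {marker_id} rotation: ', marker.rotation)
```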
diff --git a/docs/user_guide/timestamped_data/introduction.md b/docs/user_guide/timestamped_data/introduction.md
index ed13d85..a36daca 100644
--- a/docs/user_guide/timestamped_data/introduction.md
+++ b/docs/user_guide/timestamped_data/introduction.md
@@ -1,6 +1,6 @@
Timestamped data
================
-Working with wearable eye tracker devices implies to handle various timestamped data like frames, gaze positions, pupils diameter, fixations, saccades, ...
+Working with wearable eye tracker devices implies handling various timestamped data such as gaze positions, pupil diameters, fixations, saccades, ...
This section mainly refers to [DataStructures.TimeStampedBuffer](../../../argaze/#argaze.DataStructures.TimeStampedBuffer) class.
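
A minimal sketch of buffering gaze positions over time; dict-style insertion keyed by timestamp and the GazePosition constructor are assumptions, while the first/last accessors appear in the GazeFeatures diff further below:

``` python
from argaze import DataStructures, GazeFeatures

ts_gaze_positions = DataStructures.TimeStampedBuffer()

# Assumed dict-like insertion keyed by timestamp in milliseconds
ts_gaze_positions[0] = GazeFeatures.GazePosition((123, 456))
ts_gaze_positions[40] = GazeFeatures.GazePosition((130, 450))

# first and last return (timestamp, value) pairs, as used in GazeFeatures
first_ts, first_position = ts_gaze_positions.first
last_ts, last_position = ts_gaze_positions.last
```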
diff --git a/docs/user_guide/utils/demonstrations_scripts.md b/docs/user_guide/utils/demonstrations_scripts.md
index adcc8b3..5de2927 100644
--- a/docs/user_guide/utils/demonstrations_scripts.md
+++ b/docs/user_guide/utils/demonstrations_scripts.md
@@ -11,7 +11,7 @@ Collection of command-line scripts for demonstration purpose.
## AR environment demonstration
-Load AR environment from **setup.json** file, detect ArUco markers into camera device (-d DEVICE) frames and estimate envirnoment pose.
+Load AR environment from **setup.json** file, detect ArUco markers in camera device (-d DEVICE) images and estimate environment pose.
```shell
python ./src/argaze/utils/demo_ar_features_run.py -d DEVICE
diff --git a/docs/user_guide/utils/ready-made_scripts.md b/docs/user_guide/utils/ready-made_scripts.md
index 035d697..afc5749 100644
--- a/docs/user_guide/utils/ready-made_scripts.md
+++ b/docs/user_guide/utils/ready-made_scripts.md
@@ -36,7 +36,7 @@ python ./src/argaze/utils/camera_calibrate.py 7 5 5 3 DICT_APRILTAG_16h5 -d DEVI
## ArUco scene exporter
-Load a MOVIE with ArUco markers inside and select a frame into it, detect ArUco markers belonging to DICT_APRILTAG_16h5 dictionary with 5cm size into the selected frame thanks to given OPTIC_PARAMETERS and DETECTOR_PARAMETERS then, export detected ArUco markers scene as .obj file into an *./src/argaze/utils/_export/scenes* folder.
+Load a MOVIE with ArUco markers inside and select an image in it, detect ArUco markers belonging to the DICT_APRILTAG_16h5 dictionary with 5cm size in the selected image thanks to the given OPTIC_PARAMETERS and DETECTOR_PARAMETERS, then export the detected ArUco markers scene as a .obj file into the *./src/argaze/utils/_export/scenes* folder.
```shell
python ./src/argaze/utils/aruco_markers_scene_export.py MOVIE DICT_APRILTAG_16h5 5 OPTIC_PARAMETERS DETECTOR_PARAMETERS -o ./src/argaze/utils/_export/scenes
diff --git a/src/argaze.test/ArUcoMarkers/ArUcoDetector.py b/src/argaze.test/ArUcoMarkers/ArUcoDetector.py
index 750aaa5..403d1a6 100644
--- a/src/argaze.test/ArUcoMarkers/ArUcoDetector.py
+++ b/src/argaze.test/ArUcoMarkers/ArUcoDetector.py
@@ -89,10 +89,10 @@ class TestArUcoDetectorClass(unittest.TestCase):
# Load picture Full HD to test ArUcoMarker detection
current_directory = os.path.dirname(os.path.abspath(__file__))
- frame = cv.imread(os.path.join(current_directory, 'utils/full_hd_marker.png'))
+ image = cv.imread(os.path.join(current_directory, 'utils/full_hd_marker.png'))
# Check ArUcoMarker detection
- aruco_detector.detect_markers(frame)
+ aruco_detector.detect_markers(image)
self.assertEqual(aruco_detector.detected_markers_number, 1)
@@ -126,10 +126,10 @@ class TestArUcoDetectorClass(unittest.TestCase):
# Load picture Full HD to test ArUcoMarker board detection
current_directory = os.path.dirname(os.path.abspath(__file__))
- frame = cv.imread(os.path.join(current_directory, 'utils/full_hd_board.png'))
+ image = cv.imread(os.path.join(current_directory, 'utils/full_hd_board.png'))
# Check ArUcoMarker board detection
- aruco_detector.detect_board(frame, aruco_board, aruco_board.markers_number)
+ aruco_detector.detect_board(image, aruco_board, aruco_board.markers_number)
self.assertEqual(aruco_detector.board_corners_number, aruco_board.corners_number)
self.assertEqual(len(aruco_detector.board_corners), 24)
diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py
index 0d2062d..4aedb2b 100644
--- a/src/argaze/ArFeatures.py
+++ b/src/argaze/ArFeatures.py
@@ -344,7 +344,7 @@ class ArScene():
def build_aruco_aoi_scene(self, detected_markers) -> AOI2DScene.AOI2DScene:
"""
- Build AOI scene from ArUco markers into frame as defined in aruco_aoi dictionary.
+ Build AOI scene from detected ArUco markers as defined in aruco_aoi dictionary.
Returns:
built AOI 2D scene
@@ -385,24 +385,24 @@ class ArScene():
return AOI2DScene.AOI2DScene(aruco_aoi_scene)
- def draw_axis(self, frame: numpy.array):
+ def draw_axis(self, image: numpy.array):
"""
- Draw scene axis into frame.
+ Draw scene axis into image.
Parameters:
- frame: where to draw
+ image: where to draw
"""
- self.aruco_scene.draw_axis(frame, self._environment.aruco_detector.optic_parameters.K, self._environment.aruco_detector.optic_parameters.D)
+ self.aruco_scene.draw_axis(image, self._environment.aruco_detector.optic_parameters.K, self._environment.aruco_detector.optic_parameters.D)
- def draw_places(self, frame: numpy.array):
+ def draw_places(self, image: numpy.array):
"""
- Draw scene places into frame.
+ Draw scene places into image.
Parameters:
- frame: where to draw
+ image: where to draw
"""
- self.aruco_scene.draw_places(frame, self._environment.aruco_detector.optic_parameters.K, self._environment.aruco_detector.optic_parameters.D)
+ self.aruco_scene.draw_places(image, self._environment.aruco_detector.optic_parameters.K, self._environment.aruco_detector.optic_parameters.D)
diff --git a/src/argaze/ArUcoMarkers/ArUcoDetector.py b/src/argaze/ArUcoMarkers/ArUcoDetector.py
index 5891f16..b9ee077 100644
--- a/src/argaze/ArUcoMarkers/ArUcoDetector.py
+++ b/src/argaze/ArUcoMarkers/ArUcoDetector.py
@@ -128,7 +128,7 @@ class ArUcoDetector():
"""Size of ArUco markers to detect in centimeter."""
optic_parameters: ArUcoOpticCalibrator.OpticParameters = field(default_factory=ArUcoOpticCalibrator.OpticParameters)
- """Optic parameters to use for ArUco detection into frame."""
+ """Optic parameters to use for ArUco detection into image."""
parameters: DetectorParameters = field(default_factory=DetectorParameters)
"""ArUco detector parameters."""
@@ -175,10 +175,10 @@ class ArUcoDetector():
return output
- def detect_markers(self, frame: numpy.array):
- """Detect all ArUco markers into a frame.
+ def detect_markers(self, image: numpy.array):
+ """Detect all ArUco markers in an image.
- .. danger:: DON'T MIRROR FRAME
+ .. danger:: DON'T MIRROR IMAGE
It makes the markers detection fail.
"""
@@ -186,7 +186,7 @@ class ArUcoDetector():
self.__detected_markers, self.__detected_markers_corners, self.__detected_markers_ids = {}, [], []
# Detect markers into gray picture
- self.__detected_markers_corners, self.__detected_markers_ids, _ = aruco.detectMarkers(cv.cvtColor(frame, cv.COLOR_BGR2GRAY), self.dictionary.markers, parameters = self.parameters.internal)
+ self.__detected_markers_corners, self.__detected_markers_ids, _ = aruco.detectMarkers(cv.cvtColor(image, cv.COLOR_BGR2GRAY), self.dictionary.markers, parameters = self.parameters.internal)
# Is there detected markers ?
if len(self.__detected_markers_corners) > 0:
@@ -262,22 +262,22 @@ class ArUcoDetector():
return len(list(self.__detected_markers.keys()))
- def draw_detected_markers(self, frame: numpy.array):
+ def draw_detected_markers(self, image: numpy.array):
"""Draw traked markers."""
for marker_id, marker in self.__detected_markers.items():
- marker.draw(frame, self.optic_parameters.K, self.optic_parameters.D)
+ marker.draw(image, self.optic_parameters.K, self.optic_parameters.D)
- def detect_board(self, frame: numpy.array, board, expected_markers_number):
- """Detect ArUco markers board in frame setting up the number of detected markers needed to agree detection.
+ def detect_board(self, image: numpy.array, board, expected_markers_number):
+ """Detect ArUco markers board in image, setting the number of detected markers needed to validate detection.
- .. danger:: DON'T MIRROR FRAME
+ .. danger:: DON'T MIRROR IMAGE
It makes the markers detection fail.
"""
# detect markers from gray picture
- gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
+ gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
self.__detected_markers_corners, self.__detected_markers_ids, _ = aruco.detectMarkers(gray, self.dictionary.markers, parameters = self.parameters.internal)
# if all board markers are detected
@@ -293,12 +293,12 @@ class ArUcoDetector():
self.__board_corners = []
self.__board_corners_ids = []
- def draw_board(self, frame: numpy.array):
- """Draw detected board corners in frame."""
+ def draw_board(self, image: numpy.array):
+ """Draw detected board corners in image."""
if self.__board != None:
- cv.drawChessboardCorners(frame, ((self.__board.size[0] - 1 ), (self.__board.size[1] - 1)), self.__board_corners, True)
+ cv.drawChessboardCorners(image, ((self.__board.size[0] - 1 ), (self.__board.size[1] - 1)), self.__board_corners, True)
def reset_detection_metrics(self):
"""Enable marker detection metrics."""
diff --git a/src/argaze/ArUcoMarkers/ArUcoMarker.py b/src/argaze/ArUcoMarkers/ArUcoMarker.py
index 8b59166..3a13c10 100644
--- a/src/argaze/ArUcoMarkers/ArUcoMarker.py
+++ b/src/argaze/ArUcoMarkers/ArUcoMarker.py
@@ -41,7 +41,7 @@ class ArUcoMarker():
"""Estimated 3D corners positions in camera world referential."""
color: tuple = field(init=False, repr=False, default_factory=lambda : (0, 255, 0))
- """Color used to draw marker on frame."""
+ """Color used to draw marker on image."""
@property
def center(self) -> numpy.array:
@@ -59,15 +59,15 @@ class ArUcoMarker():
return numpy.repeat(matrix, 3).reshape(dimension, dimension, 3)
- def draw(self, frame: numpy.array, K, D):
- """Draw marker in frame."""
+ def draw(self, image: numpy.array, K, D):
+ """Draw marker in image."""
# Draw marker axis if pose has been estimated
if self.translation.size == 3 and self.rotation.size == 9:
- cv.drawFrameAxes(frame, numpy.array(K), numpy.array(D), self.rotation, self.translation, self.size)
+ cv.drawFrameAxes(image, numpy.array(K), numpy.array(D), self.rotation, self.translation, self.size)
- aruco.drawDetectedMarkers(frame, [self.corners], numpy.array([self.identifier]), self.color)
+ aruco.drawDetectedMarkers(image, [self.corners], numpy.array([self.identifier]), self.color)
def save(self, destination_folder, dpi):
"""Save marker image as .png file into a destination folder."""
diff --git a/src/argaze/ArUcoMarkers/ArUcoOpticCalibrator.py b/src/argaze/ArUcoMarkers/ArUcoOpticCalibrator.py
index ed33c95..ec55e44 100644
--- a/src/argaze/ArUcoMarkers/ArUcoOpticCalibrator.py
+++ b/src/argaze/ArUcoMarkers/ArUcoOpticCalibrator.py
@@ -30,7 +30,7 @@ class OpticParameters():
"""Root Mean Square error of calibration."""
dimensions: numpy.array = field(default_factory=lambda : numpy.array([0, 0]))
- """Frame dimensions in pixels from which the calibration have been done."""
+ """Image dimensions in pixels from which the calibration has been done."""
K: numpy.array = field(default_factory=lambda : K0)
"""Intrinsic parameters matrix (focal lengths and principal point)."""
@@ -63,7 +63,7 @@ class OpticParameters():
return output
- def draw(self, frame: numpy.array, width:float, height:float, z:float, color=(0, 0, 255)):
+ def draw(self, image: numpy.array, width:float, height:float, z:float, color=(0, 0, 255)):
"""Draw grid to display K and D"""
# Edit 3D grid
@@ -80,7 +80,7 @@ class OpticParameters():
# Ignore points out of field
try:
- cv2.circle(frame, point.astype(int)[0], 1, color, -1)
+ cv2.circle(image, point.astype(int)[0], 1, color, -1)
except:
pass
@@ -98,7 +98,7 @@ class ArUcoOpticCalibrator():
"""Retrieve K and D parameters from stored calibration data.
Parameters:
- dimensions: camera frame dimensions
+ dimensions: camera image dimensions
Returns:
Optic parameters
diff --git a/src/argaze/ArUcoMarkers/ArUcoScene.py b/src/argaze/ArUcoMarkers/ArUcoScene.py
index 3783660..b9c20e2 100644
--- a/src/argaze/ArUcoMarkers/ArUcoScene.py
+++ b/src/argaze/ArUcoMarkers/ArUcoScene.py
@@ -617,7 +617,7 @@ class ArUcoScene():
self._rotation = rmat
- def draw_axis(self, frame: numpy.array, K, D, consistency=2):
+ def draw_axis(self, image: numpy.array, K, D, consistency=2):
"""Draw scene axis according a consistency score."""
l = self.marker_size / 2
@@ -634,15 +634,15 @@ class ArUcoScene():
axisPoints, _ = cv.projectPoints(axisPoints, self._rotation, self._translation, numpy.array(K), numpy.array(D))
axisPoints = axisPoints.astype(int)
- cv.line(frame, tuple(axisPoints[3].ravel()), tuple(axisPoints[0].ravel()), (n,n,f), 6) # X (red)
- cv.line(frame, tuple(axisPoints[3].ravel()), tuple(axisPoints[1].ravel()), (n,f,n), 6) # Y (green)
- cv.line(frame, tuple(axisPoints[3].ravel()), tuple(axisPoints[2].ravel()), (f,n,n), 6) # Z (blue)
+ cv.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[0].ravel()), (n,n,f), 6) # X (red)
+ cv.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[1].ravel()), (n,f,n), 6) # Y (green)
+ cv.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[2].ravel()), (f,n,n), 6) # Z (blue)
# Ignore errors due to out of field axis: their coordinate are larger than int32 limitations.
except cv.error:
pass
- def draw_places(self, frame: numpy.array, K, D, consistency=2):
+ def draw_places(self, image: numpy.array, K, D, consistency=2):
"""Draw scene places and their axis according a consistency score."""
l = self.marker_size / 2
@@ -664,19 +664,19 @@ class ArUcoScene():
axisPoints, _ = cv.projectPoints(axisPoints, self._rotation, self._translation, numpy.array(K), numpy.array(D))
axisPoints = axisPoints.astype(int)
- cv.line(frame, tuple(axisPoints[3].ravel()), tuple(axisPoints[0].ravel()), (n,n,f), 6) # X (red)
- cv.line(frame, tuple(axisPoints[3].ravel()), tuple(axisPoints[1].ravel()), (n,f,n), 6) # Y (green)
- cv.line(frame, tuple(axisPoints[3].ravel()), tuple(axisPoints[2].ravel()), (f,n,n), 6) # Z (blue)
+ cv.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[0].ravel()), (n,n,f), 6) # X (red)
+ cv.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[1].ravel()), (n,f,n), 6) # Y (green)
+ cv.line(image, tuple(axisPoints[3].ravel()), tuple(axisPoints[2].ravel()), (f,n,n), 6) # Z (blue)
# Draw place
placePoints = (T + numpy.float32([R.dot([-l, -l, 0]), R.dot([l, -l, 0]), R.dot([l, l, 0]), R.dot([-l, l, 0])])).reshape(-1, 3)
placePoints, _ = cv.projectPoints(placePoints, self._rotation, self._translation, numpy.array(K), numpy.array(D))
placePoints = placePoints.astype(int)
- cv.line(frame, tuple(placePoints[0].ravel()), tuple(placePoints[1].ravel()), (f,f,f), 3)
- cv.line(frame, tuple(placePoints[1].ravel()), tuple(placePoints[2].ravel()), (f,f,f), 3)
- cv.line(frame, tuple(placePoints[2].ravel()), tuple(placePoints[3].ravel()), (f,f,f), 3)
- cv.line(frame, tuple(placePoints[3].ravel()), tuple(placePoints[0].ravel()), (f,f,f), 3)
+ cv.line(image, tuple(placePoints[0].ravel()), tuple(placePoints[1].ravel()), (f,f,f), 3)
+ cv.line(image, tuple(placePoints[1].ravel()), tuple(placePoints[2].ravel()), (f,f,f), 3)
+ cv.line(image, tuple(placePoints[2].ravel()), tuple(placePoints[3].ravel()), (f,f,f), 3)
+ cv.line(image, tuple(placePoints[3].ravel()), tuple(placePoints[0].ravel()), (f,f,f), 3)
# Ignore errors due to out of field places: their coordinate are larger than int32 limitations.
except cv.error:
diff --git a/src/argaze/AreaOfInterest/AOI2DScene.py b/src/argaze/AreaOfInterest/AOI2DScene.py
index 69b1391..1d4624a 100644
--- a/src/argaze/AreaOfInterest/AOI2DScene.py
+++ b/src/argaze/AreaOfInterest/AOI2DScene.py
@@ -26,15 +26,15 @@ class AOI2DScene(AOIFeatures.AOIScene):
super().__init__(2, aois_2d)
- def draw(self, frame: numpy.array, exclude=[], color=(0, 255, 255)):
- """Draw AOI polygons on frame."""
+ def draw(self, image: numpy.array, exclude=[], color=(0, 255, 255)):
+ """Draw AOI polygons on image."""
for name, aoi in self.items():
if name in exclude:
continue
- aoi.draw(frame, color)
+ aoi.draw(image, color)
def raycast(self, pointer:tuple) -> Tuple[str, "AOIFeatures.AreaOfInterest", bool]:
"""Iterate over aoi to know which aoi is matching the given pointer position.
@@ -50,7 +50,7 @@ class AOI2DScene(AOIFeatures.AOIScene):
yield name, aoi, matching
- def draw_raycast(self, frame: numpy.array, pointer:tuple, exclude=[], base_color=(0, 0, 255), matching_color=(0, 255, 0)):
+ def draw_raycast(self, image: numpy.array, pointer:tuple, exclude=[], base_color=(0, 0, 255), matching_color=(0, 255, 0)):
"""Draw AOIs with their matching status."""
for name, aoi, matching in self.raycast(pointer):
@@ -63,10 +63,10 @@ class AOI2DScene(AOIFeatures.AOIScene):
if matching:
top_left_corner_pixel = numpy.rint(aoi.clockwise()[0]).astype(int)
- cv2.putText(frame, name, top_left_corner_pixel, cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(image, name, top_left_corner_pixel, cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
# Draw form
- aoi.draw(frame, color)
+ aoi.draw(image, color)
def circlecast(self, center:tuple, radius:float) -> Tuple[str, "AOIFeatures.AreaOfInterest", numpy.array, float, float]:
"""Iterate over areas to know which aoi is matching circle.
@@ -84,7 +84,7 @@ class AOI2DScene(AOIFeatures.AOIScene):
yield name, aoi, matching_region, aoi_ratio, circle_ratio
- def draw_circlecast(self, frame: numpy.array, center:tuple, radius:float, exclude=[], base_color=(0, 0, 255), matching_color=(0, 255, 0)):
+ def draw_circlecast(self, image: numpy.array, center:tuple, radius:float, exclude=[], base_color=(0, 0, 255), matching_color=(0, 255, 0)):
"""Draw AOIs with their matching status and matching region."""
for name, aoi, matching_region, aoi_ratio, circle_ratio in self.circlecast(center, radius):
@@ -94,7 +94,7 @@ class AOI2DScene(AOIFeatures.AOIScene):
# Draw matching region
if aoi_ratio > 0:
- matching_region.draw(frame, base_color, 4)
+ matching_region.draw(image, base_color, 4)
# TODO : Externalise this criteria
matching = aoi_ratio > 0.25 or circle_ratio > 0.5
@@ -104,13 +104,13 @@ class AOI2DScene(AOIFeatures.AOIScene):
if matching:
top_left_corner_pixel = numpy.rint(aoi.clockwise()[0]).astype(int)
- cv2.putText(frame, name, top_left_corner_pixel, cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(image, name, top_left_corner_pixel, cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
# Draw matching region
- matching_region.draw(frame, matching_color, 4)
+ matching_region.draw(image, matching_color, 4)
# Draw form
- aoi.draw(frame, color)
+ aoi.draw(image, color)
def reframe(self, aoi_name: str, size: tuple) -> AOI2DSceneType:
"""
diff --git a/src/argaze/AreaOfInterest/AOI3DScene.py b/src/argaze/AreaOfInterest/AOI3DScene.py
index 2272206..cb5b5a3 100644
--- a/src/argaze/AreaOfInterest/AOI3DScene.py
+++ b/src/argaze/AreaOfInterest/AOI3DScene.py
@@ -200,10 +200,10 @@ class AOI3DScene(AOIFeatures.AOIScene):
D: camera distorsion coefficients vector
!!! danger
- Camera distorsion coefficients could projects points which are far from the frame into it.
+ Camera distortion coefficients could project points which are far from the image frame into it.
!!! note
- As gaze is mainly focusing on the frame center, where the distorsion is low, it could be acceptable to not use camera distorsion.
+ As gaze is mainly focusing on the frame center, where the distortion is low, it could be acceptable to not use camera distortion.
"""
aoi2D_scene = AOI2DScene.AOI2DScene()
diff --git a/src/argaze/AreaOfInterest/AOIFeatures.py b/src/argaze/AreaOfInterest/AOIFeatures.py
index 548382c..07ef7c4 100644
--- a/src/argaze/AreaOfInterest/AOIFeatures.py
+++ b/src/argaze/AreaOfInterest/AOIFeatures.py
@@ -201,8 +201,8 @@ class AreaOfInterest(numpy.ndarray):
return empty_array, 0., 0.
- def draw(self, frame: numpy.array, color, border_size=1):
- """Draw 2D AOI into frame.
+ def draw(self, image: numpy.array, color, border_size=1):
+ """Draw 2D AOI into image.
!!! warning
Available for 2D AOI only."""
@@ -212,13 +212,13 @@ class AreaOfInterest(numpy.ndarray):
# Draw form
pixels = numpy.rint(self).astype(int)
- cv2.line(frame, pixels[-1], pixels[0], color, border_size)
+ cv2.line(image, pixels[-1], pixels[0], color, border_size)
for A, B in zip(pixels, pixels[1:]):
- cv2.line(frame, A, B, color, border_size)
+ cv2.line(image, A, B, color, border_size)
# Draw center
center_pixel = numpy.rint(self.center).astype(int)
- cv2.circle(frame, center_pixel, 1, color, -1)
+ cv2.circle(image, center_pixel, 1, color, -1)
AOISceneType = TypeVar('AOIScene', bound="AOIScene")
# Type definition for type annotation convenience
@@ -373,7 +373,7 @@ AOIFrameType = TypeVar('AOIFrame', bound="AOIFrame")
# Type definition for type annotation convenience
class AOIFrame():
- """Define frame to draw into 2D AOI."""
+ """Define image to draw into 2D AOI."""
def __init__(self, aoi: AreaOfInterestType, size: tuple):
"""
@@ -392,7 +392,7 @@ class AOIFrame():
self.heatmap_init()
def point_spread(self, point: tuple, sigma: float):
- """Draw gaussian point spread into frame."""
+ """Draw gaussian point spread into image."""
div = -2 * sigma**2
@@ -429,7 +429,7 @@ class AOIFrame():
self.__point_spread_buffer.append(point_spread)
- # Remove oldest point spread buffer frame
+ # Remove oldest point spread buffer image
if len(self.__point_spread_buffer) > self.__point_spread_buffer_size:
self.__point_spread_sum -= self.__point_spread_buffer.pop(0)
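
For reference, the AOIFrame heatmap workflow renamed above is exercised in demo_gaze_features_run.py further below; a condensed sketch (aoi_scene_projection, window_size and gaze_position are caller-provided):

``` python
import cv2
from argaze.AreaOfInterest import AOIFeatures

# Attach a frame to a 2D AOI and accumulate gaze spread into it
screen_frame = AOIFeatures.AOIFrame(aoi_scene_projection['Screen'], window_size)
screen_frame.heatmap_init()

# On each new gaze position
screen_frame.heatmap_update(gaze_position.value, sigma=0.05)

# Blend the heatmap over a displayed image
image = cv2.addWeighted(screen_frame.heatmap, 0.5, image, 1., 0)
```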
diff --git a/src/argaze/GazeAnalysis/DispersionThresholdIdentification.py b/src/argaze/GazeAnalysis/DispersionThresholdIdentification.py
index c79b4fc..1883d69 100644
--- a/src/argaze/GazeAnalysis/DispersionThresholdIdentification.py
+++ b/src/argaze/GazeAnalysis/DispersionThresholdIdentification.py
@@ -78,11 +78,11 @@ class Fixation(GazeFeatures.Fixation):
return self
- def draw(self, frame: numpy.array, color=(127, 127, 127), border_color=(255, 255, 255)):
- """Draw fixation into frame."""
+ def draw(self, image: numpy.array, color=(127, 127, 127), border_color=(255, 255, 255)):
+ """Draw fixation into image."""
- cv2.circle(frame, (int(self.focus[0]), int(self.focus[1])), int(self.deviation_max), color, -1)
- cv2.circle(frame, (int(self.focus[0]), int(self.focus[1])), int(self.deviation_max), border_color, len(self.positions))
+ cv2.circle(image, (int(self.focus[0]), int(self.focus[1])), int(self.deviation_max), color, -1)
+ cv2.circle(image, (int(self.focus[0]), int(self.focus[1])), int(self.deviation_max), border_color, len(self.positions))
@dataclass(frozen=True)
class Saccade(GazeFeatures.Saccade):
@@ -91,13 +91,13 @@ class Saccade(GazeFeatures.Saccade):
def __post_init__(self):
super().__post_init__()
- def draw(self, frame: numpy.array, color=(255, 255, 255)):
- """Draw saccade into frame."""
+ def draw(self, image: numpy.array, color=(255, 255, 255)):
+ """Draw saccade into image."""
_, start_position = self.positions.first
_, last_position = self.positions.last
- cv2.line(frame, (int(start_position[0]), int(start_position[1])), (int(last_position[0]), int(last_position[1])), color, 2)
+ cv2.line(image, (int(start_position[0]), int(start_position[1])), (int(last_position[0]), int(last_position[1])), color, 2)
@dataclass
class GazeMovementIdentifier(GazeFeatures.GazeMovementIdentifier):
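
A hedged usage sketch for the renamed draw methods; only current_fixation, current_saccade and the draw signatures above are confirmed by this changeset, while the identifier construction and the way gaze positions are fed to it are not shown here:

``` python
from argaze.GazeAnalysis import DispersionThresholdIdentification

# Constructor fields are not visible in this diff and may require thresholds
identifier = DispersionThresholdIdentification.GazeMovementIdentifier()

# Draw the movement currently being built, if any
if identifier.current_fixation != None:
    identifier.current_fixation.draw(image, color=(127, 127, 127), border_color=(255, 255, 255))

if identifier.current_saccade != None:
    identifier.current_saccade.draw(image, color=(255, 255, 255))
```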
diff --git a/src/argaze/GazeAnalysis/VelocityThresholdIdentification.py b/src/argaze/GazeAnalysis/VelocityThresholdIdentification.py
index 32af1c5..7d6c7b2 100644
--- a/src/argaze/GazeAnalysis/VelocityThresholdIdentification.py
+++ b/src/argaze/GazeAnalysis/VelocityThresholdIdentification.py
@@ -78,11 +78,11 @@ class Fixation(GazeFeatures.Fixation):
return self
- def draw(self, frame: numpy.array, color=(127, 127, 127), border_color=(255, 255, 255)):
- """Draw fixation into frame."""
+ def draw(self, image: numpy.array, color=(127, 127, 127), border_color=(255, 255, 255)):
+ """Draw fixation into image."""
- cv2.circle(frame, (int(self.focus[0]), int(self.focus[1])), int(self.deviation_max), color, -1)
- cv2.circle(frame, (int(self.focus[0]), int(self.focus[1])), int(self.deviation_max), border_color, len(self.positions))
+ cv2.circle(image, (int(self.focus[0]), int(self.focus[1])), int(self.deviation_max), color, -1)
+ cv2.circle(image, (int(self.focus[0]), int(self.focus[1])), int(self.deviation_max), border_color, len(self.positions))
@dataclass(frozen=True)
class Saccade(GazeFeatures.Saccade):
@@ -91,13 +91,13 @@ class Saccade(GazeFeatures.Saccade):
def __post_init__(self):
super().__post_init__()
- def draw(self, frame: numpy.array, color=(255, 255, 255)):
- """Draw saccade into frame."""
+ def draw(self, image: numpy.array, color=(255, 255, 255)):
+ """Draw saccade into image."""
_, start_position = self.positions.first
_, last_position = self.positions.last
- cv2.line(frame, (int(start_position[0]), int(start_position[1])), (int(last_position[0]), int(last_position[1])), color, 2)
+ cv2.line(image, (int(start_position[0]), int(start_position[1])), (int(last_position[0]), int(last_position[1])), color, 2)
@dataclass
class GazeMovementIdentifier(GazeFeatures.GazeMovementIdentifier):
diff --git a/src/argaze/GazeFeatures.py b/src/argaze/GazeFeatures.py
index cf1f7b8..d4307a2 100644
--- a/src/argaze/GazeFeatures.py
+++ b/src/argaze/GazeFeatures.py
@@ -88,7 +88,7 @@ class GazePosition():
else:
return distance < self.precision
- def draw(self, frame: numpy.array, color=(0, 255, 255), draw_precision=True):
+ def draw(self, image: numpy.array, color=(0, 255, 255), draw_precision=True):
"""Draw gaze position point and precision circle."""
if self.valid:
@@ -96,11 +96,11 @@ class GazePosition():
int_value = (int(self.value[0]), int(self.value[1]))
# Draw point at position
- cv2.circle(frame, int_value, 2, color, -1)
+ cv2.circle(image, int_value, 2, color, -1)
# Draw precision circle
if self.precision > 0 and draw_precision:
- cv2.circle(frame, int_value, round(self.precision), color, 1)
+ cv2.circle(image, int_value, round(self.precision), color, 1)
class UnvalidGazePosition(GazePosition):
"""Unvalid gaze position."""
@@ -240,7 +240,7 @@ class GazeMovement():
return output
- def draw_positions(self, frame: numpy.array, color=(0, 55, 55)):
+ def draw_positions(self, image: numpy.array, color=(0, 55, 55)):
"""Draw gaze movement positions"""
gaze_positions = self.positions.copy()
@@ -251,10 +251,10 @@ class GazeMovement():
ts_next, next_gaze_position = gaze_positions.first
# Draw start gaze
- start_gaze_position.draw(frame, draw_precision=False)
+ start_gaze_position.draw(image, draw_precision=False)
# Draw movement from start to next
- cv2.line(frame, start_gaze_position, next_gaze_position, color, 1)
+ cv2.line(image, start_gaze_position, next_gaze_position, color, 1)
FixationType = TypeVar('Fixation', bound="Fixation")
# Type definition for type annotation convenience
@@ -274,8 +274,8 @@ class Fixation(GazeMovement):
raise NotImplementedError('merge() method not implemented')
- def draw(self, frame: numpy.array, color):
- """Draw fixation into frame."""
+ def draw(self, image: numpy.array, color):
+ """Draw fixation into image."""
raise NotImplementedError('draw() method not implemented')
@@ -291,8 +291,8 @@ class Saccade(GazeMovement):
super().__post_init__()
- def draw(self, frame: numpy.array, color):
- """Draw saccade into frame."""
+ def draw(self, image: numpy.array, color):
+ """Draw saccade into image."""
raise NotImplementedError('draw() method not implemented')
@@ -519,17 +519,17 @@ class ScanPath(list):
self.__last_fixation = fixation
- def draw(self, frame: numpy.array, fixation_color=(255, 255, 255), saccade_color=(255, 255, 255), deepness=0):
- """Draw scan path into frame."""
+ def draw(self, image: numpy.array, fixation_color=(255, 255, 255), saccade_color=(255, 255, 255), deepness=0):
+ """Draw scan path into image."""
last_step = None
for step in self[-deepness:]:
if last_step != None:
- cv2.line(frame, (int(last_step.first_fixation.focus[0]), int(last_step.first_fixation.focus[1])), (int(step.first_fixation.focus[0]), int(step.first_fixation.focus[1])), saccade_color, 2)
+ cv2.line(image, (int(last_step.first_fixation.focus[0]), int(last_step.first_fixation.focus[1])), (int(step.first_fixation.focus[0]), int(step.first_fixation.focus[1])), saccade_color, 2)
- last_step.first_fixation.draw(frame, fixation_color)
+ last_step.first_fixation.draw(image, fixation_color)
last_step = step
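
The renamed ScanPath.draw signature is used in demo_gaze_features_run.py further below; for reference:

``` python
# Draw the last 10 steps of a scan path into an image
# (raw_scan_path is a GazeFeatures.ScanPath built elsewhere)
raw_scan_path.draw(image, fixation_color=(255, 255, 255), deepness=10)
```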
diff --git a/src/argaze/utils/aruco_markers_scene_export.py b/src/argaze/utils/aruco_markers_scene_export.py
index 8045d24..cf617f7 100644
--- a/src/argaze/utils/aruco_markers_scene_export.py
+++ b/src/argaze/utils/aruco_markers_scene_export.py
@@ -19,7 +19,7 @@ import numpy
def main():
"""
- Load a movie with ArUco markers inside and select a frame into it, detect ArUco markers belonging to a given dictionary and size into the selected frame thanks to given optic parameters and detector parameters then, export detected ArUco scene as .obj file.
+ Load a movie with ArUco markers inside and select an image in it, detect ArUco markers belonging to a given dictionary and size in the selected image thanks to the given optic parameters and detector parameters, then export the detected ArUco scene as a .obj file.
"""
# Manage arguments
@@ -38,8 +38,8 @@ def main():
video_capture = cv2.VideoCapture(args.movie)
video_fps = video_capture.get(cv2.CAP_PROP_FPS)
- frame_width = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
- frame_height = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
+ image_width = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
+ image_height = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
# Load ArUco dictionary
aruco_dictionary = ArUcoMarkersDictionary.ArUcoMarkersDictionary(args.dictionary)
@@ -63,10 +63,10 @@ def main():
# Enable exit signal handler
exit = MiscFeatures.ExitSignalHandler()
- # Init frame selection
- current_frame_index = -1
- _, current_frame = video_capture.read()
- next_frame_index = int(args.start * video_fps)
+ # Init image selection
+ current_image_index = -1
+ _, current_image = video_capture.read()
+ next_image_index = int(args.start * video_fps)
refresh = False
# Hide help
@@ -74,23 +74,23 @@ def main():
while not exit.status():
- # Select a new frame and detect markers once
- if next_frame_index != current_frame_index or refresh:
+ # Select a new image and detect markers once
+ if next_image_index != current_image_index or refresh:
- video_capture.set(cv2.CAP_PROP_POS_FRAMES, next_frame_index)
+ video_capture.set(cv2.CAP_PROP_POS_FRAMES, next_image_index)
- success, video_frame = video_capture.read()
+ success, video_image = video_capture.read()
if success:
# Refresh once
refresh = False
- current_frame_index = video_capture.get(cv2.CAP_PROP_POS_FRAMES) - 1
- current_frame_time = video_capture.get(cv2.CAP_PROP_POS_MSEC)
+ current_image_index = video_capture.get(cv2.CAP_PROP_POS_FRAMES) - 1
+ current_image_time = video_capture.get(cv2.CAP_PROP_POS_MSEC)
# Detect markers
- aruco_detector.detect_markers(video_frame)
+ aruco_detector.detect_markers(video_image)
# Estimate markers pose
aruco_detector.estimate_markers_pose()
@@ -101,48 +101,48 @@ def main():
print(aruco_scene)
# Write scene detected markers
- cv2.putText(video_frame, f'{list(aruco_detector.detected_markers.keys())}', (20, frame_height-80), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(video_image, f'{list(aruco_detector.detected_markers.keys())}', (20, image_height-80), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
# Write timing
- cv2.putText(video_frame, f'Time: {int(current_frame_time)} ms', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(video_image, f'Time: {int(current_image_time)} ms', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
- # Copy frame
- current_frame = video_frame.copy()
+ # Copy image
+ current_image = video_image.copy()
- # Keep last frame
+ # Keep last image
else:
- video_frame = current_frame.copy()
+ video_image = current_image.copy()
# Draw detected markers
- aruco_detector.draw_detected_markers(video_frame)
+ aruco_detector.draw_detected_markers(video_image)
# Write documentation
- cv2.putText(video_frame, f'Press \'h\' for help', (950, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(video_image, f'Press \'h\' for help', (950, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
if draw_help:
- cv2.rectangle(video_frame, (0, 50), (500, 300), (127, 127, 127), -1)
- cv2.putText(video_frame, f'> Left arrow: previous frame', (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
- cv2.putText(video_frame, f'> Right arrow: next frame', (20, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
- cv2.putText(video_frame, f'> Ctrl+s: export ArUco scene', (20, 160), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.rectangle(video_image, (0, 50), (500, 300), (127, 127, 127), -1)
+ cv2.putText(video_image, f'> Left arrow: previous image', (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(video_image, f'> Right arrow: next image', (20, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(video_image, f'> Ctrl+s: export ArUco scene', (20, 160), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
key_pressed = cv2.waitKey(10)
#if key_pressed != -1:
# print(key_pressed)
- # Select previous frame with left arrow
+ # Select previous image with left arrow
if key_pressed == 2:
- next_frame_index -= 1
+ next_image_index -= 1
- # Select next frame with right arrow
+ # Select next image with right arrow
if key_pressed == 3:
- next_frame_index += 1
+ next_image_index += 1
- # Clip frame index
- if next_frame_index < 0:
- next_frame_index = 0
+ # Clip image index
+ if next_image_index < 0:
+ next_image_index = 0
# Switch help mode with h key
if key_pressed == 104:
@@ -153,7 +153,7 @@ def main():
if aruco_scene:
- aruco_scene.to_obj(f'{args.output}/{int(current_frame_time)}-aruco_scene.obj')
+ aruco_scene.to_obj(f'{args.output}/{int(current_image_time)}-aruco_scene.obj')
print(f'ArUco scene saved into {args.output}')
else:
@@ -165,12 +165,12 @@ def main():
break
# Display video
- cv2.imshow(window_name, video_frame)
+ cv2.imshow(window_name, video_image)
# Close movie capture
video_capture.release()
- # Stop frame display
+ # Stop image display
cv2.destroyAllWindows()
if __name__ == '__main__':
diff --git a/src/argaze/utils/camera_calibrate.py b/src/argaze/utils/camera_calibrate.py
index c42b721..8b3249b 100644
--- a/src/argaze/utils/camera_calibrate.py
+++ b/src/argaze/utils/camera_calibrate.py
@@ -45,8 +45,8 @@ def main():
# Enable camera video capture
video_capture = cv2.VideoCapture(args.device)
- frame_width = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
- frame_height = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
+ image_width = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
+ image_height = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
# Create aruco optic calibrator
aruco_optic_calibrator = ArUcoOpticCalibrator.ArUcoOpticCalibrator()
@@ -57,7 +57,7 @@ def main():
# Create aruco detector
aruco_detector = ArUcoDetector.ArUcoDetector(dictionary=args.dictionary, marker_size=args.marker_size)
- print(f'{frame_width}x{frame_height} pixels camera calibration starts')
+ print(f'{image_width}x{image_height} pixels camera calibration starts')
print("Waiting for calibration board...")
expected_markers_number = aruco_board.markers_number
@@ -66,33 +66,33 @@ def main():
# Capture loop
try:
- # Capture frames with a full displayed board inside
+ # Capture images with a full displayed board inside
while video_capture.isOpened():
- success, video_frame = video_capture.read()
+ success, video_image = video_capture.read()
if success:
# Detect calibration board
- aruco_detector.detect_board(video_frame, aruco_board, expected_markers_number)
+ aruco_detector.detect_board(video_image, aruco_board, expected_markers_number)
# Draw detected markers
- aruco_detector.draw_detected_markers(video_frame)
+ aruco_detector.draw_detected_markers(video_image)
# Draw current calibration data count
- cv2.putText(video_frame, f'Capture: {aruco_optic_calibrator.calibration_data_count}', (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
- cv2.imshow('Optic Calibration', video_frame)
+ cv2.putText(video_image, f'Capture: {aruco_optic_calibrator.calibration_data_count}', (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
+ cv2.imshow('Optic Calibration', video_image)
# If all board corners are detected
if aruco_detector.board_corners_number == expected_corners_number:
# Draw board corners to notify a capture is done
- aruco_detector.draw_board(video_frame)
+ aruco_detector.draw_board(video_image)
# Append calibration data
aruco_optic_calibrator.store_calibration_data(aruco_detector.board_corners, aruco_detector.board_corners_identifier)
- cv2.imshow('Optic Calibration', video_frame)
+ cv2.imshow('Optic Calibration', video_image)
# Stop calibration by pressing 'Esc' key
if cv2.waitKey(1) == 27:
@@ -102,11 +102,11 @@ def main():
except KeyboardInterrupt:
pass
- # Stop frame display
+ # Stop image display
cv2.destroyAllWindows()
print('\nCalibrating camera...')
- optic_parameters = aruco_optic_calibrator.calibrate(aruco_board, dimensions=(frame_width, frame_height))
+ optic_parameters = aruco_optic_calibrator.calibrate(aruco_board, dimensions=(image_width, image_height))
if optic_parameters:
diff --git a/src/argaze/utils/demo_ar_features_run.py b/src/argaze/utils/demo_ar_features_run.py
index ec42338..990e234 100644
--- a/src/argaze/utils/demo_ar_features_run.py
+++ b/src/argaze/utils/demo_ar_features_run.py
@@ -17,7 +17,7 @@ import numpy
def main():
"""
- Load AR environment from .json file, detect ArUco markers into camera device frames and estimate environment pose.
+ Load AR environment from .json file, detect ArUco markers in camera device images and estimate environment pose.
"""
current_directory = os.path.dirname(os.path.abspath(__file__))
@@ -65,22 +65,22 @@ def main():
# Waiting for 'ctrl+C' interruption
try:
- # Capture frames
+ # Capture images
while video_capture.isOpened():
- # Read video frame
- success, video_frame = video_capture.read()
+ # Read video image
+ success, video_image = video_capture.read()
- # Create screen frame
- screen_frame = numpy.zeros((240, 320, 3)).astype(numpy.uint8)
+ # Create screen image
+ screen_image = numpy.zeros((240, 320, 3)).astype(numpy.uint8)
if success:
# Detect markers
- demo_environment.aruco_detector.detect_markers(video_frame)
+ demo_environment.aruco_detector.detect_markers(video_image)
# Draw detected markers
- demo_environment.aruco_detector.draw_detected_markers(video_frame)
+ demo_environment.aruco_detector.draw_detected_markers(video_image)
# Try to project scene
try:
@@ -97,33 +97,33 @@ def main():
# Estimate scene pose from detected scene markers
tvec, rmat, _, _ = demo_scene.estimate_pose(demo_environment.aruco_detector.detected_markers)
- # Project AOI scene into video frame according estimated pose
+ # Project AOI scene into video image according to estimated pose
aoi_scene_projection = demo_scene.project(tvec, rmat)
# Draw AOI scene projection
- aoi_scene_projection.draw(video_frame, color=(255, 255, 255))
+ aoi_scene_projection.draw(video_image, color=(255, 255, 255))
# Project pointer into screen
if aoi_scene_projection[screen_name].contains_point(pointer):
inner_x, inner_y = aoi_scene_projection[screen_name].clockwise().inner_axis(pointer)
- cv2.circle(screen_frame, (int(inner_x * screen_size[0]), int(inner_y * screen_size[1])), 5, (255, 255, 255), -1)
+ cv2.circle(screen_image, (int(inner_x * screen_size[0]), int(inner_y * screen_size[1])), 5, (255, 255, 255), -1)
# Catch exceptions raised by estimate_pose and project methods
except (ArFeatures.PoseEstimationFailed, ArFeatures.SceneProjectionFailed) as e:
- cv2.rectangle(video_frame, (0, 50), (700, 100), (127, 127, 127), -1)
- cv2.putText(video_frame, f'Error: {e}', (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.rectangle(video_image, (0, 50), (700, 100), (127, 127, 127), -1)
+ cv2.putText(video_image, f'Error: {e}', (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
- # Draw video frame
- cv2.imshow(demo_environment.name, video_frame)
+ # Draw video image
+ cv2.imshow(demo_environment.name, video_image)
# Draw screen scene
- screen_scene.draw(screen_frame, color=(255, 255, 255))
+ screen_scene.draw(screen_image, color=(255, 255, 255))
- # Draw screen frame
- cv2.imshow(screen_name, screen_frame)
+ # Draw screen image
+ cv2.imshow(screen_name, screen_image)
# Stop by pressing 'Esc' key
if cv2.waitKey(10) == 27:
@@ -136,7 +136,7 @@ def main():
# Close camera video capture
video_capture.release()
- # Stop frame display
+ # Stop image display
cv2.destroyAllWindows()
if __name__ == '__main__':
diff --git a/src/argaze/utils/demo_gaze_features_run.py b/src/argaze/utils/demo_gaze_features_run.py
index 65952ec..8bd0a88 100644
--- a/src/argaze/utils/demo_gaze_features_run.py
+++ b/src/argaze/utils/demo_gaze_features_run.py
@@ -58,8 +58,8 @@ def main():
# Init gaze processing
gaze_position = GazeFeatures.GazePosition()
- screen_frame = AOIFeatures.AOIFrame(aoi_scene_projection['Screen'], window_size)
- screen_frame.heatmap_init()
+ screen_image = AOIFeatures.AOIFrame(aoi_scene_projection['Screen'], window_size)
+ screen_image.heatmap_init()
enable_heatmap = False
clear_heatmap = False
@@ -150,11 +150,11 @@ def main():
# Clear heatmap
if clear_heatmap:
- screen_frame.heatmap_init(10 if enable_heatmap_buffer else 0)
+ screen_image.heatmap_init(10 if enable_heatmap_buffer else 0)
clear_heatmap = False
# Update heatmap
- screen_frame.heatmap_update(gaze_position.value, sigma=0.05)
+ screen_image.heatmap_update(gaze_position.value, sigma=0.05)
else:
@@ -248,7 +248,7 @@ def main():
# Analyse mouse positions
while True:
- aoi_matrix = aoi_scene_image.copy()
+ image = aoi_scene_image.copy()
# Lock gaze movement identification
gaze_movement_lock.acquire()
@@ -258,52 +258,52 @@ def main():
enable_disable = 'disable' if enable_heatmap else 'enable'
buffer_on_off = 'on' if enable_heatmap_buffer else 'off'
buffer_enable_disable = 'disable' if enable_heatmap_buffer else 'enable'
- cv2.putText(aoi_matrix, f'Heatmap: {on_off} (Press \'h\' key to {enable_disable}), Buffer: {buffer_on_off} (Press \'b\' key to {buffer_enable_disable})', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255) if enable_heatmap else (255, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(image, f'Heatmap: {on_off} (Press \'h\' key to {enable_disable}), Buffer: {buffer_on_off} (Press \'b\' key to {buffer_enable_disable})', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255) if enable_heatmap else (255, 255, 255), 1, cv2.LINE_AA)
# Draw gaze spread heatmap
if enable_heatmap:
- aoi_matrix = cv2.addWeighted(screen_frame.heatmap, 0.5, aoi_matrix, 1., 0)
+ image = cv2.addWeighted(screen_image.heatmap, 0.5, image, 1., 0)
else:
# Write identification mode
- cv2.putText(aoi_matrix, f'Gaze movement identification mode: {identification_mode} (Press \'m\' key to switch)', (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, fixation_color[identification_mode], 1, cv2.LINE_AA)
+ cv2.putText(image, f'Gaze movement identification mode: {identification_mode} (Press \'m\' key to switch)', (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, fixation_color[identification_mode], 1, cv2.LINE_AA)
# Write TPM help
on_off = 'on' if enable_tm_analysis else 'off'
display_hide = 'hide' if enable_tm_analysis else 'display'
- cv2.putText(aoi_matrix, f'Transition matrix: {on_off} (Press \'t\' key to {display_hide})', (20, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255) if enable_tm_analysis else (255, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(image, f'Transition matrix: {on_off} (Press \'t\' key to {display_hide})', (20, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255) if enable_tm_analysis else (255, 255, 255), 1, cv2.LINE_AA)
# Write Kc help
on_off = 'on' if enable_kc_analysis else 'off'
display_hide = 'hide' if enable_kc_analysis else 'display'
- cv2.putText(aoi_matrix, f'coefficient K: {on_off} (Press \'k\' key to {display_hide})', (20, 160), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255) if enable_kc_analysis else (255, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(image, f'coefficient K: {on_off} (Press \'k\' key to {display_hide})', (20, 160), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255) if enable_kc_analysis else (255, 255, 255), 1, cv2.LINE_AA)
# Write LZC help
on_off = 'on' if enable_lzc_analysis else 'off'
display_hide = 'hide' if enable_lzc_analysis else 'display'
- cv2.putText(aoi_matrix, f'Lempel-Ziv complexity: {on_off} (Press \'z\' key to {display_hide})', (20, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255) if enable_lzc_analysis else (255, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(image, f'Lempel-Ziv complexity: {on_off} (Press \'z\' key to {display_hide})', (20, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255) if enable_lzc_analysis else (255, 255, 255), 1, cv2.LINE_AA)
# Write N-Gram help
on_off = 'on' if enable_ngram_analysis else 'off'
display_hide = 'hide' if enable_ngram_analysis else 'display'
- cv2.putText(aoi_matrix, f'Tri-Gram: {on_off} (Press \'n\' key to {display_hide})', (20, 240), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255) if enable_ngram_analysis else (255, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(image, f'Tri-Gram: {on_off} (Press \'n\' key to {display_hide})', (20, 240), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255) if enable_ngram_analysis else (255, 255, 255), 1, cv2.LINE_AA)
# Write entropy help
on_off = 'on' if enable_entropy_analysis else 'off'
display_hide = 'hide' if enable_entropy_analysis else 'display'
- cv2.putText(aoi_matrix, f'Entropy: {on_off} (Press \'e\' key to {display_hide})', (20, 280), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255) if enable_entropy_analysis else (255, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(image, f'Entropy: {on_off} (Press \'e\' key to {display_hide})', (20, 280), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255) if enable_entropy_analysis else (255, 255, 255), 1, cv2.LINE_AA)
# Write nni help
on_off = 'on' if enable_nni_analysis else 'off'
display_hide = 'hide' if enable_nni_analysis else 'display'
- cv2.putText(aoi_matrix, f'Nearest neighbor index: {on_off} (Press \'i\' key to {display_hide})', (20, 320), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255) if enable_nni_analysis else (255, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(image, f'Nearest neighbor index: {on_off} (Press \'i\' key to {display_hide})', (20, 320), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255) if enable_nni_analysis else (255, 255, 255), 1, cv2.LINE_AA)
# Write xxr help
on_off = 'on' if enable_xxr_analysis else 'off'
display_hide = 'hide' if enable_xxr_analysis else 'display'
- cv2.putText(aoi_matrix, f'Exploit Explore Ratio: {on_off} (Press \'x\' key to {display_hide})', (20, 360), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255) if enable_xxr_analysis else (255, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(image, f'Exploit Explore Ratio: {on_off} (Press \'x\' key to {display_hide})', (20, 360), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255) if enable_xxr_analysis else (255, 255, 255), 1, cv2.LINE_AA)
# Check fixation identification
if gaze_movement_identifier[identification_mode].current_fixation != None:
@@ -311,21 +311,21 @@ def main():
current_fixation = gaze_movement_identifier[identification_mode].current_fixation
# Draw looked AOI
- aoi_scene_projection.draw_circlecast(aoi_matrix, current_fixation.focus, current_fixation.deviation_max, base_color=(0, 0, 0), matching_color=(255, 255, 255))
+ aoi_scene_projection.draw_circlecast(image, current_fixation.focus, current_fixation.deviation_max, base_color=(0, 0, 0), matching_color=(255, 255, 255))
# Draw current fixation
- current_fixation.draw(aoi_matrix, color=current_fixation_color)
+ current_fixation.draw(image, color=current_fixation_color)
# Draw current fixation gaze positions
- current_fixation.draw_positions(aoi_matrix)
+ current_fixation.draw_positions(image)
else:
# Draw pointer as gaze position
- gaze_position.draw(aoi_matrix, draw_precision=False)
+ gaze_position.draw(image, draw_precision=False)
# Draw AOI scene projection
- aoi_scene_projection.draw(aoi_matrix, color=(0, 0, 0))
+ aoi_scene_projection.draw(image, color=(0, 0, 0))
# Check saccade identification
if gaze_movement_identifier[identification_mode].current_saccade != None:
@@ -333,10 +333,10 @@ def main():
current_saccade = gaze_movement_identifier[identification_mode].current_saccade
# Draw current saccade gaze positions
- current_saccade.draw_positions(aoi_matrix)
+ current_saccade.draw_positions(image)
# Draw last 10 steps of raw scan path
- raw_scan_path.draw(aoi_matrix, fixation_color=fixation_color[identification_mode], deepness=10)
+ raw_scan_path.draw(image, fixation_color=fixation_color[identification_mode], deepness=10)
# Write last 5 steps of aoi scan path
path = ''
@@ -346,12 +346,12 @@ def main():
path += f'> {aoi_scan_path.current_aoi}'
- cv2.putText(aoi_matrix, path, (20, window_size[1]-40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(image, path, (20, window_size[1]-40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
# Draw transition probability matrix
if enable_tm_analysis:
- cv2.putText(aoi_matrix, f'Transition matrix density: {tm_density:.2f}', (20, window_size[1]-160), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(image, f'Transition matrix density: {tm_density:.2f}', (20, window_size[1]-160), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
# Iterate over indexes (departures)
for from_aoi, row in tm_probabilities.iterrows():
@@ -367,66 +367,66 @@ def main():
color = [int(probability*200) + 55, int(probability*200) + 55, int(probability*200) + 55]
- cv2.line(aoi_matrix, start_line, to_center, color, int(probability*10) + 2)
- cv2.line(aoi_matrix, from_center, to_center, [55, 55, 55], 2)
+ cv2.line(image, start_line, to_center, color, int(probability*10) + 2)
+ cv2.line(image, from_center, to_center, [55, 55, 55], 2)
if enable_kc_analysis:
# Write raw Kc analysis
if raw_kc_analysis < 0.:
- cv2.putText(aoi_matrix, f'Raw: Ambient attention', (20, window_size[1]-120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(image, f'Raw: Ambient attention', (20, window_size[1]-120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
elif raw_kc_analysis > 0.:
- cv2.putText(aoi_matrix, f'Raw: Focal attention', (20, window_size[1]-120), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 1, cv2.LINE_AA)
+ cv2.putText(image, f'Raw: Focal attention', (20, window_size[1]-120), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 1, cv2.LINE_AA)
# Write aoi Kc analysis
if aoi_kc_analysis < 0.:
- cv2.putText(aoi_matrix, f'AOI: Ambient attention', (20, window_size[1]-80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(image, f'AOI: Ambient attention', (20, window_size[1]-80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
elif aoi_kc_analysis > 0.:
- cv2.putText(aoi_matrix, f'AOI: Focal attention', (20, window_size[1]-80), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 1, cv2.LINE_AA)
+ cv2.putText(image, f'AOI: Focal attention', (20, window_size[1]-80), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 1, cv2.LINE_AA)
# Write LZC
if enable_lzc_analysis:
- cv2.putText(aoi_matrix, f'Lempel-Ziv complexity: {lzc_analysis}', (20, window_size[1]-200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(image, f'Lempel-Ziv complexity: {lzc_analysis}', (20, window_size[1]-200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
# Write N-Gram
if enable_ngram_analysis:
start = window_size[1] - ((len(ngram_analysis) + 1) * 40)
- cv2.putText(aoi_matrix, f'Tri-Gram:', (window_size[0]-700, start-40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(image, f'Tri-Gram:', (window_size[0]-700, start-40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
for i, (ngram, count) in enumerate(ngram_analysis.items()):
trigram = f'{ngram[0]}>{ngram[1]}>{ngram[2]}'
- cv2.putText(aoi_matrix, f'{trigram}: {count}', (window_size[0]-700, start+(i*40)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(image, f'{trigram}: {count}', (window_size[0]-700, start+(i*40)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
# Write entropy
if enable_entropy_analysis:
- cv2.putText(aoi_matrix, f'Stationary entropy: {entropy_analysis[0]:.3f},', (20, window_size[1]-280), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
- cv2.putText(aoi_matrix, f'Transition entropy: {entropy_analysis[1]:.3f},', (20, window_size[1]-240), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(image, f'Stationary entropy: {entropy_analysis[0]:.3f},', (20, window_size[1]-280), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(image, f'Transition entropy: {entropy_analysis[1]:.3f},', (20, window_size[1]-240), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
# Write NNI
if enable_nni_analysis:
- cv2.putText(aoi_matrix, f'Nearest neighbor index: {nni_analysis:.3f}', (20, window_size[1]-320), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(image, f'Nearest neighbor index: {nni_analysis:.3f}', (20, window_size[1]-320), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
# Write XXR
if enable_xxr_analysis:
- cv2.putText(aoi_matrix, f'Exploit explore ratio: {xxr_analysis:.3f}', (20, window_size[1]-320), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(image, f'Exploit explore ratio: {xxr_analysis:.3f}', (20, window_size[1]-320), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
# Unlock gaze movement identification
gaze_movement_lock.release()
- # Draw frame
- cv2.imshow(window_name, aoi_matrix)
+ # Draw image
+ cv2.imshow(window_name, image)
key_pressed = cv2.waitKey(10)
@@ -500,7 +500,7 @@ def main():
except KeyboardInterrupt:
pass
- # Stop frame display
+ # Stop image display
cv2.destroyAllWindows()
if __name__ == '__main__':
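For readers skimming the hunks above: the gaze-features demo follows a draw-then-show pattern in which every analysis result is written onto a single buffer (now `image`, previously `aoi_matrix`) before one `cv2.imshow` call per iteration. Below is a minimal, self-contained sketch of that pattern; the window name, size, toggle key and flag are illustrative, not taken from the script.

```python
import cv2
import numpy

# Illustrative values only; the demo derives its own window name and size
window_name = 'Gaze analysis'
window_size = (1280, 720)
enable_tm_analysis = False

cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)

while True:

    # Start from a black image each iteration, then overlay text on it
    image = numpy.zeros((window_size[1], window_size[0], 3), dtype=numpy.uint8)

    on_off = 'on' if enable_tm_analysis else 'off'
    color = (0, 255, 255) if enable_tm_analysis else (255, 255, 255)
    cv2.putText(image, f'Transition matrix: {on_off} (Press \'t\' key to toggle)', (20, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, color, 1, cv2.LINE_AA)

    # Show the composed image once per iteration
    cv2.imshow(window_name, image)

    key_pressed = cv2.waitKey(10)

    # Toggle the overlay with 't', quit with Esc
    if key_pressed == ord('t'):
        enable_tm_analysis = not enable_tm_analysis
    if key_pressed == 27:
        break

cv2.destroyAllWindows()
```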
diff --git a/src/argaze/utils/demo_heatmap_run.py b/src/argaze/utils/demo_heatmap_run.py
index df98d33..e4672d4 100644
--- a/src/argaze/utils/demo_heatmap_run.py
+++ b/src/argaze/utils/demo_heatmap_run.py
@@ -10,36 +10,36 @@ import cv2
def main():
window_name = 'Heatmap'
- frame_size = (800, 600)
+ image_size = (800, 600)
aoi = AOIFeatures.AreaOfInterest([[0, 0], [1, 0], [1, 1], [0, 1]])
- aoi_frame = AOIFeatures.AOIFrame(aoi, frame_size)
+ aoi_image = AOIFeatures.AOIFrame(aoi, image_size)
- aoi_frame.heatmap_init()
+ aoi_image.heatmap_init()
cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)
# Update pointer position
def on_mouse_event(event, x, y, flags, param):
- aoi_frame.heatmap_update((x, y), sigma=0.05)
+ aoi_image.heatmap_update((x, y), sigma=0.05)
# Attach mouse callback to window
cv2.setMouseCallback(window_name, on_mouse_event)
while True:
- cv2.imshow(window_name, aoi_frame.heatmap)
+ cv2.imshow(window_name, aoi_image.heatmap)
# Stop and save picture by pressing 'Esc' key
if cv2.waitKey(10) == 27:
current_directory = os.path.dirname(os.path.abspath(__file__))
- cv2.imwrite(os.path.join(current_directory,'heatmap.png'), aoi_frame.heatmap)
+ cv2.imwrite(os.path.join(current_directory,'heatmap.png'), aoi_image.heatmap)
break
- # Stop frame display
+ # Stop image display
cv2.destroyAllWindows()
if __name__ == '__main__':
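The heatmap demo above drives `AOIFrame.heatmap_update` from mouse events, but the update itself is not shown in this diff. As a hypothetical illustration of the kind of Gaussian accumulation such an update could perform (this is not the `AOIFrame` implementation), a sketch:

```python
import cv2
import numpy

class HeatmapSketch:
    """Accumulate Gaussian bumps at pointer positions (illustrative only)."""

    def __init__(self, size):
        self.width, self.height = size
        self.buffer = numpy.zeros((self.height, self.width), dtype=numpy.float32)
        # Precompute pixel coordinate grids once
        self.xx, self.yy = numpy.meshgrid(numpy.arange(self.width), numpy.arange(self.height))

    def update(self, point, sigma=0.05):
        # sigma is taken relative to the image width, matching the demo call
        s = sigma * self.width
        self.buffer += numpy.exp(-((self.xx - point[0])**2 + (self.yy - point[1])**2) / (2 * s**2))

    @property
    def heatmap(self):
        # Normalize accumulated values to 8 bits and colorize
        normalized = cv2.normalize(self.buffer, None, 0, 255, cv2.NORM_MINMAX).astype(numpy.uint8)
        return cv2.applyColorMap(normalized, cv2.COLORMAP_JET)
```

Used in place of `aoi_image`, `HeatmapSketch((800, 600))` would expose a similar `update`/`heatmap` pair to the one the demo relies on.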
diff --git a/src/argaze/utils/environment_edit.py b/src/argaze/utils/environment_edit.py
index fc17f37..1cf1d2f 100644
--- a/src/argaze/utils/environment_edit.py
+++ b/src/argaze/utils/environment_edit.py
@@ -23,7 +23,7 @@ import numpy
def main():
"""
- Load AR environment from .json file, detect ArUco markers into movie frames and estimate environment pose.
+ Load AR environment from .json file, detect ArUco markers in movie images and estimate environment pose.
Edit environment setup to improve pose estimation.
"""
@@ -100,16 +100,16 @@ def main():
video_capture = cv2.VideoCapture(args.movie)
video_fps = video_capture.get(cv2.CAP_PROP_FPS)
- frame_width = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
- frame_height = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
+ image_width = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
+ image_height = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
# Enable exit signal handler
exit = MiscFeatures.ExitSignalHandler()
- # Init frame selection
- current_frame_index = -1
- _, current_frame = video_capture.read()
- next_frame_index = int(args.start * video_fps)
+ # Init image selection
+ current_image_index = -1
+ _, current_image = video_capture.read()
+ next_image_index = int(args.start * video_fps)
refresh = False
# Init marker selection
@@ -125,32 +125,32 @@ def main():
# Edit fake gaze position from pointer
gaze_position = GazeFeatures.GazePosition(pointer, precision=2)
- # Reset info frame
- info_frame = numpy.full((850, 1500, 3), 0, dtype=numpy.uint8)
+ # Reset info image
+ info_image = numpy.full((850, 1500, 3), 0, dtype=numpy.uint8)
- # Select a new frame and detect markers once
- if next_frame_index != current_frame_index or refresh or draw_cover:
+ # Select a new image and detect markers once
+ if next_image_index != current_image_index or refresh or draw_cover:
- video_capture.set(cv2.CAP_PROP_POS_FRAMES, next_frame_index)
+ video_capture.set(cv2.CAP_PROP_POS_FRAMES, next_image_index)
- success, video_frame = video_capture.read()
+ success, video_image = video_capture.read()
if success:
# Refresh once
refresh = False
- current_frame_index = video_capture.get(cv2.CAP_PROP_POS_FRAMES) - 1
- current_frame_time = video_capture.get(cv2.CAP_PROP_POS_MSEC)
+ current_image_index = video_capture.get(cv2.CAP_PROP_POS_FRAMES) - 1
+ current_image_time = video_capture.get(cv2.CAP_PROP_POS_MSEC)
# Hide zone
if draw_cover:
# Draw black circle under pointer
- cv2.circle(video_frame, pointer, 50, (0, 0, 0), -1)
+ cv2.circle(video_image, pointer, 50, (0, 0, 0), -1)
# Detect markers
- ar_environment.aruco_detector.detect_markers(video_frame)
+ ar_environment.aruco_detector.detect_markers(video_image)
# Filter scene markers
scene_markers, _ = ar_scene.aruco_scene.filter_markers(ar_environment.aruco_detector.detected_markers)
@@ -159,27 +159,27 @@ def main():
ar_environment.aruco_detector.estimate_markers_pose(scene_markers.keys())
# Write scene detected markers
- cv2.putText(video_frame, f'{list(scene_markers.keys())}', (20, frame_height-80), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(video_image, f'{list(scene_markers.keys())}', (20, image_height-80), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
# Draw gray panel on top
- cv2.rectangle(video_frame, (0, 0), (frame_width, 50), (63, 63, 63), -1)
+ cv2.rectangle(video_image, (0, 0), (image_width, 50), (63, 63, 63), -1)
# Draw camera calibration
if draw_grid:
- cv2.putText(video_frame, f'Grid at {z_grid} cm', (500, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
- ar_environment.aruco_detector.optic_parameters.draw(video_frame, frame_width/10, frame_height/10, z_grid, color=(127, 127, 127))
+ cv2.putText(video_image, f'Grid at {z_grid} cm', (500, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+ ar_environment.aruco_detector.optic_parameters.draw(video_image, image_width/10, image_height/10, z_grid, color=(127, 127, 127))
# Write timing
- cv2.putText(video_frame, f'Time: {int(current_frame_time)} ms', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(video_image, f'Time: {int(current_image_time)} ms', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
- # Copy frame
- current_frame = video_frame.copy()
+ # Copy image
+ current_image = video_image.copy()
- # Keep last frame
+ # Keep last image
else:
- video_frame = current_frame.copy()
+ video_image = current_image.copy()
# Handle scene marker selection on left click
if len(scene_markers) > 0:
@@ -217,7 +217,7 @@ def main():
m.color = (127, 127, 127)
# Draw center
- cv2.circle(video_frame, m.center.astype(int), 5, m.color, -1)
+ cv2.circle(video_image, m.center.astype(int), 5, m.color, -1)
try:
@@ -230,23 +230,23 @@ def main():
selected_marker = scene_markers[selected_marker_id]
# Write selected marker id
- cv2.rectangle(info_frame, (0, 0), (500, 50), selected_marker.color, -1)
- cv2.putText(info_frame, f'Selected marker #{selected_marker.identifier}', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
- cv2.rectangle(info_frame, (0, 50), (500, frame_height), (255, 255, 255), -1)
+ cv2.rectangle(info_image, (0, 0), (500, 50), selected_marker.color, -1)
+ cv2.putText(info_image, f'Selected marker #{selected_marker.identifier}', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
+ cv2.rectangle(info_image, (0, 50), (500, image_height), (255, 255, 255), -1)
# Write selected marker rotation matrix
R = ArUcoScene.make_euler_rotation_vector(selected_marker.rotation)
- cv2.putText(info_frame, f'Rotation (camera axis)', (20, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
- cv2.putText(info_frame, f'{R[0]:.3f}', (40, 160), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
- cv2.putText(info_frame, f'{R[1]:.3f}', (40, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
- cv2.putText(info_frame, f'{R[2]:.3f}', (40, 240), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'Rotation (camera axis)', (20, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'{R[0]:.3f}', (40, 160), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'{R[1]:.3f}', (40, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'{R[2]:.3f}', (40, 240), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
# Write selected marker translation vector
T = selected_marker.translation
- cv2.putText(info_frame, f'Translation (camera axis):', (20, 320), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
- cv2.putText(info_frame, f'{T[0]:.3f}', (40, 360), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
- cv2.putText(info_frame, f'{T[1]:.3f}', (40, 400), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
- cv2.putText(info_frame, f'{T[2]:.3f}', (40, 440), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'Translation (camera axis):', (20, 320), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'{T[0]:.3f}', (40, 360), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'{T[1]:.3f}', (40, 400), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'{T[2]:.3f}', (40, 440), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
# Retrieve selected marker place
selected_place = ar_scene.aruco_scene.places[selected_marker_id]
@@ -254,7 +254,7 @@ def main():
# On right click
if right_button:
- pointer_delta_x, pointer_delta_y = right_drag[0] / frame_width, right_drag[1] / frame_height
+ pointer_delta_x, pointer_delta_y = right_drag[0] / image_width, right_drag[1] / image_height
place_edit[selected_marker_id] = {'rotation': (0, 0, 0), 'translation': (0, 0, 0)}
@@ -292,40 +292,40 @@ def main():
hovered_marker = scene_markers[hovered_marker_id]
# Write hovered marker id
- cv2.rectangle(info_frame, (500, 0), (1000, 50), hovered_marker.color, -1)
- cv2.putText(info_frame, f'Hovered marker #{hovered_marker.identifier}', (520, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
- cv2.rectangle(info_frame, (500, 50), (1000, frame_height), (255, 255, 255), -1)
+ cv2.rectangle(info_image, (500, 0), (1000, 50), hovered_marker.color, -1)
+ cv2.putText(info_image, f'Hovered marker #{hovered_marker.identifier}', (520, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
+ cv2.rectangle(info_image, (500, 50), (1000, image_height), (255, 255, 255), -1)
# Write hovered marker rotation matrix
R = ArUcoScene.make_euler_rotation_vector(hovered_marker.rotation)
- cv2.putText(info_frame, f'Rotation (camera axis)', (520, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
- cv2.putText(info_frame, f'{R[0]:.3f}', (540, 160), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
- cv2.putText(info_frame, f'{R[1]:.3f}', (540, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
- cv2.putText(info_frame, f'{R[2]:.3f}', (540, 240), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'Rotation (camera axis)', (520, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'{R[0]:.3f}', (540, 160), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'{R[1]:.3f}', (540, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'{R[2]:.3f}', (540, 240), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
# Write hovered marker translation vector
T = hovered_marker.translation
- cv2.putText(info_frame, f'Translation (camera axis):', (520, 320), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
- cv2.putText(info_frame, f'{T[0]:.3f}', (540, 360), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
- cv2.putText(info_frame, f'{T[1]:.3f}', (540, 400), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
- cv2.putText(info_frame, f'{T[2]:.3f}', (540, 440), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'Translation (camera axis):', (520, 320), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'{T[0]:.3f}', (540, 360), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'{T[1]:.3f}', (540, 400), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'{T[2]:.3f}', (540, 440), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
# Retrieve hovered marker place
hovered_place = ar_scene.aruco_scene.places[hovered_marker_id]
# Write hovered place rotation matrix
R = ArUcoScene.make_euler_rotation_vector(hovered_place.rotation)
- cv2.putText(info_frame, f'Rotation (scene axis):', (520, 500), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
- cv2.putText(info_frame, f'{R[0]:.3f}', (540, 540), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
- cv2.putText(info_frame, f'{R[1]:.3f}', (540, 580), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
- cv2.putText(info_frame, f'{R[2]:.3f}', (540, 620), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'Rotation (scene axis):', (520, 500), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'{R[0]:.3f}', (540, 540), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'{R[1]:.3f}', (540, 580), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'{R[2]:.3f}', (540, 620), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
# Write hovered place translation vector
T = hovered_place.translation
- cv2.putText(info_frame, f'Translation (scene axis):', (520, 700), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
- cv2.putText(info_frame, f'{T[0]:.3f}', (540, 740), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
- cv2.putText(info_frame, f'{T[1]:.3f}', (540, 780), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
- cv2.putText(info_frame, f'{T[2]:.3f}', (540, 820), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'Translation (scene axis):', (520, 700), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'{T[0]:.3f}', (540, 740), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'{T[1]:.3f}', (540, 780), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'{T[2]:.3f}', (540, 820), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
# Rotation between markers and places
markers_rotation_matrix = hovered_marker.rotation.dot(selected_marker.rotation.T)
@@ -339,37 +339,37 @@ def main():
places_translation = hovered_place.translation - selected_place.translation
# Write selected/hovered markers id
- cv2.rectangle(info_frame, (1000, 0), (1500, 50), (63, 63, 63), -1)
- cv2.putText(info_frame, f'#{selected_marker.identifier} -> #{hovered_marker.identifier}', (1020, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
- cv2.rectangle(info_frame, (1000, 50), (1500, frame_height), (190, 190, 190), -1)
+ cv2.rectangle(info_image, (1000, 0), (1500, 50), (63, 63, 63), -1)
+ cv2.putText(info_image, f'#{selected_marker.identifier} -> #{hovered_marker.identifier}', (1020, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+ cv2.rectangle(info_image, (1000, 50), (1500, image_height), (190, 190, 190), -1)
# Write selected/hovered markers rotation matrix
R = markers_rotation_vector
- cv2.putText(info_frame, f'Rotation (camera axis)', (1020, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
- cv2.putText(info_frame, f'{R[0]:.3f}', (1040, 160), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
- cv2.putText(info_frame, f'{R[1]:.3f}', (1040, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
- cv2.putText(info_frame, f'{R[2]:.3f}', (1040, 240), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'Rotation (camera axis)', (1020, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'{R[0]:.3f}', (1040, 160), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'{R[1]:.3f}', (1040, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'{R[2]:.3f}', (1040, 240), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
# Write selected/hovered markers translation vector
T = markers_translation
- cv2.putText(info_frame, f'Translation (camera axis):', (1020, 320), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
- cv2.putText(info_frame, f'{T[0]:.3f}', (1040, 360), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
- cv2.putText(info_frame, f'{T[1]:.3f}', (1040, 400), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
- cv2.putText(info_frame, f'{T[2]:.3f}', (1040, 440), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'Translation (camera axis):', (1020, 320), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'{T[0]:.3f}', (1040, 360), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'{T[1]:.3f}', (1040, 400), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'{T[2]:.3f}', (1040, 440), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
# Write selected/hovered places rotation matrix
R = places_rotation_vector
- cv2.putText(info_frame, f'Rotation (scene axis):', (1020, 500), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
- cv2.putText(info_frame, f'{R[0]:.3f}', (1040, 540), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
- cv2.putText(info_frame, f'{R[1]:.3f}', (1040, 580), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
- cv2.putText(info_frame, f'{R[2]:.3f}', (1040, 620), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'Rotation (scene axis):', (1020, 500), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'{R[0]:.3f}', (1040, 540), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'{R[1]:.3f}', (1040, 580), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'{R[2]:.3f}', (1040, 620), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
# Write selected/hovered places translation vector
T = places_translation
- cv2.putText(info_frame, f'Translation (scene axis):', (1020, 700), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
- cv2.putText(info_frame, f'{T[0]:.3f}', (1040, 740), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
- cv2.putText(info_frame, f'{T[1]:.3f}', (1040, 780), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
- cv2.putText(info_frame, f'{T[2]:.3f}', (1040, 820), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'Translation (scene axis):', (1020, 700), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'{T[0]:.3f}', (1040, 740), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'{T[1]:.3f}', (1040, 780), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'{T[2]:.3f}', (1040, 820), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
if snap:
@@ -404,17 +404,17 @@ def main():
# Write edited place rotation matrix
R = ArUcoScene.make_euler_rotation_vector(edited_place.rotation)
- cv2.putText(info_frame, f'Rotation (scene axis):', (20, 500), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
- cv2.putText(info_frame, f'{R[0]:.3f}', (40, 540), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
- cv2.putText(info_frame, f'{R[1]:.3f}', (40, 580), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
- cv2.putText(info_frame, f'{R[2]:.3f}', (40, 620), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'Rotation (scene axis):', (20, 500), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'{R[0]:.3f}', (40, 540), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'{R[1]:.3f}', (40, 580), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'{R[2]:.3f}', (40, 620), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
# Write edited place translation vector
T = edited_place.translation
- cv2.putText(info_frame, f'Translation (scene axis):', (20, 700), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
- cv2.putText(info_frame, f'{T[0]:.3f}', (40, 740), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
- cv2.putText(info_frame, f'{T[1]:.3f}', (40, 780), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
- cv2.putText(info_frame, f'{T[2]:.3f}', (40, 820), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'Translation (scene axis):', (20, 700), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'{T[0]:.3f}', (40, 740), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'{T[1]:.3f}', (40, 780), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'{T[2]:.3f}', (40, 820), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv2.LINE_AA)
# Replace selected place by edited place
ar_scene.aruco_scene.places[selected_marker_id] = edited_place
@@ -423,37 +423,37 @@ def main():
ar_scene.aruco_scene.init_places_consistency()
# Estimate scene pose from each marker
- cv2.putText(video_frame, f'Single marker scene pose estimation', (20, frame_height - 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(video_image, f'Single marker scene pose estimation', (20, image_height - 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
for i, m in scene_markers.items():
tvec, rmat = ar_scene.aruco_scene.estimate_pose_from_single_marker(m)
- # Project AOI scene into frame according estimated pose
+ # Project AOI scene into image according to estimated pose
aoi_scene_projection = ar_scene.project(tvec, rmat, visual_hfov=TobiiSpecifications.VISUAL_HFOV)
if i == selected_marker_id:
# Draw AOI scene projection with gaze
- aoi_scene_projection.draw_circlecast(video_frame, gaze_position, 1, base_color=m.color, matching_color=(255, 255, 255))
+ aoi_scene_projection.draw_circlecast(video_image, gaze_position, 1, base_color=m.color, matching_color=(255, 255, 255))
else:
# Draw AOI scene
- aoi_scene_projection.draw(video_frame, color=m.color)
+ aoi_scene_projection.draw(video_image, color=m.color)
# Draw expected marker places
- ar_scene.draw_places(video_frame)
+ ar_scene.draw_places(video_image)
# Catch missing selected marker
except KeyError:
- cv2.putText(video_frame, f'Marker {selected_marker_id} not found', (20, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
+ cv2.putText(video_image, f'Marker {selected_marker_id} not found', (20, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
# No marker selected
else:
- cv2.putText(info_frame, f'Left click on marker to select it', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'Left click on marker to select it', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
if len(scene_markers) > 1:
@@ -475,10 +475,10 @@ def main():
current_rotation = value['current']
expected_rotation = value['expected']
- cv2.putText(info_frame, f'Unconsistent rotation {label}: [{current_rotation[0]:.3f} {current_rotation[1]:.3f} {current_rotation[2]:.3f}]', (20, 120+line*40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'Inconsistent rotation {label}: [{current_rotation[0]:.3f} {current_rotation[1]:.3f} {current_rotation[2]:.3f}]', (20, 120+line*40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
line += 1
- cv2.putText(info_frame, f'Expected rotation {label}: [{expected_rotation[0]:.3f} {expected_rotation[1]:.3f} {expected_rotation[2]:.3f}]', (20, 120+line*40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'Expected rotation {label}: [{expected_rotation[0]:.3f} {expected_rotation[1]:.3f} {expected_rotation[2]:.3f}]', (20, 120+line*40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
line += 1
for i, (label, value) in enumerate(unconsistencies['translation'].items()):
@@ -486,10 +486,10 @@ def main():
current_translation = value['current']
expected_translation = value['expected']
- cv2.putText(info_frame, f'Unconsistent translation {label}: {current_translation:.3f}', (20, 120+ line*40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'Inconsistent translation {label}: {current_translation:.3f}', (20, 120+ line*40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
line += 1
- cv2.putText(info_frame, f'Expected translation {label}: {expected_translation:.3f}', (20, 120+ line*40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(info_image, f'Expected translation {label}: {expected_translation:.3f}', (20, 120+ line*40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
line += 1
# Force pose mode to single marker scene pose estimation
@@ -500,36 +500,36 @@ def main():
# Single marker scene pose estimation
if pose_mode == 0:
- cv2.putText(video_frame, f'Single marker scene pose estimation', (20, frame_height - 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(video_image, f'Single marker scene pose estimation', (20, image_height - 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
for i, m in scene_markers.items():
tvec, rmat = ar_scene.aruco_scene.estimate_pose_from_single_marker(m)
- # Project AOI scene into frame according estimated pose
+ # Project AOI scene into image according to estimated pose
aoi_scene_projection = ar_scene.project(tvec, rmat, visual_hfov=TobiiSpecifications.VISUAL_HFOV)
# Draw AOI scene
- aoi_scene_projection.draw(video_frame, color=m.color)
+ aoi_scene_projection.draw(video_image, color=m.color)
# Consistent markers scene pose estimation
if pose_mode == 1:
- cv2.putText(video_frame, f'Consistent markers scene pose estimation', (20, frame_height - 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(video_image, f'Consistent markers scene pose estimation', (20, image_height - 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
tvec, rmat = ar_scene.aruco_scene.estimate_pose_from_markers(consistent_markers)
- # Project AOI scene into frame according estimated pose
+ # Project AOI scene into image according to estimated pose
aoi_scene_projection = ar_scene.project(tvec, rmat, visual_hfov=TobiiSpecifications.VISUAL_HFOV)
# Draw AOI scene
- aoi_scene_projection.draw(video_frame, color=(255, 255, 255))
+ aoi_scene_projection.draw(video_image, color=(255, 255, 255))
# ArUco marker axis scene pose estimation
elif pose_mode == 2:
# Write pose estimation strategy
- cv2.putText(video_frame, f'ArUco marker axis scene pose estimation', (20, frame_height - 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(video_image, f'ArUco marker axis scene pose estimation', (20, image_height - 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
for axis_name, axis_markers in ar_scene.aruco_axis.items():
@@ -541,11 +541,11 @@ def main():
tvec, rmat = ar_scene.aruco_scene.estimate_pose_from_axis_markers(origin_marker, horizontal_axis_marker, vertical_axis_marker)
- # Project AOI scene into frame according estimated pose
+ # Project AOI scene into image according to estimated pose
aoi_scene_projection = ar_scene.project(tvec, rmat, visual_hfov=TobiiSpecifications.VISUAL_HFOV)
# Draw AOI scene
- aoi_scene_projection.draw(video_frame, color=(255, 255, 255))
+ aoi_scene_projection.draw(video_image, color=(255, 255, 255))
break
@@ -556,7 +556,7 @@ def main():
elif pose_mode == 3:
# Write pose estimation strategy
- cv2.putText(video_frame, f'ArUco AOI scene building', (20, frame_height - 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(video_image, f'ArUco AOI scene building', (20, image_height - 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
try :
@@ -564,46 +564,46 @@ def main():
aoi_scene_projection = ar_scene.build_aruco_aoi_scene(scene_markers)
# Draw AOI scene
- aoi_scene_projection.draw(video_frame, color=(255, 255, 255))
+ aoi_scene_projection.draw(video_image, color=(255, 255, 255))
except:
pass
# Draw expected marker places
- #ar_scene.draw_places(video_frame)
+ #ar_scene.draw_places(video_image)
# Catch exceptions raised by estimate_pose and project methods
except (ArFeatures.PoseEstimationFailed, ArFeatures.SceneProjectionFailed) as e:
- cv2.rectangle(video_frame, (0, 90), (700, 130), (127, 127, 127), -1)
- cv2.putText(video_frame, f'Error: {e}', (20, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.rectangle(video_image, (0, 90), (700, 130), (127, 127, 127), -1)
+ cv2.putText(video_image, f'Error: {e}', (20, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
- # Draw frame
- cv2.imshow(ar_environment.name, video_frame)
+ # Draw image
+ cv2.imshow(ar_environment.name, video_image)
# Draw detected markers
- ar_environment.aruco_detector.draw_detected_markers(video_frame)
+ ar_environment.aruco_detector.draw_detected_markers(video_image)
# Draw pointer
- gaze_position.draw(video_frame)
+ gaze_position.draw(video_image)
# Write documentation
- cv2.putText(video_frame, f'Press \'h\' for help', (950, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(video_image, f'Press \'h\' for help', (950, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
if draw_help:
- cv2.rectangle(video_frame, (0, 50), (700, 300), (127, 127, 127), -1)
- cv2.putText(video_frame, f'> Left click on marker: select marker', (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
- cv2.putText(video_frame, f'> Left click on frame: unselect marker', (20, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
- cv2.putText(video_frame, f'> T: translate, R: rotate, Z: select axis', (20, 160), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
- cv2.putText(video_frame, f'> Right click and drag: edit axis', (20, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
- cv2.putText(video_frame, f'> Ctrl + S: save environment', (20, 240), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
- cv2.putText(video_frame, f'> Backspace: reload environment', (20, 280), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.rectangle(video_image, (0, 50), (700, 300), (127, 127, 127), -1)
+ cv2.putText(video_image, f'> Left click on marker: select marker', (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(video_image, f'> Left click on image: unselect marker', (20, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(video_image, f'> T: translate, R: rotate, Z: select axis', (20, 160), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(video_image, f'> Right click and drag: edit axis', (20, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(video_image, f'> Ctrl + S: save environment', (20, 240), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(video_image, f'> Backspace: reload environment', (20, 280), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
# Write selected marker id
elif selected_marker_id >= 0:
- cv2.rectangle(video_frame, (0, 50), (700, 90), (127, 127, 127), -1)
+ cv2.rectangle(video_image, (0, 50), (700, 90), (127, 127, 127), -1)
# Select color
if edit_z:
@@ -614,26 +614,26 @@ def main():
color_axis = (0, 255, 255)
if edit_trans:
- cv2.putText(video_frame, f'Rotate marker {selected_marker_id} around axis {str_axis}', (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, color_axis, 1, cv2.LINE_AA)
+ cv2.putText(video_image, f'Rotate marker {selected_marker_id} around axis {str_axis}', (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, color_axis, 1, cv2.LINE_AA)
else:
- cv2.putText(video_frame, f'Translate marker {selected_marker_id} along axis {str_axis}', (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, color_axis, 1, cv2.LINE_AA)
+ cv2.putText(video_image, f'Translate marker {selected_marker_id} along axis {str_axis}', (20, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, color_axis, 1, cv2.LINE_AA)
key_pressed = cv2.waitKey(10)
#if key_pressed != -1:
# print(key_pressed)
- # Select previous frame with left arrow
+ # Select previous image with left arrow
if key_pressed == 2:
- next_frame_index -= 1
+ next_image_index -= 1
- # Select next frame with right arrow
+ # Select next image with right arrow
if key_pressed == 3:
- next_frame_index += 1
+ next_image_index += 1
- # Clip frame index
- if next_frame_index < 0:
- next_frame_index = 0
+ # Clip image index
+ if next_image_index < 0:
+ next_image_index = 0
# Edit rotation with r key
if key_pressed == 114:
@@ -696,15 +696,15 @@ def main():
refresh = True
# Display video
- cv2.imshow(ar_environment.name, video_frame)
+ cv2.imshow(ar_environment.name, video_image)
# Display info
- cv2.imshow('Info', info_frame)
+ cv2.imshow('Info', info_image)
# Close movie capture
video_capture.release()
- # Stop frame display
+ # Stop image display
cv2.destroyAllWindows()
if __name__ == '__main__':
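As a reading aid for the `environment_edit.py` hunks: the editor cycles through four pose-estimation modes, each producing an AOI scene projection drawn onto `video_image`. The outline below strings together only the calls visible in the diff; the surrounding state (`ar_scene`, `scene_markers`, `consistent_markers`, `video_image`) is assumed to exist as in the script, and the axis-marker unpacking is an assumption.

```python
def project_scene(pose_mode, ar_scene, scene_markers, consistent_markers, video_image):

    # Mode 0: estimate pose independently from each detected marker
    if pose_mode == 0:
        for i, m in scene_markers.items():
            tvec, rmat = ar_scene.aruco_scene.estimate_pose_from_single_marker(m)
            # The script also passes visual_hfov=TobiiSpecifications.VISUAL_HFOV
            ar_scene.project(tvec, rmat).draw(video_image, color=m.color)

    # Mode 1: estimate one pose from the mutually consistent markers
    elif pose_mode == 1:
        tvec, rmat = ar_scene.aruco_scene.estimate_pose_from_markers(consistent_markers)
        ar_scene.project(tvec, rmat).draw(video_image, color=(255, 255, 255))

    # Mode 2: estimate pose from an origin marker plus two axis markers
    elif pose_mode == 2:
        for axis_name, axis_markers in ar_scene.aruco_axis.items():
            # How the three markers come out of axis_markers is elided in the
            # diff; only the estimate call itself appears there verbatim
            origin, horizontal, vertical = axis_markers
            tvec, rmat = ar_scene.aruco_scene.estimate_pose_from_axis_markers(origin, horizontal, vertical)
            ar_scene.project(tvec, rmat).draw(video_image, color=(255, 255, 255))
            break

    # Mode 3: build an ArUco AOI scene directly from the detected markers
    elif pose_mode == 3:
        ar_scene.build_aruco_aoi_scene(scene_markers).draw(video_image, color=(255, 255, 255))
```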