author      Théo de la Hogue   2023-05-10 18:11:05 +0200
committer   Théo de la Hogue   2023-05-10 18:11:05 +0200
commit      9a4ba48dbf6c8cd3d7f1a6d3b2aa7b704f6ede8d (patch)
tree        b4089b3dca2cf9e842162285d25b761789e583f9
parent      2d98ac67326dff680feb05ff38df7174503ebccb (diff)
Improving demo scripts.
-rw-r--r--   src/argaze/utils/README.md                        4
-rw-r--r--   src/argaze/utils/demo_ar_features_run.py          8
-rw-r--r--   src/argaze/utils/demo_environment/aoi_scene.jpg   bin 0 -> 19108 bytes
-rw-r--r--   src/argaze/utils/demo_gaze_features_run.py        50
4 files changed, 43 insertions, 19 deletions
diff --git a/src/argaze/utils/README.md b/src/argaze/utils/README.md
index 694240b..dc97c7a 100644
--- a/src/argaze/utils/README.md
+++ b/src/argaze/utils/README.md
@@ -36,7 +36,7 @@ python ./src/argaze/utils/camera_calibrate.py 7 5 5 3 DICT_APRILTAG_16h5 -d DEVI
Load AR environment from **setup.json** file, detect ArUco markers in camera device (-d DEVICE) frames and estimate environment pose.
```
-python ./src/argaze/utils/demo_ar_features_run.py ./src/argaze/utils/demo_environment/setup.json -d DEVICE
+python ./src/argaze/utils/demo_ar_features_run.py -d DEVICE
```
.. note::
@@ -50,5 +50,5 @@ python ./src/argaze/utils/demo_ar_features_run.py ./src/argaze/utils/demo_enviro
Simulate gaze positions using the mouse pointer to illustrate gaze features.
```
-python ./src/argaze/utils/demo_gaze_features_run.py ./src/argaze/utils/demo_environment/setup.json
+python ./src/argaze/utils/demo_gaze_features_run.py
``` \ No newline at end of file
diff --git a/src/argaze/utils/demo_ar_features_run.py b/src/argaze/utils/demo_ar_features_run.py
index cee16e1..92cbe88 100644
--- a/src/argaze/utils/demo_ar_features_run.py
+++ b/src/argaze/utils/demo_ar_features_run.py
@@ -1,6 +1,7 @@
#!/usr/bin/env python
import argparse
+import os
from argaze import ArFeatures, GazeFeatures
@@ -11,15 +12,16 @@ def main():
Load AR environment from .json file, detect ArUco markers in camera device frames and estimate environment pose.
"""
+ current_directory = os.path.dirname(os.path.abspath(__file__))
+
# Manage arguments
parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0])
- parser.add_argument('environment', metavar='ENVIRONMENT', type=str, help='ar environment filepath')
-
parser.add_argument('-d', '--device', metavar='DEVICE', type=int, default=0, help='video capture device id')
args = parser.parse_args()
# Load AR environment
- demo_environment = ArFeatures.ArEnvironment.from_json(args.environment)
+ demo_environment_filepath = os.path.join(current_directory, 'demo_environment/setup.json')
+ demo_environment = ArFeatures.ArEnvironment.from_json(demo_environment_filepath)
print('ArEnvironment:\n', demo_environment)
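
The change above replaces the positional ENVIRONMENT argument with a path resolved relative to the script itself, so the demo runs from any working directory. A minimal standalone sketch of that pattern, assuming only the standard library (the `resolve_demo_setup` helper name is illustrative, not part of ArGaze):

```python
#!/usr/bin/env python
"""Sketch: locate a data file shipped next to the running script."""

import os


def resolve_demo_setup():

    # Directory containing this script, regardless of the current working directory
    current_directory = os.path.dirname(os.path.abspath(__file__))

    # Demo setup file stored alongside the script
    return os.path.join(current_directory, 'demo_environment', 'setup.json')


if __name__ == '__main__':

    print(resolve_demo_setup())
```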
diff --git a/src/argaze/utils/demo_environment/aoi_scene.jpg b/src/argaze/utils/demo_environment/aoi_scene.jpg
new file mode 100644
index 0000000..7aabe63
--- /dev/null
+++ b/src/argaze/utils/demo_environment/aoi_scene.jpg
Binary files differ
diff --git a/src/argaze/utils/demo_gaze_features_run.py b/src/argaze/utils/demo_gaze_features_run.py
index b83d60d..f1c6e19 100644
--- a/src/argaze/utils/demo_gaze_features_run.py
+++ b/src/argaze/utils/demo_gaze_features_run.py
@@ -1,6 +1,7 @@
#!/usr/bin/env python
import argparse
+import os
import time
import threading
@@ -9,30 +10,37 @@ from argaze.GazeAnalysis import *
import cv2
import numpy
+import pandas
def main():
"""
Load AR environment from .json file to project the AOI scene on screen and use the mouse pointer to simulate gaze positions.
"""
+ current_directory = os.path.dirname(os.path.abspath(__file__))
+
# Manage arguments
parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0])
- parser.add_argument('environment', metavar='ENVIRONMENT', type=str, help='ar environment filepath')
-
parser.add_argument('-dev', '--deviation_max_threshold', metavar='DEVIATION_MAX_THRESHOLD', type=int, default=50, help='maximal distance for fixation identification in pixel')
parser.add_argument('-vel', '--velocity_max_threshold', metavar='VELOCITY_MAX_THRESHOLD', type=int, default=1, help='maximal velocity for fixation identification in pixel/millisecond')
parser.add_argument('-dmin', '--duration_min_threshold', metavar='DURATION_MIN_THRESHOLD', type=int, default=200, help='minimal duration for fixation identification in millisecond')
- parser.add_argument('-s', '--window-size', metavar='WINDOW_SIZE', type=tuple, default=(1920, 1080), help='size of window in pixel')
args = parser.parse_args()
# Load AR environment
- demo_environment = ArFeatures.ArEnvironment.from_json(args.environment)
+ demo_environment_filepath = os.path.join(current_directory, 'demo_environment/setup.json')
+ demo_environment = ArFeatures.ArEnvironment.from_json(demo_environment_filepath)
# Access to main AR scene
demo_scene = demo_environment.scenes["AR Scene Demo"]
+ # Load aoi scene image
+ aoi_scene_filepath = os.path.join(current_directory, 'demo_environment/aoi_scene.jpg')
+ aoi_scene_image = cv2.imread(aoi_scene_filepath)
+
+ window_size = [aoi_scene_image.shape[1], aoi_scene_image.shape[0]]
+
# Project AOI scene onto the AOI scene image
- aoi_scene_projection = demo_scene.orthogonal_projection * args.window_size
+ aoi_scene_projection = demo_scene.orthogonal_projection * window_size
# Create a window to display AR environment
window_name = "AOI Scene"
@@ -48,7 +56,7 @@ def main():
visual_scan_path = GazeFeatures.VisualScanPath()
tpm = TransitionProbabilityMatrix.VisualScanPathAnalyzer()
- tpm_analysis = None
+ tpm_analysis = pandas.DataFrame()
gaze_movement_lock = threading.Lock()
@@ -102,8 +110,6 @@ def main():
tpm_analysis = tpm.analyze(visual_scan_path)
- print(tpm_analysis)
-
except GazeFeatures.VisualScanStepError as e:
print(f'Error on {e.aoi} step:', e)
@@ -127,7 +133,7 @@ def main():
# Analyse mouse positions
while True:
- aoi_matrix = numpy.full((int(args.window_size[1]), int(args.window_size[0]), 3), 0, dtype=numpy.uint8)
+ aoi_matrix = aoi_scene_image.copy()
# Write identification mode
cv2.putText(aoi_matrix, f'Gaze movement identification mode: {identification_mode} (Press \'m\' key to switch)', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
@@ -141,10 +147,10 @@ def main():
current_fixation = gaze_movement_identifier[identification_mode].current_fixation
# Draw looked AOI
- aoi_scene_projection.draw_circlecast(aoi_matrix, current_fixation.focus, current_fixation.deviation_max)
+ aoi_scene_projection.draw_circlecast(aoi_matrix, current_fixation.focus, current_fixation.deviation_max, base_color=(0, 0, 0), matching_color=(255, 255, 255))
# Draw current fixation
- cv2.circle(aoi_matrix, (int(current_fixation.focus[0]), int(current_fixation.focus[1])), int(current_fixation.deviation_max), (0, 255, 0), len(current_fixation.positions))
+ cv2.circle(aoi_matrix, (int(current_fixation.focus[0]), int(current_fixation.focus[1])), int(current_fixation.deviation_max), (255, 255, 255), len(current_fixation.positions))
# Draw current fixation gaze positions
gaze_positions = current_fixation.positions.copy()
@@ -165,7 +171,7 @@ def main():
gaze_position.draw(aoi_matrix, draw_precision=False)
# Draw AOI scene projection
- aoi_scene_projection.draw(aoi_matrix, color=(0, 0, 255))
+ aoi_scene_projection.draw(aoi_matrix, color=(0, 0, 0))
# Check saccade identification
if gaze_movement_identifier[identification_mode].current_saccade != None:
@@ -191,8 +197,24 @@ def main():
path += f'> {step.aoi} '
path += f'> {visual_scan_path.current_aoi}'
- cv2.putText(aoi_matrix, path, (20, args.window_size[1]-40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
-
+ cv2.putText(aoi_matrix, path, (20, window_size[1]-40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+
+ # Draw transition probability matrix
+ for from_aoi, column in tpm_analysis.items():
+
+ for to_aoi, probability in column.items():
+
+ if from_aoi != to_aoi and probability > 0.0:
+
+ from_center = aoi_scene_projection[from_aoi].center.astype(int)
+ to_center = aoi_scene_projection[to_aoi].center.astype(int)
+ start_line = (0.5 * from_center + 0.5 * to_center).astype(int)
+
+ color = [int(probability*200) + 55, int(probability*200) + 55, int(probability*200) + 55]
+
+ cv2.line(aoi_matrix, start_line, to_center, color, int(probability*10) + 2)
+ cv2.line(aoi_matrix, from_center, to_center, [55, 55, 55], 2)
+
# Unlock gaze movement identification
gaze_movement_lock.release()
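
The new block in demo_gaze_features_run.py overlays the transition probability matrix on the AOI scene: `tpm_analysis` is a pandas DataFrame indexed by AOI names, and for every pair of distinct AOI with a non-zero probability a line is drawn from the midpoint of the segment toward the destination AOI, brighter and thicker as the probability grows, over a thin grey base line. A self-contained sketch of that drawing logic follows, with hypothetical AOI centers, probabilities, canvas size and output filename standing in for the values the demo derives from `aoi_scene_projection` and the visual scan path analysis:

```python
#!/usr/bin/env python
"""Sketch: draw transition probabilities between AOI centers."""

import cv2
import numpy
import pandas

# Hypothetical AOI centers and transition probabilities; the demo script
# derives both from aoi_scene_projection and the visual scan path analysis.
aoi_centers = {
    'Screen_A': numpy.array([200, 200]),
    'Screen_B': numpy.array([600, 200]),
    'Keyboard': numpy.array([400, 500])
}

tpm_analysis = pandas.DataFrame(
    [[0.0, 0.7, 0.3],
     [0.2, 0.0, 0.8],
     [0.5, 0.5, 0.0]],
    index=aoi_centers.keys(), columns=aoi_centers.keys())

# Stand-in black canvas; the demo draws onto the AOI scene image instead
aoi_matrix = numpy.full((700, 800, 3), 0, dtype=numpy.uint8)

for from_aoi, column in tpm_analysis.items():

    for to_aoi, probability in column.items():

        if from_aoi != to_aoi and probability > 0.0:

            from_center = aoi_centers[from_aoi]
            to_center = aoi_centers[to_aoi]

            # Start halfway along the segment so opposite directions stay readable
            start_line = 0.5 * from_center + 0.5 * to_center

            # Brighter and thicker lines for more probable transitions
            color = [int(probability * 200) + 55] * 3

            cv2.line(aoi_matrix,
                     (int(start_line[0]), int(start_line[1])),
                     (int(to_center[0]), int(to_center[1])),
                     color, int(probability * 10) + 2)

            # Thin grey base line over the whole segment
            cv2.line(aoi_matrix,
                     (int(from_center[0]), int(from_center[1])),
                     (int(to_center[0]), int(to_center[1])),
                     [55, 55, 55], 2)

cv2.imwrite('tpm_sketch.png', aoi_matrix)
```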