-rw-r--r--  src/argaze.test/GazeAnalysis/NearestNeighborIndex.py               6
-rw-r--r--  src/argaze.test/GazeFeatures.py                                    10
-rw-r--r--  src/argaze/ArFeatures.py                                           94
-rw-r--r--  src/argaze/GazeAnalysis/NearestNeighborIndex.py                     2
-rw-r--r--  src/argaze/utils/demo_ar_features_run.py                           26
-rw-r--r--  src/argaze/utils/demo_environment/demo_ar_features_setup.json       4
-rw-r--r--  src/argaze/utils/demo_environment/demo_gaze_features_setup.json     4
-rw-r--r--  src/argaze/utils/demo_environment/frame_background.jpg (renamed from src/argaze/utils/demo_environment/screen_background.jpg)  bin 19108 -> 19108 bytes
-rw-r--r--  src/argaze/utils/demo_gaze_features_run.py                         88
9 files changed, 118 insertions(+), 116 deletions(-)
diff --git a/src/argaze.test/GazeAnalysis/NearestNeighborIndex.py b/src/argaze.test/GazeAnalysis/NearestNeighborIndex.py
index abd6b2c..c434e1e 100644
--- a/src/argaze.test/GazeAnalysis/NearestNeighborIndex.py
+++ b/src/argaze.test/GazeAnalysis/NearestNeighborIndex.py
@@ -21,11 +21,11 @@ class TestScanPathAnalyzer(unittest.TestCase):
def test_analyze(self):
"""Test analyze."""
- screen_dimension = (100, 100)
+ frame_dimension = (100, 100)
- nni_analyzer = NearestNeighborIndex.ScanPathAnalyzer(size=screen_dimension)
+ nni_analyzer = NearestNeighborIndex.ScanPathAnalyzer(size=frame_dimension)
- scan_path = GazeFeaturesTest.build_scan_path(6, screen_dimension)
+ scan_path = GazeFeaturesTest.build_scan_path(6, frame_dimension)
# Check aoi scan path
self.assertEqual(len(scan_path), 6)
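For reference, a minimal sketch of the renamed test flow outside unittest, assuming the test helpers are importable as GazeFeaturesTest and that analyze() is the analyzer entry point exercised by test_analyze:

from argaze.GazeAnalysis import NearestNeighborIndex
from argaze.test import GazeFeaturesTest  # hypothetical import path for the test helper module

# Build the analyzer over a 100x100 pixel frame, as in the test above
frame_dimension = (100, 100)
nni_analyzer = NearestNeighborIndex.ScanPathAnalyzer(size=frame_dimension)

# Build a 6-step random scan path bounded by the frame and analyze it
scan_path = GazeFeaturesTest.build_scan_path(6, frame_dimension)
nni_analyzer.analyze(scan_path)
print(nni_analyzer.nearest_neighbor_index)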
diff --git a/src/argaze.test/GazeFeatures.py b/src/argaze.test/GazeFeatures.py
index 8cd2e56..dd3f1c0 100644
--- a/src/argaze.test/GazeFeatures.py
+++ b/src/argaze.test/GazeFeatures.py
@@ -15,7 +15,7 @@ from argaze import GazeFeatures
import numpy
import pandas
-def random_gaze_positions(size, screen_dimension: tuple[float, float] = (1, 1)):
+def random_gaze_positions(size, frame_dimension: tuple[float, float] = (1, 1)):
""" Generate random TimeStampedGazePsoitions for testing purpose.
Timestamps are current time.
GazePositions are random values.
@@ -29,7 +29,7 @@ def random_gaze_positions(size, screen_dimension: tuple[float, float] = (1, 1)):
for i in range(0, size):
# Edit gaze position
- random_gaze_position = GazeFeatures.GazePosition((random.random() * screen_dimension[0], random.random() * screen_dimension[1]))
+ random_gaze_position = GazeFeatures.GazePosition((random.random() * frame_dimension[0], random.random() * frame_dimension[1]))
# Store gaze position
ts_gaze_positions[time.time()] = random_gaze_position
@@ -303,18 +303,18 @@ class TestScanStepClass(unittest.TestCase):
self.assertEqual(scan_step.last_saccade, saccade)
self.assertGreater(scan_step.duration, 0)
-def build_scan_path(size, screen_dimension: tuple[float, float] = (1, 1)):
+def build_scan_path(size, frame_dimension: tuple[float, float] = (1, 1)):
"""Build scan path"""
scan_path = GazeFeatures.ScanPath()
for i in range(size):
- fixation = TestFixation(random_gaze_positions(10, screen_dimension))
+ fixation = TestFixation(random_gaze_positions(10, frame_dimension))
ts, _ = fixation.positions.first
scan_path.append_fixation(ts, fixation)
- saccade = TestSaccade(random_gaze_positions(2, screen_dimension))
+ saccade = TestSaccade(random_gaze_positions(2, frame_dimension))
ts, _ = saccade.positions.first
scan_path.append_saccade(ts, saccade)
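A quick hedged sketch of how these two renamed helpers compose (the frame_dimension default of (1, 1) yields normalized coordinates; the pixel size below is hypothetical):

# Assuming random_gaze_positions and build_scan_path, as defined above, are in scope
frame_dimension = (1920, 1080)

# 10 random gaze positions spread across the frame, timestamped at call time
ts_gaze_positions = random_gaze_positions(10, frame_dimension)

# A scan path of 6 fixation/saccade pairs built from such positions
scan_path = build_scan_path(6, frame_dimension)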
diff --git a/src/argaze/ArFeatures.py b/src/argaze/ArFeatures.py
index 6e651f1..3e1a56f 100644
--- a/src/argaze/ArFeatures.py
+++ b/src/argaze/ArFeatures.py
@@ -29,7 +29,7 @@ ArEnvironmentType = TypeVar('ArEnvironment', bound="ArEnvironment")
ArSceneType = TypeVar('ArScene', bound="ArScene")
# Type definition for type annotation convenience
-ArScreenType = TypeVar('ArScreen', bound="ArScreen")
+ArFrameType = TypeVar('ArFrame', bound="ArFrame")
# Type definition for type annotation convenience
class EnvironmentJSONLoadingFailed(Exception):
@@ -165,27 +165,27 @@ class ArEnvironment():
new_aoi_3d_scene = AOI3DScene.AOI3DScene(aoi_3d_scene_value)
- # Build screens
- new_screens = {}
- for screen_name, screen_data in scene_data.pop('screens').items():
+ # Build frames
+ new_frames = {}
+ for frame_name, frame_data in scene_data.pop('frames').items():
- new_screen_size = screen_data.pop('size')
+ new_frame_size = frame_data.pop('size')
# Load background image
try:
- new_screen_background_value = screen_data.pop('background')
- new_screen_background = cv2.imread(os.path.join(working_directory, new_screen_background_value))
- new_screen_background = cv2.resize(new_screen_background, dsize=(new_screen_size[0], new_screen_size[1]), interpolation=cv2.INTER_CUBIC)
+ new_frame_background_value = frame_data.pop('background')
+ new_frame_background = cv2.imread(os.path.join(working_directory, new_frame_background_value))
+ new_frame_background = cv2.resize(new_frame_background, dsize=(new_frame_size[0], new_frame_size[1]), interpolation=cv2.INTER_CUBIC)
except KeyError:
- new_screen_background = numpy.zeros((new_screen_size[1], new_screen_size[0], 3)).astype(numpy.uint8)
+ new_frame_background = numpy.zeros((new_frame_size[1], new_frame_size[0], 3)).astype(numpy.uint8)
# Load gaze movement identifier
try:
- gaze_movement_identifier_value = screen_data.pop('gaze_movement_identifier')
+ gaze_movement_identifier_value = frame_data.pop('gaze_movement_identifier')
gaze_movement_identifier_type, gaze_movement_identifier_parameters = gaze_movement_identifier_value.popitem()
@@ -201,7 +201,7 @@ class ArEnvironment():
try:
- new_scan_path_analyzers_value = screen_data.pop('scan_path_analyzers')
+ new_scan_path_analyzers_value = frame_data.pop('scan_path_analyzers')
for scan_path_analyzer_type, scan_path_analyzer_parameters in new_scan_path_analyzers_value.items():
@@ -245,7 +245,7 @@ class ArEnvironment():
try:
- new_aoi_scan_path_analyzers_value = screen_data.pop('aoi_scan_path_analyzers')
+ new_aoi_scan_path_analyzers_value = frame_data.pop('aoi_scan_path_analyzers')
for aoi_scan_path_analyzer_type, aoi_scan_path_analyzer_parameters in new_aoi_scan_path_analyzers_value.items():
@@ -284,11 +284,11 @@ class ArEnvironment():
pass
- # Append new screen
- new_screens[screen_name] = ArScreen.from_scene(new_aoi_3d_scene, screen_name, new_screen_size, new_screen_background, gaze_movement_identifier, new_scan_path_analyzers, new_aoi_scan_path_analyzers, **screen_data)
+ # Append new frame
+ new_frames[frame_name] = ArFrame.from_scene(new_aoi_3d_scene, frame_name, new_frame_size, new_frame_background, gaze_movement_identifier, new_scan_path_analyzers, new_aoi_scan_path_analyzers, **frame_data)
# Append new scene
- new_scenes[scene_name] = ArScene(new_aruco_scene, new_aoi_3d_scene, new_screens, **scene_data)
+ new_scenes[scene_name] = ArScene(new_aruco_scene, new_aoi_3d_scene, new_frames, **scene_data)
return ArEnvironment(new_name, new_aruco_detector, new_scenes)
@@ -307,16 +307,16 @@ class ArEnvironment():
return output
@property
- def screens(self):
- """Iterate over all environment screens"""
+ def frames(self):
+ """Iterate over all environment frames"""
# For each scene
for scene_name, scene in self.scenes.items():
- # For each screen
- for screen_name, screen in scene.screens.items():
+ # For each frame
+ for frame_name, frame in scene.frames.items():
- yield scene_name, screen_name, screen
+ yield scene_name, frame_name, frame
def detect_and_project(self, image: numpy.array) -> dict:
"""Detect environment aruco markers from image and project scenes."""
@@ -374,23 +374,23 @@ class ArEnvironment():
aoi_2d_scene = self.__aoi_2d_scenes[scene_name]
- # For each scene screens
- for screen_name, screen in scene.screens.items():
+ # For each scene frame
+ for frame_name, frame in scene.frames.items():
# TODO: Add option to use gaze precision circle
- if aoi_2d_scene[screen.name].contains_point(gaze_position.value):
+ if aoi_2d_scene[frame.name].contains_point(gaze_position.value):
- inner_x, inner_y = self.__aoi_2d_scenes[scene_name][screen.name].clockwise().inner_axis(gaze_position.value)
+ inner_x, inner_y = self.__aoi_2d_scenes[scene_name][frame.name].clockwise().inner_axis(gaze_position.value)
# QUESTION: How to project gaze precision?
inner_gaze_position = GazeFeatures.GazePosition((inner_x, inner_y))
- gaze_movement, look_at, scan_step_analysis, aoi_scan_step_analysis = screen.look(timestamp, inner_gaze_position * screen.size)
+ gaze_movement, look_at, scan_step_analysis, aoi_scan_step_analysis = frame.look(timestamp, inner_gaze_position * frame.size)
# Generate looking data
if data_generator:
- yield scene_name, screen_name, screen, gaze_movement, look_at, scan_step_analysis, aoi_scan_step_analysis
+ yield scene_name, frame_name, frame, gaze_movement, look_at, scan_step_analysis, aoi_scan_step_analysis
# Ignore missing aoi scene projection
except KeyError:
@@ -449,7 +449,7 @@ class ArScene():
aoi_3d_scene: AOI 3D scene description that will be projected onto estimated scene once its pose will be estimated : see [project][argaze.ArFeatures.ArScene.project] function below.
- screens: All scene screens
+ frames: All scene frames
aruco_axis: Optional dictionary to define orthogonal axis where each axis is defined by list of 3 markers identifier (first is origin). \
This pose estimation strategy is used by [estimate_pose][argaze.ArFeatures.ArScene.estimate_pose] function when at least 3 markers are detected.
@@ -463,7 +463,7 @@ class ArScene():
aruco_scene: ArUcoScene.ArUcoScene = field(default_factory=ArUcoScene.ArUcoScene)
aoi_3d_scene: AOI3DScene.AOI3DScene = field(default_factory=AOI3DScene.AOI3DScene)
- screens: dict = field(default_factory=dict)
+ frames: dict = field(default_factory=dict)
aruco_axis: dict = field(default_factory=dict)
aruco_aoi: dict = field(default_factory=dict)
angle_tolerance: float = field(default=0.)
@@ -477,9 +477,9 @@ class ArScene():
# Preprocess orthogonal projection to speed up further aruco aoi processing
self.__orthogonal_projection_cache = self.aoi_3d_scene.orthogonal_projection
- # Setup screens scene after screen creation
- for name, screen in self.screens.items():
- screen._scene = self
+ # Setup frames scene after frame creation
+ for name, frame in self.frames.items():
+ frame._scene = self
def __str__(self) -> str:
"""
@@ -662,13 +662,13 @@ class ArScene():
self.aruco_scene.draw_places(image, self._environment.aruco_detector.optic_parameters.K, self._environment.aruco_detector.optic_parameters.D)
@dataclass
-class ArScreen():
+class ArFrame():
"""
- Define Augmented Reality screen as an AOI2DScene made from a projected then reframed parent AOI3DScene.
+ Define Augmented Reality frame as an AOI2DScene made from a projected then reframed parent AOI3DScene.
Parameters:
- name: name of the screen
- size: screen dimension in pixel.
+ name: name of the frame
+ size: frame dimension in pixel.
background: image to draw behind
aoi_2d_scene: AOI 2D scene description ... : see [orthogonal_projection][argaze.ArFeatures.ArScene.orthogonal_projection] and [reframe][argaze.AreaOfInterest.AOI2DScene.reframe] functions.
...
@@ -702,11 +702,11 @@ class ArScreen():
self.__looking_lock = threading.Lock()
@classmethod
- def from_scene(self, aoi_3d_scene, aoi_name, size, background, gaze_movement_identifier, scan_path_analyzers: list = [], aoi_scan_path_analyzers: list = [], heatmap: bool = False) -> ArScreenType:
+ def from_scene(self, aoi_3d_scene, aoi_name, size, background, gaze_movement_identifier, scan_path_analyzers: list = [], aoi_scan_path_analyzers: list = [], heatmap: bool = False) -> ArFrameType:
aoi_2d_scene = aoi_3d_scene.orthogonal_projection.reframe(aoi_name, size)
- return ArScreen(aoi_name, \
+ return ArFrame(aoi_name, \
size, \
background, \
aoi_2d_scene, \
@@ -720,9 +720,9 @@ class ArScreen():
@property
def current_gaze_position(self):
- """Get current gaze position on screen."""
+ """Get current gaze position on frame."""
- # Wait for screen to be unlocked
+ # Wait for frame to be unlocked
while self.__looking_lock.locked():
pass
@@ -730,20 +730,20 @@ class ArScreen():
@property
def current_gaze_movement(self):
- """Get current gaze movement on screen."""
+ """Get current gaze movement on frame."""
- # Wait for screen to be unlocked
+ # Wait for frame to be unlocked
while self.__looking_lock.locked():
pass
- # Check current screen fixation
+ # Check current frame fixation
current_fixation = self.gaze_movement_identifier.current_fixation
if current_fixation.valid:
return current_fixation
- # Check current screen saccade
+ # Check current frame saccade
current_saccade = self.gaze_movement_identifier.current_saccade
if current_saccade.valid:
@@ -761,7 +761,7 @@ class ArScreen():
if scan_path != None:
- # Wait for screen to be unlocked
+ # Wait for frame to be unlocked
while self.__looking_lock.locked():
pass
@@ -781,7 +781,7 @@ class ArScreen():
if aoi_scan_path != None:
- # Wait for screen to be unlocked
+ # Wait for frame to be unlocked
while self.__looking_lock.locked():
pass
@@ -803,7 +803,7 @@ class ArScreen():
aoi_scan_step: new scan step (if aoi_scan_path is instantiated)
"""
- # Lock screen exploitation
+ # Lock frame exploitation
self.__looking_lock.acquire()
# Update internal gaze position
@@ -880,7 +880,7 @@ class ArScreen():
self.heatmap.update(self.__gaze_position.value, sigma=0.05)
- # Unlock screen exploitation
+ # Unlock frame exploitation
self.__looking_lock.release()
# Return looking data
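After this change the renamed API reads as follows; a minimal usage sketch, assuming a setup JSON shaped like the demo files below:

from argaze import ArFeatures, GazeFeatures

ar_environment = ArFeatures.ArEnvironment.from_json('demo_gaze_features_setup.json')

# The frames property yields (scene name, frame name, frame) triples across all scenes
for scene_name, frame_name, frame in ar_environment.frames:
    print(scene_name, frame_name, frame.size)

# ArFrame.look projects a timestamped gaze position and returns looking data
ar_frame = ar_environment.scenes['AR Scene Demo'].frames['GrayRectangle']
gaze_movement, look_at, scan_step_analysis, aoi_scan_step_analysis = ar_frame.look(0, GazeFeatures.GazePosition((320, 240)))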
diff --git a/src/argaze/GazeAnalysis/NearestNeighborIndex.py b/src/argaze/GazeAnalysis/NearestNeighborIndex.py
index 33b3333..3a3807f 100644
--- a/src/argaze/GazeAnalysis/NearestNeighborIndex.py
+++ b/src/argaze/GazeAnalysis/NearestNeighborIndex.py
@@ -26,7 +26,7 @@ class ScanPathAnalyzer(GazeFeatures.ScanPathAnalyzer):
"""Implementation of Nearest Neighbor Index (NNI) as described in Di Nocera et al., 2006
Parameters:
- size: screen dimension.
+ size: frame dimension.
"""
size: tuple[float, float]
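The metric itself follows Clark and Evans: NNI is the mean observed nearest-neighbor distance between fixation points divided by the mean distance expected for the same number of points spread uniformly at random over the frame area. A hedged reimplementation of that idea in plain numpy, not argaze's own code:

import numpy

def nearest_neighbor_index(points, size):
    """NNI over 2D fixation points; size is the (width, height) of the frame."""
    points = numpy.asarray(points, dtype=float)
    n = len(points)
    # Pairwise Euclidean distances, with self-distances masked out
    delta = points[:, None, :] - points[None, :, :]
    distances = numpy.sqrt((delta ** 2).sum(axis=2))
    numpy.fill_diagonal(distances, numpy.inf)
    observed = distances.min(axis=1).mean()
    # Expected mean nearest-neighbor distance for n random points in the frame area
    expected = 0.5 * numpy.sqrt((size[0] * size[1]) / n)
    return observed / expected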
diff --git a/src/argaze/utils/demo_ar_features_run.py b/src/argaze/utils/demo_ar_features_run.py
index 0f2c2c2..5d03fd9 100644
--- a/src/argaze/utils/demo_ar_features_run.py
+++ b/src/argaze/utils/demo_ar_features_run.py
@@ -32,6 +32,8 @@ def main():
# Load AR environment
ar_environment = ArFeatures.ArEnvironment.from_json(args.environment)
+ print(ar_environment)
+
# Create a window to display AR environment
cv2.namedWindow(ar_environment.name, cv2.WINDOW_AUTOSIZE)
@@ -81,25 +83,25 @@ def main():
# Display environment
cv2.imshow(ar_environment.name, video_image)
- # Draw and display each screens
- for scene_name, screen_name, screen in ar_environment.screens:
+ # Draw and display each frame
+ for scene_name, frame_name, frame in ar_environment.frames:
- image = screen.background.copy()
+ image = frame.background.copy()
- screen.aoi_2d_scene.draw(image, color=(255, 255, 255))
- screen.current_gaze_position.draw(image, color=(255, 255, 255))
+ frame.aoi_2d_scene.draw(image, color=(255, 255, 255))
+ frame.current_gaze_position.draw(image, color=(255, 255, 255))
- screen.current_gaze_movement.draw(image, color=(0, 255, 255))
- screen.current_gaze_movement.draw_positions(image)
+ frame.current_gaze_movement.draw(image, color=(0, 255, 255))
+ frame.current_gaze_movement.draw_positions(image)
- # Check screen fixation
- if GazeFeatures.is_fixation(screen.current_gaze_movement):
+ # Check frame fixation
+ if GazeFeatures.is_fixation(frame.current_gaze_movement):
# Draw looked AOI
- screen.aoi_2d_scene.draw_circlecast(image, screen.current_gaze_movement.focus, screen.current_gaze_movement.deviation_max, base_color=(0, 0, 0), matching_color=(255, 255, 255))
+ frame.aoi_2d_scene.draw_circlecast(image, frame.current_gaze_movement.focus, frame.current_gaze_movement.deviation_max, base_color=(0, 0, 0), matching_color=(255, 255, 255))
- # Display screen
- cv2.imshow(f'{scene_name}:{screen_name}', image)
+ # Display frame
+ cv2.imshow(f'{scene_name}:{frame_name}', image)
# Stop by pressing 'Esc' key
if cv2.waitKey(10) == 27:
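The per-frame drawing sequence above also works headless; a hedged variant that writes each frame image to disk instead of a window (the output file names are hypothetical):

import cv2

for scene_name, frame_name, frame in ar_environment.frames:
    image = frame.background.copy()
    frame.aoi_2d_scene.draw(image, color=(255, 255, 255))
    frame.current_gaze_position.draw(image, color=(255, 255, 255))
    cv2.imwrite(f'{scene_name}-{frame_name}.png', image)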
diff --git a/src/argaze/utils/demo_environment/demo_ar_features_setup.json b/src/argaze/utils/demo_environment/demo_ar_features_setup.json
index 05b0d0b..5d5b139 100644
--- a/src/argaze/utils/demo_environment/demo_ar_features_setup.json
+++ b/src/argaze/utils/demo_environment/demo_ar_features_setup.json
@@ -16,10 +16,10 @@
"AR Scene Demo" : {
"aruco_scene": "aruco_scene.obj",
"aoi_3d_scene": "aoi_3d_scene.obj",
- "screens": {
+ "frames": {
"GrayRectangle": {
"size": [640, 480],
- "background": "screen_background.jpg",
+ "background": "frame_background.jpg",
"gaze_movement_identifier": {
"DispersionThresholdIdentification": {
"deviation_max_threshold": 50,
diff --git a/src/argaze/utils/demo_environment/demo_gaze_features_setup.json b/src/argaze/utils/demo_environment/demo_gaze_features_setup.json
index 86f9a5e..c4b5cc2 100644
--- a/src/argaze/utils/demo_environment/demo_gaze_features_setup.json
+++ b/src/argaze/utils/demo_environment/demo_gaze_features_setup.json
@@ -3,10 +3,10 @@
"scenes": {
"AR Scene Demo" : {
"aoi_3d_scene": "aoi_3d_scene.obj",
- "screens": {
+ "frames": {
"GrayRectangle": {
"size": [1920, 1149],
- "background": "screen_background.jpg",
+ "background": "frame_background.jpg",
"gaze_movement_identifier": {
"DispersionThresholdIdentification": {
"deviation_max_threshold": 50,
diff --git a/src/argaze/utils/demo_environment/screen_background.jpg b/src/argaze/utils/demo_environment/frame_background.jpg
index 7aabe63..7aabe63 100644
--- a/src/argaze/utils/demo_environment/screen_background.jpg
+++ b/src/argaze/utils/demo_environment/frame_background.jpg
Binary files differ
diff --git a/src/argaze/utils/demo_gaze_features_run.py b/src/argaze/utils/demo_gaze_features_run.py
index e30f867..9cb0fb6 100644
--- a/src/argaze/utils/demo_gaze_features_run.py
+++ b/src/argaze/utils/demo_gaze_features_run.py
@@ -35,11 +35,11 @@ def main():
# Load AR environment
ar_environment = ArFeatures.ArEnvironment.from_json(args.environment)
- # Select AR screen
- ar_screen = ar_environment.scenes["AR Scene Demo"].screens["GrayRectangle"]
+ # Select AR frame
+ ar_frame = ar_environment.scenes["AR Scene Demo"].frames["GrayRectangle"]
# Create a window to display AR environment
- cv2.namedWindow(ar_screen.name, cv2.WINDOW_AUTOSIZE)
+ cv2.namedWindow(ar_frame.name, cv2.WINDOW_AUTOSIZE)
# Heatmap buffer display option
enable_heatmap_buffer = False
@@ -55,15 +55,15 @@ def main():
# Edit millisecond timestamp
timestamp = int((time.time() - start_time) * 1e3)
- # Project gaze position into screen
- ar_screen.look(timestamp, GazeFeatures.GazePosition((x, y)))
+ # Project gaze position into frame
+ ar_frame.look(timestamp, GazeFeatures.GazePosition((x, y)))
except GazeFeatures.AOIScanStepError as e:
print(f'Error on {e.aoi} step:', e)
# Attach mouse callback to window
- cv2.setMouseCallback(ar_screen.name, on_mouse_event)
+ cv2.setMouseCallback(ar_frame.name, on_mouse_event)
# Waiting for 'ctrl+C' interruption
try:
@@ -71,13 +71,13 @@ def main():
# Analyse mouse positions
while True:
- # Draw screen
- image = ar_screen.background.copy()
+ # Draw frame
+ image = ar_frame.background.copy()
# Draw heatmap
- if ar_screen.heatmap:
+ if ar_frame.heatmap:
- image = cv2.addWeighted(ar_screen.heatmap.image, 0.5, image, 1., 0)
+ image = cv2.addWeighted(ar_frame.heatmap.image, 0.5, image, 1., 0)
# Write heatmap buffer manual
buffer_on_off = 'on' if enable_heatmap_buffer else 'off'
@@ -85,39 +85,39 @@ def main():
cv2.putText(image, f'Heatmap buffer: {buffer_on_off} (Press \'b\' key to {buffer_display_disable})', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255) if enable_heatmap_buffer else (255, 255, 255), 1, cv2.LINE_AA)
# Draw AOI
- ar_screen.aoi_2d_scene.draw(image, color=(0, 0, 0))
+ ar_frame.aoi_2d_scene.draw(image, color=(0, 0, 0))
# Draw gaze position
- ar_screen.current_gaze_position.draw(image, color=(255, 255, 255))
+ ar_frame.current_gaze_position.draw(image, color=(255, 255, 255))
# Draw gaze movements
- current_gaze_movement = ar_screen.current_gaze_movement
+ current_gaze_movement = ar_frame.current_gaze_movement
current_gaze_movement.draw(image, color=(0, 255, 255))
current_gaze_movement.draw_positions(image)
- # Check screen fixation
+ # Check frame fixation
if GazeFeatures.is_fixation(current_gaze_movement):
# Draw looked AOI
- ar_screen.aoi_2d_scene.draw_circlecast(image, current_gaze_movement.focus, current_gaze_movement.deviation_max, base_color=(0, 0, 0), matching_color=(255, 255, 255))
+ ar_frame.aoi_2d_scene.draw_circlecast(image, current_gaze_movement.focus, current_gaze_movement.deviation_max, base_color=(0, 0, 0), matching_color=(255, 255, 255))
# Write last 5 steps of aoi scan path
path = ''
- for step in ar_screen.aoi_scan_path[-5:]:
+ for step in ar_frame.aoi_scan_path[-5:]:
path += f'> {step.aoi} '
- path += f'> {ar_screen.aoi_scan_path.current_aoi}'
+ path += f'> {ar_frame.aoi_scan_path.current_aoi}'
- cv2.putText(image, path, (20, ar_screen.size[1]-40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(image, path, (20, ar_frame.size[1]-40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
# Display Transition matrix analysis if loaded
try:
- transition_matrix_analyzer = ar_screen.aoi_scan_path_analyzers["TransitionMatrix"]
+ transition_matrix_analyzer = ar_frame.aoi_scan_path_analyzers["TransitionMatrix"]
- cv2.putText(image, f'Transition matrix density: {transition_matrix_analyzer.transition_matrix_density:.2f}', (20, ar_screen.size[1]-160), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(image, f'Transition matrix density: {transition_matrix_analyzer.transition_matrix_density:.2f}', (20, ar_frame.size[1]-160), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
# Iterate over indexes (departures)
for from_aoi, row in transition_matrix_analyzer.transition_matrix_probabilities.iterrows():
@@ -127,8 +127,8 @@ def main():
if from_aoi != to_aoi and probability > 0.0:
- from_center = ar_screen.aoi_2d_scene[from_aoi].center.astype(int)
- to_center = ar_screen.aoi_2d_scene[to_aoi].center.astype(int)
+ from_center = ar_frame.aoi_2d_scene[from_aoi].center.astype(int)
+ to_center = ar_frame.aoi_2d_scene[to_aoi].center.astype(int)
start_line = (0.5 * from_center + 0.5 * to_center).astype(int)
color = [int(probability*200) + 55, int(probability*200) + 55, int(probability*200) + 55]
@@ -142,16 +142,16 @@ def main():
# Display scan path K Coefficient analysis if loaded
try:
- kc_analyzer = ar_screen.scan_path_analyzers["KCoefficient"]
+ kc_analyzer = ar_frame.scan_path_analyzers["KCoefficient"]
# Write raw Kc analysis
if kc_analyzer.K < 0.:
- cv2.putText(image, f'K coefficient: Ambient attention', (20, ar_screen.size[1]-120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(image, f'K coefficient: Ambient attention', (20, ar_frame.size[1]-120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
elif kc_analyzer.K > 0.:
- cv2.putText(image, f'K coefficient: Focal attention', (20, ar_screen.size[1]-120), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 1, cv2.LINE_AA)
+ cv2.putText(image, f'K coefficient: Focal attention', (20, ar_frame.size[1]-120), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 1, cv2.LINE_AA)
except KeyError:
pass
@@ -159,16 +159,16 @@ def main():
# Display aoi scan path K-modified coefficient analysis if loaded
try:
- aoi_kc_analyzer = ar_screen.aoi_scan_path_analyzers["KCoefficient"]
+ aoi_kc_analyzer = ar_frame.aoi_scan_path_analyzers["KCoefficient"]
# Write aoi Kc analysis
if aoi_kc_analyzer.K < 0.:
- cv2.putText(image, f'K-modified coefficient: Ambient attention', (20, ar_screen.size[1]-80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(image, f'K-modified coefficient: Ambient attention', (20, ar_frame.size[1]-80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
elif aoi_kc_analyzer.K > 0.:
- cv2.putText(image, f'K-modified coefficient: Focal attention', (20, ar_screen.size[1]-80), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 1, cv2.LINE_AA)
+ cv2.putText(image, f'K-modified coefficient: Focal attention', (20, ar_frame.size[1]-80), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 1, cv2.LINE_AA)
except KeyError:
pass
@@ -176,9 +176,9 @@ def main():
# Display Lempel-Ziv complexity analysis if loaded
try:
- lzc_analyzer = ar_screen.aoi_scan_path_analyzers["LempelZivComplexity"]
+ lzc_analyzer = ar_frame.aoi_scan_path_analyzers["LempelZivComplexity"]
- cv2.putText(image, f'Lempel-Ziv complexity: {lzc_analyzer.lempel_ziv_complexity}', (20, ar_screen.size[1]-200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(image, f'Lempel-Ziv complexity: {lzc_analyzer.lempel_ziv_complexity}', (20, ar_frame.size[1]-200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
except KeyError:
pass
@@ -186,11 +186,11 @@ def main():
# Display N-Gram analysis if loaded
try:
- ngram_analyzer = ar_screen.aoi_scan_path_analyzers["NGram"]
+ ngram_analyzer = ar_frame.aoi_scan_path_analyzers["NGram"]
# Display only 3-gram analysis
- start = ar_screen.size[1] - ((len(ngram_analyzer.ngrams_count[3]) + 1) * 40)
- cv2.putText(image, f'{ngram_analyzer.n_max}-Gram:', (ar_screen.size[0]-700, start-40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ start = ar_frame.size[1] - ((len(ngram_analyzer.ngrams_count[3]) + 1) * 40)
+ cv2.putText(image, f'{ngram_analyzer.n_max}-Gram:', (ar_frame.size[0]-700, start-40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
for i, (ngram, count) in enumerate(ngram_analyzer.ngrams_count[3].items()):
@@ -198,7 +198,7 @@ def main():
for g in range(1, 3):
ngram_string += f'>{ngram[g]}'
- cv2.putText(image, f'{ngram_string}: {count}', (ar_screen.size[0]-700, start+(i*40)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(image, f'{ngram_string}: {count}', (ar_frame.size[0]-700, start+(i*40)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
except KeyError:
pass
@@ -206,10 +206,10 @@ def main():
# Display Entropy analysis if loaded
try:
- entropy_analyzer = ar_screen.aoi_scan_path_analyzers["Entropy"]
+ entropy_analyzer = ar_frame.aoi_scan_path_analyzers["Entropy"]
- cv2.putText(image, f'Stationary entropy: {entropy_analyzer.stationary_entropy:.3f},', (20, ar_screen.size[1]-280), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
- cv2.putText(image, f'Transition entropy: {entropy_analyzer.transition_entropy:.3f},', (20, ar_screen.size[1]-240), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(image, f'Stationary entropy: {entropy_analyzer.stationary_entropy:.3f},', (20, ar_frame.size[1]-280), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(image, f'Transition entropy: {entropy_analyzer.transition_entropy:.3f},', (20, ar_frame.size[1]-240), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
except KeyError:
pass
@@ -217,9 +217,9 @@ def main():
# Display Nearest Neighbor index analysis if loaded
try:
- nni_analyzer = ar_screen.scan_path_analyzers["NearestNeighborIndex"]
+ nni_analyzer = ar_frame.scan_path_analyzers["NearestNeighborIndex"]
- cv2.putText(image, f'Nearest neighbor index: {nni_analyzer.nearest_neighbor_index:.3f}', (20, ar_screen.size[1]-320), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(image, f'Nearest neighbor index: {nni_analyzer.nearest_neighbor_index:.3f}', (20, ar_frame.size[1]-320), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
except KeyError:
pass
@@ -227,16 +227,16 @@ def main():
# Display Exploit/Explore ratio analysis if loaded
try:
- xxr_analyser = ar_screen.scan_path_analyzers["ExploitExploreRatio"]
+ xxr_analyser = ar_frame.scan_path_analyzers["ExploitExploreRatio"]
- cv2.putText(image, f'Exploit explore ratio: {xxr_analyser.exploit_explore_ratio:.3f}', (20, ar_screen.size[1]-360), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(image, f'Exploit explore ratio: {xxr_analyser.exploit_explore_ratio:.3f}', (20, ar_frame.size[1]-360), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
except KeyError:
pass
# Draw image
- cv2.imshow(ar_screen.name, image)
+ cv2.imshow(ar_frame.name, image)
key_pressed = cv2.waitKey(10)
@@ -247,14 +247,14 @@ def main():
if key_pressed == 114:
ar_environment = ArFeatures.ArEnvironment.from_json(args.environment)
- ar_screen = ar_environment.scenes["AR Scene Demo"].screens["GrayRectangle"]
+ ar_frame = ar_environment.scenes["AR Scene Demo"].frames["GrayRectangle"]
# Enable heatmap buffer with 'b' key
if key_pressed == 98:
enable_heatmap_buffer = not enable_heatmap_buffer
- ar_screen.heatmap.init(10 if enable_heatmap_buffer else 0)
+ ar_frame.heatmap.init(10 if enable_heatmap_buffer else 0)
# Stop calibration by pressing 'Esc' key
if key_pressed == 27:
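The mouse callback is only one way to feed the frame; a hedged sketch driving the same ArFrame.look pipeline with synthetic gaze positions instead of mouse events (coordinates are arbitrary):

import time
from argaze import ArFeatures, GazeFeatures

ar_environment = ArFeatures.ArEnvironment.from_json('demo_gaze_features_setup.json')
ar_frame = ar_environment.scenes["AR Scene Demo"].frames["GrayRectangle"]

start_time = time.time()

# Replay a few synthetic gaze positions with millisecond timestamps
for x, y in [(100, 100), (105, 102), (400, 300), (410, 305)]:
    timestamp = int((time.time() - start_time) * 1e3)
    ar_frame.look(timestamp, GazeFeatures.GazePosition((x, y)))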