-rw-r--r--  src/argaze/utils/demo_ar_features_run.py    | 17
-rw-r--r--  src/argaze/utils/demo_gaze_features_run.py  | 74
2 files changed, 40 insertions(+), 51 deletions(-)
diff --git a/src/argaze/utils/demo_ar_features_run.py b/src/argaze/utils/demo_ar_features_run.py
index 22b0d89..d7854e3 100644
--- a/src/argaze/utils/demo_ar_features_run.py
+++ b/src/argaze/utils/demo_ar_features_run.py
@@ -32,8 +32,6 @@ def main():
# Load AR environment
ar_environment = ArFeatures.ArEnvironment.from_json(args.environment)
- print(ar_environment)
-
# Create a window to display AR environment
cv2.namedWindow(ar_environment.name, cv2.WINDOW_AUTOSIZE)
@@ -47,7 +45,12 @@ def main():
timestamp = int((time.time() - start_time) * 1e3)
# Project gaze position into environment
- ar_environment.look(timestamp, GazeFeatures.GazePosition((x, y)))
+ for scene_name, scene_looking_data in ar_environment.look(timestamp, GazeFeatures.GazePosition((x, y))):
+
+ for frame_name, frame_looking_data in scene_looking_data:
+
+ # Do nothing with frame looking data
+ pass
# Attach mouse callback to window
cv2.setMouseCallback(ar_environment.name, on_mouse_event)
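
Note how ArEnvironment.look() now yields per-scene results that themselves yield per-frame results. A minimal consumer sketch, assuming only the two-level (name, data) iteration visible in the hunk above (the print is illustrative):

    # Hedged sketch: consuming the nested result of ArEnvironment.look().
    # Assumes look() yields (scene_name, scene_looking_data) pairs and that
    # scene_looking_data yields (frame_name, frame_looking_data) pairs,
    # exactly as the loop in the hunk above suggests.
    for scene_name, scene_looking_data in ar_environment.look(timestamp, GazeFeatures.GazePosition((x, y))):

        for frame_name, frame_looking_data in scene_looking_data:

            # Replace this with real handling of the frame looking data
            print(f'{scene_name}/{frame_name}: {frame_looking_data}')
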
@@ -86,8 +89,14 @@ def main():
# Draw and display each frame in a separate window
for scene_name, frame_name, frame in ar_environment.frames:
+ # Create frame image
+ frame_image = frame.image
+
+ # Draw frame info
+ frame.draw(frame_image)
+
# Display frame
- cv2.imshow(f'{scene_name}:{frame_name}', frame.image)
+ cv2.imshow(f'{scene_name}:{frame_name}', frame_image)
# Stop by pressing 'Esc' key
if cv2.waitKey(10) == 27:
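
The same image pipeline now appears in both demos: take a fresh frame.image, let the frame draw its own overlay, then display it. A condensed sketch of that loop, assuming frame.image returns a new drawable buffer and frame.draw() annotates it in place, as the changes above imply:

    import cv2

    # Hedged sketch of the shared draw-then-display pattern introduced here.
    while True:

        # Create frame image (assumed to be a fresh buffer each pass)
        frame_image = frame.image

        # Draw frame info onto the buffer
        frame.draw(frame_image)

        # Display frame
        cv2.imshow(f'{scene_name}:{frame_name}', frame_image)

        # Stop by pressing 'Esc' key
        if cv2.waitKey(10) == 27:
            break
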
diff --git a/src/argaze/utils/demo_gaze_features_run.py b/src/argaze/utils/demo_gaze_features_run.py
index b9ffcf8..4b4dc12 100644
--- a/src/argaze/utils/demo_gaze_features_run.py
+++ b/src/argaze/utils/demo_gaze_features_run.py
@@ -68,39 +68,19 @@ def main():
# Wait for 'Ctrl+C' interruption
try:
- # Analyse mouse positions
+ # Draw frame and mouse position analysis
while True:
- # Draw frame
- image = ar_frame.background.copy()
+ # Create frame image
+ frame_image = ar_frame.image
- # Draw heatmap
- if ar_frame.heatmap:
+ # Draw frame info
+ ar_frame.draw(frame_image)
- image = cv2.addWeighted(ar_frame.heatmap.image, 0.5, image, 1., 0)
-
- # Write heatmap buffer usage hint
- buffer_on_off = 'on' if enable_heatmap_buffer else 'off'
- buffer_display_disable = 'disable' if enable_heatmap_buffer else 'enable'
- cv2.putText(image, f'Heatmap buffer: {buffer_on_off} (Press \'b\' key to {buffer_display_disable})', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255) if enable_heatmap_buffer else (255, 255, 255), 1, cv2.LINE_AA)
-
- # Draw AOI
- ar_frame.aoi_2d_scene.draw(image, color=(0, 0, 0))
-
- # Draw gaze position
- ar_frame.current_gaze_position.draw(image, color=(255, 255, 255))
-
- # Draw gaze movements
- current_gaze_movement = ar_frame.current_gaze_movement
-
- current_gaze_movement.draw(image, color=(0, 255, 255))
- current_gaze_movement.draw_positions(image)
-
- # Check frame fixation
- if GazeFeatures.is_fixation(current_gaze_movement):
-
- # Draw looked AOI
- ar_frame.aoi_2d_scene.draw_circlecast(image, current_gaze_movement.focus, current_gaze_movement.deviation_max, base_color=(0, 0, 0), matching_color=(255, 255, 255))
+ # Write heatmap buffer usage hint
+ buffer_on_off = 'on' if enable_heatmap_buffer else 'off'
+ buffer_display_disable = 'disable' if enable_heatmap_buffer else 'enable'
+ cv2.putText(frame_image, f'Heatmap buffer: {buffer_on_off} (Press \'b\' key to {buffer_display_disable})', (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255) if enable_heatmap_buffer else (255, 255, 255), 1, cv2.LINE_AA)
# Write last 5 steps of aoi scan path
path = ''
@@ -110,14 +90,14 @@ def main():
path += f'> {ar_frame.aoi_scan_path.current_aoi}'
- cv2.putText(image, path, (20, ar_frame.size[1]-40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(frame_image, path, (20, ar_frame.size[1]-40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
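
All the analysis banners below are stacked upward from the frame's bottom edge in 40 px rows (size[1]-40, -80, -120, ...). A small helper sketch of that layout convention; put_status is a hypothetical name, only the spacing is taken from the calls themselves:

    import cv2

    # Hedged sketch: bottom-up status-line layout used throughout this demo.
    # put_status is a hypothetical helper; row 1 is the lowest line.
    def put_status(frame_image, text, row, frame_height, color=(255, 255, 255)):
        y = frame_height - row * 40
        cv2.putText(frame_image, text, (20, y), cv2.FONT_HERSHEY_SIMPLEX,
                    1, color, 1, cv2.LINE_AA)

    # Equivalent to the scan path banner above
    put_status(frame_image, path, 1, ar_frame.size[1])
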
# Display Transition matrix analysis if loaded
try:
transition_matrix_analyzer = ar_frame.aoi_scan_path_analyzers["TransitionMatrix"]
- cv2.putText(image, f'Transition matrix density: {transition_matrix_analyzer.transition_matrix_density:.2f}', (20, ar_frame.size[1]-160), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(frame_image, f'Transition matrix density: {transition_matrix_analyzer.transition_matrix_density:.2f}', (20, ar_frame.size[1]-160), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
# Iterate over indexes (departures)
for from_aoi, row in transition_matrix_analyzer.transition_matrix_probabilities.iterrows():
@@ -133,8 +113,8 @@ def main():
color = [int(probability*200) + 55, int(probability*200) + 55, int(probability*200) + 55]
- cv2.line(image, start_line, to_center, color, int(probability*10) + 2)
- cv2.line(image, from_center, to_center, [55, 55, 55], 2)
+ cv2.line(frame_image, start_line, to_center, color, int(probability*10) + 2)
+ cv2.line(frame_image, from_center, to_center, [55, 55, 55], 2)
except KeyError:
pass
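
The transition links are drawn from a pandas DataFrame of probabilities, with the probability driving both the gray level (55..255) and the line thickness (2..12). A condensed sketch of that mapping; aoi_center() is a hypothetical stand-in for however the demo resolves an AOI name to pixel coordinates:

    import cv2

    # Hedged sketch: probability -> line style mapping used in the hunk above.
    # aoi_center() is hypothetical; only the scaling is taken from the diff.
    matrix = transition_matrix_analyzer.transition_matrix_probabilities
    for from_aoi, row in matrix.iterrows():
        for to_aoi, probability in row.items():
            if probability > 0. and from_aoi != to_aoi:
                gray = int(probability * 200) + 55      # 55..255
                thickness = int(probability * 10) + 2   # 2..12
                cv2.line(frame_image, aoi_center(from_aoi), aoi_center(to_aoi),
                         (gray, gray, gray), thickness)
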
@@ -147,11 +127,11 @@ def main():
# Write raw Kc analysis
if kc_analyzer.K < 0.:
- cv2.putText(image, f'K coefficient: Ambient attention', (20, ar_frame.size[1]-120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(frame_image, f'K coefficient: Ambient attention', (20, ar_frame.size[1]-120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
elif kc_analyzer.K > 0.:
- cv2.putText(image, f'K coefficient: Focal attention', (20, ar_frame.size[1]-120), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 1, cv2.LINE_AA)
+ cv2.putText(frame_image, f'K coefficient: Focal attention', (20, ar_frame.size[1]-120), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 1, cv2.LINE_AA)
except KeyError:
pass
@@ -164,11 +144,11 @@ def main():
# Write aoi Kc analysis
if aoi_kc_analyzer.K < 0.:
- cv2.putText(image, f'K-modified coefficient: Ambient attention', (20, ar_frame.size[1]-80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(frame_image, f'K-modified coefficient: Ambient attention', (20, ar_frame.size[1]-80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
elif aoi_kc_analyzer.K > 0.:
- cv2.putText(image, f'K-modified coefficient: Focal attention', (20, ar_frame.size[1]-80), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 1, cv2.LINE_AA)
+ cv2.putText(frame_image, f'K-modified coefficient: Focal attention', (20, ar_frame.size[1]-80), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 1, cv2.LINE_AA)
except KeyError:
pass
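
Both coefficient banners follow the same sign convention: K < 0 reads as ambient attention, K > 0 as focal attention. A minimal sketch of that interpretation, factored out (the zero case is left undetermined here by assumption):

    # Hedged sketch: sign convention shared by the K and K-modified banners.
    def attention_label(k: float) -> str:
        if k < 0.:
            return 'Ambient attention'
        if k > 0.:
            return 'Focal attention'
        return 'Undetermined'  # k == 0. is not handled by the demo above
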
@@ -178,7 +158,7 @@ def main():
lzc_analyzer = ar_frame.aoi_scan_path_analyzers["LempelZivComplexity"]
- cv2.putText(image, f'Lempel-Ziv complexity: {lzc_analyzer.lempel_ziv_complexity}', (20, ar_frame.size[1]-200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(frame_image, f'Lempel-Ziv complexity: {lzc_analyzer.lempel_ziv_complexity}', (20, ar_frame.size[1]-200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
except KeyError:
pass
@@ -190,7 +170,7 @@ def main():
# Display only 3-gram analysis
start = ar_frame.size[1] - ((len(ngram_analyzer.ngrams_count[3]) + 1) * 40)
- cv2.putText(image, f'{ngram_analyzer.n_max}-Gram:', (ar_frame.size[0]-700, start-40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(frame_image, f'{ngram_analyzer.n_max}-Gram:', (ar_frame.size[0]-700, start-40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
for i, (ngram, count) in enumerate(ngram_analyzer.ngrams_count[3].items()):
@@ -198,7 +178,7 @@ def main():
for g in range(1, 3):
ngram_string += f'>{ngram[g]}'
- cv2.putText(image, f'{ngram_string}: {count}', (ar_frame.size[0]-700, start+(i*40)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(frame_image, f'{ngram_string}: {count}', (ar_frame.size[0]-700, start+(i*40)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
except KeyError:
pass
@@ -208,8 +188,8 @@ def main():
entropy_analyzer = ar_frame.aoi_scan_path_analyzers["Entropy"]
- cv2.putText(image, f'Stationary entropy: {entropy_analyzer.stationary_entropy:.3f},', (20, ar_frame.size[1]-280), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
- cv2.putText(image, f'Transition entropy: {entropy_analyzer.transition_entropy:.3f},', (20, ar_frame.size[1]-240), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(frame_image, f'Stationary entropy: {entropy_analyzer.stationary_entropy:.3f},', (20, ar_frame.size[1]-280), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(frame_image, f'Transition entropy: {entropy_analyzer.transition_entropy:.3f},', (20, ar_frame.size[1]-240), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
except KeyError:
pass
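
Every analysis block above uses the same optional-lookup idiom: index the analyzers dict by name and silently skip the banner when that analyzer was not loaded. A generic sketch of the idiom (the print stands in for the putText call):

    # Hedged sketch: optional-analyzer idiom repeated throughout this demo.
    try:
        entropy_analyzer = ar_frame.aoi_scan_path_analyzers["Entropy"]
    except KeyError:
        pass  # analyzer not configured for this frame; skip its banner
    else:
        print(f'Stationary entropy: {entropy_analyzer.stationary_entropy:.3f}')
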
@@ -219,7 +199,7 @@ def main():
nni_analyzer = ar_frame.scan_path_analyzers["NearestNeighborIndex"]
- cv2.putText(image, f'Nearest neighbor index: {nni_analyzer.nearest_neighbor_index:.3f}', (20, ar_frame.size[1]-320), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(frame_image, f'Nearest neighbor index: {nni_analyzer.nearest_neighbor_index:.3f}', (20, ar_frame.size[1]-320), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
except KeyError:
pass
@@ -229,14 +209,14 @@ def main():
xxr_analyser = ar_frame.scan_path_analyzers["ExploitExploreRatio"]
- cv2.putText(image, f'Exploit explore ratio: {xxr_analyser.exploit_explore_ratio:.3f}', (20, ar_frame.size[1]-360), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
+ cv2.putText(frame_image, f'Exploit explore ratio: {xxr_analyser.exploit_explore_ratio:.3f}', (20, ar_frame.size[1]-360), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)
except KeyError:
pass
- # Draw image
- cv2.imshow(ar_frame.name, image)
+ # Display frame image
+ cv2.imshow(ar_frame.name, frame_image)
key_pressed = cv2.waitKey(10)
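
The key handling that follows is trimmed from this diff, but the banners above name at least two bindings: 'Esc' to quit and 'b' to toggle the heatmap buffer. A hedged sketch of that dispatch:

    # Hedged sketch of the key dispatch implied by the on-screen hints:
    # 'b' toggles the heatmap buffer, 'Esc' (27) ends the loop.
    key_pressed = cv2.waitKey(10)

    if key_pressed == ord('b'):
        enable_heatmap_buffer = not enable_heatmap_buffer

    if key_pressed == 27:
        break
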
@@ -264,7 +244,7 @@ def main():
except KeyboardInterrupt:
pass
- # Stop image display
+ # Stop frame image display
cv2.destroyAllWindows()
if __name__ == '__main__':
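
For completeness, the demo's overall shutdown pattern as visible across this diff: loop until Ctrl+C, then release the OpenCV windows. A minimal sketch:

    import cv2

    # Hedged sketch of the run-until-interrupt pattern framing this demo.
    try:
        while True:
            pass  # per-frame drawing and analysis display goes here
    except KeyboardInterrupt:
        pass

    # Stop frame image display
    cv2.destroyAllWindows()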