author     Théo de la Hogue    2024-06-24 18:06:54 +0200
committer  Théo de la Hogue    2024-06-24 18:06:54 +0200
commit     a9f5d765182f8851b0dab6768aafb52809a58503 (patch)
tree       a63ea6746a4c8d075c88efdbd48159d0d7975ea5
parent     3c3890e8ec44b32c0df112b5c1ba6ada97fc4c13 (diff)
Adding key interaction to draw help and start recording.
-rw-r--r--  src/argaze/__main__.py | 96
1 file changed, 75 insertions, 21 deletions
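The key handling added by this commit builds on OpenCV's cv2.waitKey() return codes. Below is a minimal, self-contained sketch of that dispatch pattern using the same key codes the diff handles (Enter=13, Space=32, r=114, f=102, h=104, Esc=27); the window name and the blank test frame are illustrative only, not part of ArGaze.

import cv2
import numpy as np

# Toggle flags, as in the __main__ loop shown in the diff below
draw_pipeline = True
draw_help = True

frame = np.zeros((480, 640, 3), dtype=np.uint8)  # illustrative blank frame

while True:
    cv2.imshow('demo', frame)
    key_pressed = cv2.waitKey(40)

    if key_pressed == 102:   # f: enable/disable pipeline drawing
        draw_pipeline = not draw_pipeline

    if key_pressed == 104:   # h: show/hide the help panel
        draw_help = not draw_help

    if key_pressed == 27:    # Esc: close the window
        break

cv2.destroyAllWindows()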
diff --git a/src/argaze/__main__.py b/src/argaze/__main__.py
index df3d338..2647198 100644
--- a/src/argaze/__main__.py
+++ b/src/argaze/__main__.py
@@ -52,22 +52,51 @@ def load_context(args):
logging.info('%s pipe opened', args.pipe_path)
- def display(name, image, factor):
+ def display(name, image, factor = 0.75, draw_help = False):
"""Adapt image to display dimension."""
+ height, width, _ = image.shape
+
+ if draw_help:
+
+ cv2.rectangle(image, (int(width/4), int(height/3)), (int(width*3/4), int(height*2/3)), (127, 127, 127), -1)
+
+ info_stack = 1
+ cv2.putText(image, f'Help', (int(width/4)+20, int(height/3)+(info_stack*40)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+
+ if issubclass(type(context), LiveProcessingContext):
+
+ info_stack += 1
+ cv2.putText(image, f'Press Enter to start calibration', (int(width/4)+20, int(height/3)+(info_stack*40)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+
+ info_stack += 1
+ cv2.putText(image, f'Press r to start/stop recording', (int(width/4)+20, int(height/3)+(info_stack*40)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+
+ if issubclass(type(context), PostProcessingContext):
+
+ info_stack += 1
+ cv2.putText(image, f'Press Space bar to pause/resume processing', (int(width/3)+20, int(height/3)+(info_stack*40)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+
+ info_stack += 1
+ cv2.putText(image, f'Press f to pause/resume visualisation', (int(width/4)+20, int(height/3)+(info_stack*40)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+
+ info_stack += 1
+ cv2.putText(image, f'Press h to hide/show this help panel', (int(width/4)+20, int(height/3)+(info_stack*40)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+
+ info_stack += 1
+ cv2.putText(image, f'Press Escape to quit', (int(width/4)+20, int(height/3)+(info_stack*40)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)
+
if args.display is not None:
display_size = tuple(args.display)
- height, width, _ = image.shape
+
image_ratio = width/height
new_image_size = (int(display_size[1] * factor * image_ratio), int(display_size[1] * factor))
- cv2.imshow(name, cv2.resize(image, dsize=new_image_size, interpolation=cv2.INTER_LINEAR))
-
- else:
+ image = cv2.resize(image, dsize=new_image_size, interpolation=cv2.INTER_LINEAR)
- cv2.imshow(name, image)
+ cv2.imshow(name, image)
# Load context from JSON file
with load(args.context_file) as context:
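The help panel added to display() above is drawn straight onto the frame: one filled grey rectangle over the middle of the image, then text lines stacked every 40 pixels with cv2.putText. A stripped-down sketch of that stacking pattern, applied to an illustrative blank frame (the geometry and colours are taken from the hunk above):

import cv2
import numpy as np

def draw_help_panel(image, lines):
    """Draw a grey panel over the centre of the image and stack text lines on it."""
    height, width, _ = image.shape

    # Filled grey rectangle covering the central area of the frame
    cv2.rectangle(image, (int(width/4), int(height/3)), (int(width*3/4), int(height*2/3)), (127, 127, 127), -1)

    # Stack one line of text every 40 pixels, as in display()
    for info_stack, line in enumerate(lines, start=1):
        cv2.putText(image, line, (int(width/4) + 20, int(height/3) + info_stack * 40),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)

    return image

frame = draw_help_panel(np.zeros((720, 1280, 3), dtype=np.uint8),
                        ['Help', 'Press h to hide/show this help panel', 'Press Escape to quit'])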
@@ -93,8 +122,9 @@ def load_context(args):
# Assess processing time
start_time = time.time()
- # Draw context pipeline by default
+ # Draw parameters
draw_pipeline = True
+ draw_help = True
# Waiting for 'ctrl+C' interruption
with contextlib.suppress(KeyboardInterrupt), os.fdopen(pipe_file) if args.pipe_path is not None else contextlib.nullcontext() as pipe:
@@ -125,7 +155,7 @@ def load_context(args):
try:
# Display context
- display(context.name, context.image(draw_pipeline=draw_pipeline), 0.75)
+ display(context.name, context.image(draw_pipeline=draw_pipeline), 0.75, draw_help=draw_help)
except SharedObjectBusy:
@@ -149,34 +179,58 @@ def load_context(args):
key_pressed = cv2.waitKey(40)
#print("key_pressed", key_pressed)
- # Esc: close window
- if key_pressed == 27:
+ # Enter: start calibration
+ if key_pressed == 13:
- raise KeyboardInterrupt()
+ if issubclass(type(context), LiveProcessingContext):
+
+ context.calibrate()
# Space bar: pause/resume pipeline processing
if key_pressed == 32:
- if context.is_paused():
+ if issubclass(type(context), PostProcessingContext):
+
+ if context.is_paused():
+
+ context.resume()
+
+ else:
+
+ context.pause()
+
+ # r: start/stop recording
+ if key_pressed == 114:
+
+ if issubclass(type(context), LiveProcessingContext):
- context.resume()
+ # FIXME: the following commands only work with TobiiGlassesPro2.LiveStream context.
+ recording_status = context.get_recording_status()
- else:
+ if recording_status == 'recording':
- context.pause()
+ context.stop_recording()
+
+ else:
+
+ context.create_recording()
+ context.start_recording()
# f: disable/enable pipeline drawing
if key_pressed == 102:
draw_pipeline = not draw_pipeline
- # Enter: start calibration
- if key_pressed == 13:
+ # h: disable/enable help drawing
+ if key_pressed == 104:
- if issubclass(type(context), LiveProcessingContext):
+ draw_help = not draw_help
- context.calibrate()
+ # Esc: close window
+ if key_pressed == 27:
+ raise KeyboardInterrupt()
+
# Window mode off
else:
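The new r-key branch above toggles recording through the context's recording calls (get_recording_status, create_recording, start_recording, stop_recording), which the FIXME notes only work with the TobiiGlassesPro2.LiveStream context for now. A sketch of that toggle logic in isolation; the DummyContext stand-in is purely illustrative:

class DummyContext:
    """Illustrative stand-in exposing the recording calls used by the r-key handler."""

    def __init__(self):
        self._status = 'stopped'

    def get_recording_status(self):
        return self._status

    def create_recording(self):
        self._status = 'created'

    def start_recording(self):
        self._status = 'recording'

    def stop_recording(self):
        self._status = 'stopped'

def toggle_recording(context):
    """Mirror the r-key branch: stop if recording, otherwise create then start a recording."""
    if context.get_recording_status() == 'recording':
        context.stop_recording()
    else:
        context.create_recording()
        context.start_recording()

context = DummyContext()
toggle_recording(context)   # -> recording
toggle_recording(context)   # -> stopped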
@@ -185,12 +239,12 @@ def load_context(args):
prefix = f'Progression'
suffix = f'| {int(context.progression*context.duration * 1e-3)}s in {int(time.time()-start_time)}s'
- look_time, look_freq = context.process_gaze_position_performance()
+ look_time, look_freq = context.pipeline.execution_info('look')
suffix += f' | Look {look_time:.2f}ms at {look_freq}Hz'
if issubclass(type(context.pipeline), ArCamera):
- watch_time, watch_freq = context.process_camera_image_performance()
+ watch_time, watch_freq = context.pipeline.execution_info('watch')
suffix += f' | Watch {int(watch_time)}ms at {watch_freq}Hz'
# Clear old longer print
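The progression line now reads its timing figures from context.pipeline.execution_info('look') and execution_info('watch'), which the hunk shows returning a (time in milliseconds, frequency in Hz) pair per step. A hypothetical sketch of how such per-step statistics could be accumulated; the ExecutionTimer class and its internals are assumptions for illustration, not ArGaze's implementation:

import time

class ExecutionTimer:
    """Hypothetical accumulator returning (mean time in ms, frequency in Hz) per step name."""

    def __init__(self):
        self._stats = {}  # name -> [total seconds, call count, timestamp of first call]

    def record(self, name, elapsed):
        total, count, start = self._stats.get(name, [0.0, 0, time.time()])
        self._stats[name] = [total + elapsed, count + 1, start]

    def execution_info(self, name):
        total, count, start = self._stats[name]
        mean_ms = (total / count) * 1e3
        frequency = int(count / (time.time() - start))
        return mean_ms, frequency

# Illustrative usage: time a fake 'look' step a few times
timer = ExecutionTimer()
for _ in range(5):
    begin = time.time()
    time.sleep(0.01)  # stand-in for gaze processing work
    timer.record('look', time.time() - begin)

look_time, look_freq = timer.execution_info('look')
print(f'Look {look_time:.2f}ms at {look_freq}Hz')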