author    Théo de la Hogue  2022-12-08 23:31:51 +0100
committer Théo de la Hogue  2022-12-08 23:31:51 +0100
commit    a566491d0aacbc3a105c742608e7f15ae085f28e (patch)
tree      c822eedd58c9f89f3bf2f602807cf06e648b5164
parent    4b1c52ed130b9be7c339b2a4bbb3621c362df5f7 (diff)
Replacing accuracy by precision.
-rw-r--r--  src/argaze.test/AreaOfInterest/AOI2DScene.py               8
-rw-r--r--  src/argaze/AreaOfInterest/AOI2DScene.py                    6
-rw-r--r--  src/argaze/GazeFeatures.py                                37
-rw-r--r--  src/argaze/utils/tobii_segment_arscene_edit.py             2
-rw-r--r--  src/argaze/utils/tobii_segment_aruco_aoi_edit.py         430
-rw-r--r--  src/argaze/utils/tobii_segment_display.py                  4
-rw-r--r--  src/argaze/utils/tobii_segment_gaze_movements_export.py   12
-rw-r--r--  src/argaze/utils/tobii_stream_aruco_plan_display.py      360
-rw-r--r--  src/argaze/utils/tobii_stream_display.py                   4
9 files changed, 41 insertions, 822 deletions
diff --git a/src/argaze.test/AreaOfInterest/AOI2DScene.py b/src/argaze.test/AreaOfInterest/AOI2DScene.py
index 35f0f17..ecd6bbb 100644
--- a/src/argaze.test/AreaOfInterest/AOI2DScene.py
+++ b/src/argaze.test/AreaOfInterest/AOI2DScene.py
@@ -84,10 +84,10 @@ class TestAOI2DSceneClass(unittest.TestCase):
aoi_2D_B = AOIFeatures.AreaOfInterest([[1, 1], [1, 2], [2, 2], [2, 1]])
aoi_2d_scene = AOI2DScene.AOI2DScene({"A": aoi_2D_A, "B": aoi_2D_B})
- gaze_position_A = GazeFeatures.GazePosition((0.5, 0.5), accuracy=0.5)
- gaze_position_B = GazeFeatures.GazePosition((1.5, 1.5), accuracy=0.5)
- gaze_position_C = GazeFeatures.GazePosition((1., 1.), accuracy=1)
- gaze_position_D = GazeFeatures.GazePosition((0.5, 1.5), accuracy=0.25)
+ gaze_position_A = GazeFeatures.GazePosition((0.5, 0.5), precision=0.5)
+ gaze_position_B = GazeFeatures.GazePosition((1.5, 1.5), precision=0.5)
+ gaze_position_C = GazeFeatures.GazePosition((1., 1.), precision=1)
+ gaze_position_D = GazeFeatures.GazePosition((0.5, 1.5), precision=0.25)
# Check circlecast results for gaze position A
for name, aoi, looked_region, aoi_ratio, gaze_ratio in aoi_2d_scene.circlecast(gaze_position_A):
diff --git a/src/argaze/AreaOfInterest/AOI2DScene.py b/src/argaze/AreaOfInterest/AOI2DScene.py
index f648803..d1f7fa0 100644
--- a/src/argaze/AreaOfInterest/AOI2DScene.py
+++ b/src/argaze/AreaOfInterest/AOI2DScene.py
@@ -59,18 +59,18 @@ class AOI2DScene(AOIFeatures.AOIScene):
aoi.draw(frame, color)
def circlecast(self, gaze_position: GazeFeatures.GazePosition) -> Tuple[str, "AOIFeatures.AreaOfInterest", numpy.array, float, float]:
- """Iterate over areas to know which aoi is looked considering gaze position value and its accuracy.
+ """Iterate over areas to know which aoi is looked considering gaze position value and its precision.
* **Returns:**
- aoi name
- aoi object
- looked region points
- ratio of looked region area relatively to aoi area
- - ratio of looked region area relatively to gaze position circle accuracy
+ - ratio of looked region area relatively to gaze position circle precision
"""
for name, aoi in self.items():
- looked_region, aoi_ratio, gaze_ratio = aoi.circle_intersection(gaze_position.value, gaze_position.accuracy)
+ looked_region, aoi_ratio, gaze_ratio = aoi.circle_intersection(gaze_position.value, gaze_position.precision)
yield name, aoi, looked_region, aoi_ratio, gaze_ratio
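
The renamed circlecast API treats precision as the radius of a circle around the gaze point and reports, per AOI, how much of the AOI and how much of that circle intersect. A minimal sketch, assuming the package layout shown in this diff (the AOIFeatures import path is inferred from the test file above):

    from argaze import GazeFeatures
    from argaze.AreaOfInterest import AOIFeatures, AOI2DScene

    # One unit-square AOI and a gaze position whose precision is a circle radius
    aoi = AOIFeatures.AreaOfInterest([[0, 0], [0, 1], [1, 1], [1, 0]])
    scene = AOI2DScene.AOI2DScene({"A": aoi})
    gaze_position = GazeFeatures.GazePosition((0.5, 0.5), precision=0.5)

    # aoi_ratio: looked region area relative to the AOI area
    # gaze_ratio: looked region area relative to the precision circle area
    for name, aoi, looked_region, aoi_ratio, gaze_ratio in scene.circlecast(gaze_position):
        print(name, aoi_ratio, gaze_ratio)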
diff --git a/src/argaze/GazeFeatures.py b/src/argaze/GazeFeatures.py
index d7fcb1a..cef9aaa 100644
--- a/src/argaze/GazeFeatures.py
+++ b/src/argaze/GazeFeatures.py
@@ -14,13 +14,14 @@ import cv2 as cv
@dataclass(frozen=True)
class GazePosition():
- """Define gaze position as a tuple of coordinates with accuracy."""
+ """Define gaze position as a tuple of coordinates with precision."""
value: tuple[int | float] = field(default=(0, 0))
"""Position's value."""
- accuracy: float = field(default=0., kw_only=True)
- """Position's accuracy."""
+ precision: float = field(default=0., kw_only=True)
+ """Position's precision represents the radius of a circle around \
+ this gaze position value where other same gaze position measurements could be."""
def __getitem__(self, axis: int) -> int | float:
"""Get position value along a particular axis."""
@@ -49,12 +50,20 @@ class GazePosition():
@property
def valid(self) -> bool:
- """Is the accuracy not None?"""
+ """Is the precision not None?"""
- return self.accuracy is not None
+ return self.precision is not None
+
+ def overlap(self, gaze_position) -> float:
+ """Does this gaze position overlap another gaze position considering their precisions?"""
+
+ dist = (self.value[0] - gaze_position.value[0])**2 + (self.value[1] - gaze_position.value[1])**2
+ dist = numpy.sqrt(dist)
+
+ return dist < min(self.precision, gaze_position.precision)
def draw(self, frame, color=(0, 255, 255)):
- """Draw gaze position point and accuracy circle."""
+ """Draw gaze position point and precision circle."""
if self.valid:
@@ -63,16 +72,16 @@ class GazePosition():
# Draw point at position
cv.circle(frame, int_value, 2, color, -1)
- # Draw accuracy circle
- if self.accuracy > 0:
- cv.circle(frame, int_value, round(self.accuracy), color, 1)
+ # Draw precision circle
+ if self.precision > 0:
+ cv.circle(frame, int_value, round(self.precision), color, 1)
class UnvalidGazePosition(GazePosition):
"""Unvalid gaze position."""
def __init__(self):
- super().__init__((None, None), accuracy=None)
+ super().__init__((None, None), precision=None)
class TimeStampedGazePositions(DataStructures.TimeStampedBuffer):
"""Define timestamped buffer to store gaze positions."""
@@ -83,9 +92,9 @@ class TimeStampedGazePositions(DataStructures.TimeStampedBuffer):
# Convert dict into GazePosition
if type(value) == dict:
- assert(set(["value", "accuracy"]).issubset(value.keys()))
+ assert(set(["value", "precision"]).issubset(value.keys()))
- value = GazePosition(value["value"], accuracy=value["accuracy"])
+ value = GazePosition(value["value"], precision=value["precision"])
assert(type(value) == GazePosition or type(value) == UnvalidGazePosition)
@@ -119,7 +128,7 @@ class GazeMovement():
for ts, position in self.positions.items():
- output += f'\n\t{ts}:\n\t\tvalue={position.value},\n\t\taccurracy={position.accuracy}'
+ output += f'\n\t{ts}:\n\t\tvalue={position.value},\n\t\tprecision={position.precision}'
return output
@@ -176,7 +185,7 @@ class GazeStatus(GazePosition):
def from_position(cls, gaze_position: GazePosition, movement_type: str, movement_index: int) -> GazeStatusType:
"""Initialize from a gaze position instance."""
- return cls(gaze_position.value, accuracy=gaze_position.accuracy, movement_type=movement_type, movement_index=movement_index)
+ return cls(gaze_position.value, precision=gaze_position.precision, movement_type=movement_type, movement_index=movement_index)
TimeStampedGazeStatusType = TypeVar('TimeStampedGazeStatus', bound="TimeStampedGazeStatus")
# Type definition for type annotation convenience
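
A sketch of the new overlap test, using only names introduced in this diff. As implemented above, it reports True when the distance between the two position values is smaller than the smaller of the two precision radii, i.e. when each position falls inside the other's precision circle (stricter than mere circle intersection):

    from argaze import GazeFeatures

    position_a = GazeFeatures.GazePosition((10, 10), precision=5)
    position_b = GazeFeatures.GazePosition((12, 10), precision=3)

    # distance is 2 px and min(precision) is 3 px, so these positions overlap
    print(position_a.overlap(position_b))  # True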
diff --git a/src/argaze/utils/tobii_segment_arscene_edit.py b/src/argaze/utils/tobii_segment_arscene_edit.py
index 587ca14..2a059aa 100644
--- a/src/argaze/utils/tobii_segment_arscene_edit.py
+++ b/src/argaze/utils/tobii_segment_arscene_edit.py
@@ -149,7 +149,7 @@ def main():
video_frame = last_frame.copy()
# Edit fake gaze position from pointer
- gaze_position = GazeFeatures.GazePosition(pointer, accuracy=2)
+ gaze_position = GazeFeatures.GazePosition(pointer, precision=2)
# Copy video frame to edit visualisation on it without disrupting aruco tracking
visu_frame = video_frame.copy()
diff --git a/src/argaze/utils/tobii_segment_aruco_aoi_edit.py b/src/argaze/utils/tobii_segment_aruco_aoi_edit.py
deleted file mode 100644
index fc27b97..0000000
--- a/src/argaze/utils/tobii_segment_aruco_aoi_edit.py
+++ /dev/null
@@ -1,430 +0,0 @@
-#!/usr/bin/env python
-
-import argparse
-import os
-import json
-import time
-
-from argaze import DataStructures
-from argaze import GazeFeatures
-from argaze.TobiiGlassesPro2 import TobiiEntities, TobiiVideo
-from argaze.ArUcoMarkers import *
-from argaze.AreaOfInterest import *
-from argaze.utils import MiscFeatures
-
-import numpy
-import cv2 as cv
-
-def main():
- """
- Open video file with ArUco marker scene inside
- """
-
- # Manage arguments
- parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0])
- parser.add_argument('-s', '--segment_path', metavar='SEGMENT_PATH', type=str, default=None, help='segment path')
- parser.add_argument('-r', '--time_range', metavar=('START_TIME', 'END_TIME'), nargs=2, type=float, default=(0., None), help='start and end time (in second)')
- parser.add_argument('-c', '--camera_calibration', metavar='CAM_CALIB', type=str, default=None, help='json camera calibration filepath')
- parser.add_argument('-p', '--aruco_tracker_configuration', metavar='TRACK_CONFIG', type=str, default=None, help='json aruco tracker configuration filepath')
- parser.add_argument('-md', '--marker_dictionary', metavar='MARKER_DICT', type=ArUcoMarkersDictionary.ArUcoMarkersDictionary, default='DICT_ARUCO_ORIGINAL', help='aruco marker dictionnary (DICT_4X4_50, DICT_4X4_100, DICT_4X4_250, DICT_4X4_1000, DICT_5X5_50, DICT_5X5_100, DICT_5X5_250, DICT_5X5_1000, DICT_6X6_50, DICT_6X6_100, DICT_6X6_250, DICT_6X6_1000, DICT_7X7_50, DICT_7X7_100, DICT_7X7_250, DICT_7X7_1000, DICT_ARUCO_ORIGINAL, DICT_APRILTAG_16h5, DICT_APRILTAG_25h9, DICT_APRILTAG_36h10, DICT_APRILTAG_36h11)')
- parser.add_argument('-ms', '--marker_size', metavar='MARKER_SIZE', type=float, default=6, help='aruco marker size (cm)')
- parser.add_argument('-mi', '--marker_id_scene', metavar='MARKER_ID_SCENE', type=json.loads, help='{"marker": "aoi scene filepath"} dictionary')
- parser.add_argument('-o', '--output', metavar='OUT', type=str, default=None, help='destination folder path (segment folder by default)')
- parser.add_argument('-w', '--window', metavar='DISPLAY', type=bool, default=True, help='enable window display', action=argparse.BooleanOptionalAction)
- args = parser.parse_args()
-
- if args.segment_path != None:
-
- # Manage markers id to track
- if args.marker_id_scene == None:
- print(f'Track any Aruco markers from the {args.marker_dictionary.name} dictionary')
- else:
- print(f'Track Aruco markers {list(args.marker_id_scene.keys())} from the {args.marker_dictionary.name} dictionary')
-
- # Manage destination path
- destination_path = '.'
- if args.output != None:
-
- if not os.path.exists(os.path.dirname(args.output)):
-
- os.makedirs(os.path.dirname(args.output))
- print(f'{os.path.dirname(args.output)} folder created')
-
- destination_path = args.output
-
- else:
-
- destination_path = args.segment_path
-
- # Export into a dedicated time range folder
- if args.time_range[1] != None:
- timerange_path = f'[{int(args.time_range[0])}s - {int(args.time_range[1])}s]'
- else:
- timerange_path = f'[all]'
-
- destination_path = f'{destination_path}/{timerange_path}'
-
- if not os.path.exists(destination_path):
-
- os.makedirs(destination_path)
- print(f'{destination_path} folder created')
-
- #vs_data_filepath = f'{destination_path}/visual_scan.csv'
-
- # Load a tobii segment
- tobii_segment = TobiiEntities.TobiiSegment(args.segment_path, int(args.time_range[0] * 1e6), int(args.time_range[1] * 1e6) if args.time_range[1] != None else None)
-
- # Load a tobii segment video
- tobii_segment_video = tobii_segment.load_video()
- print(f'Video properties:\n\tduration: {tobii_segment_video.duration/1e6} s\n\twidth: {tobii_segment_video.width} px\n\theight: {tobii_segment_video.height} px')
-
- # Create aruco camera
- aruco_camera = ArUcoCamera.ArUcoCamera()
-
- # Load calibration file
- if args.camera_calibration != None:
-
- aruco_camera.load_calibration_file(args.camera_calibration)
-
- else:
-
- raise ValueError('.json camera calibration filepath required. Use -c option.')
-
- # Create aruco tracker
- aruco_tracker = ArUcoTracker.ArUcoTracker(args.marker_dictionary, args.marker_size, aruco_camera)
-
- # Load specific configuration file
- def load_configuration_file():
-
- if args.aruco_tracker_configuration != None:
-
- aruco_tracker.load_configuration_file(args.aruco_tracker_configuration)
-
- print(f'ArUcoTracker configuration for {args.marker_dictionary.name} markers detection:')
- aruco_tracker.print_configuration()
-
- load_configuration_file()
-
- # Load AOI 3D scene for each marker
- aoi3D_scenes = {}
- aoi3D_scene_edits = {}
-
- for marker_id, aoi_scene_filepath in args.marker_id_scene.items():
-
- marker_id = int(marker_id)
-
- aoi3D_scenes[marker_id] = AOI3DScene.AOI3DScene()
- aoi3D_scenes[marker_id].load(aoi_scene_filepath)
-
- aoi3D_scene_edits[marker_id] = {
- 'rotation': numpy.array([0.0, 0.0, 0.0]),
- 'translation': numpy.array([0.0, 0.0, 0.0])
- }
-
- print(f'AOI in {os.path.basename(aoi_scene_filepath)} scene related to marker #{marker_id}:')
- for aoi in aoi3D_scenes[marker_id].keys():
- print(f'\t{aoi}')
-
- def aoi3D_scene_selector(marker_id):
- return aoi3D_scenes.get(marker_id, None)
-
- def aoi3D_scene_edit_selector(marker_id):
- return aoi3D_scene_edits.get(marker_id, None)
-
- # Display first frame
- video_ts, video_frame = tobii_segment_video.get_frame(0)
- cv.imshow(f'Segment {tobii_segment.id} ArUco marker editor', video_frame.matrix)
-
- # Init mouse interaction variables
- pointer = (0, 0)
- left_click = (0, 0)
- right_click = (0, 0)
- right_button = False
- edit_trans = False # translate
- edit_coord = 0 # x
-
- # On mouse left left_click : update pointer position
- def on_mouse_event(event, x, y, flags, param):
-
- nonlocal pointer
- nonlocal left_click
- nonlocal right_click
- nonlocal right_button
-
- # Update pointer
- pointer = (x, y)
-
- # Update left_click
- if event == cv.EVENT_LBUTTONUP:
-
- left_click = pointer
-
- # Udpate right_button
- elif event == cv.EVENT_RBUTTONDOWN:
-
- right_button = True
-
- elif event == cv.EVENT_RBUTTONUP:
-
- right_button = False
-
- # Udpate right_click
- if right_button:
-
- right_click = pointer
-
- cv.setMouseCallback(f'Segment {tobii_segment.id} ArUco marker editor', on_mouse_event)
-
- # Frame selector loop
- frame_index = 0
- last_frame_index = -1
- last_frame = video_frame.copy()
- force_update = False
-
- selected_marker_id = -1
-
- try:
-
- while True:
-
- # Select a frame on change
- if frame_index != last_frame_index or force_update:
-
- video_ts, video_frame = tobii_segment_video.get_frame(frame_index)
- video_ts_ms = video_ts / 1e3
-
- last_frame_index = frame_index
- last_frame = video_frame.copy()
-
- # Hide frame left and right borders before tracking to ignore markers outside focus area
- cv.rectangle(video_frame.matrix, (0, 0), (int(video_frame.width/6), int(video_frame.height)), (0, 0, 0), -1)
- cv.rectangle(video_frame.matrix, (int(video_frame.width*(1 - 1/6)), 0), (int(video_frame.width), int(video_frame.height)), (0, 0, 0), -1)
-
- # Track markers with pose estimation
- aruco_tracker.track(video_frame.matrix)
-
- else:
-
- video_frame = last_frame.copy()
-
- # Edit fake gaze position from pointer
- gaze_position = GazeFeatures.GazePosition(pointer, accuracy=2)
-
- # Copy video frame to edit visualisation on it with out disrupting aruco tracking
- visu_frame = video_frame.copy()
-
- # Draw markers and pose estimation
- aruco_tracker.draw_tracked_markers(visu_frame.matrix)
-
- # Project 3D scene on each video frame and the visualisation frame
- if len(aruco_tracker.tracked_markers.keys()) > 0:
-
- # Write detected marker ids
- cv.putText(visu_frame.matrix, f'Detected markers: {list(aruco_tracker.tracked_markers.keys())}', (20, visu_frame.height - 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
-
- # Update selected marker id by left_clicking on marker
- for (marker_id, marker) in aruco_tracker.tracked_markers.items():
-
- marker_aoi = marker.corners.reshape(4, 2).view(AOIFeatures.AreaOfInterest)
-
- if marker_aoi.contains_point(left_click):
-
- selected_marker_id = marker_id
-
- # Select 3D scene related to selected marker
- aoi3D_scene = aoi3D_scene_selector(selected_marker_id)
-
- # If a marker is selected
- try:
-
- # Retreive marker index
- selected_marker = aruco_tracker.tracked_markers[selected_marker_id]
-
- if aoi3D_scene == None:
- raise UserWarning('No AOI 3D scene')
-
- # Select scene edit
- aoi3D_scene_edit = aoi3D_scene_edit_selector(selected_marker_id)
-
- # Edit scene
- if aoi3D_scene_edit != None:
-
- marker_x, marker_y = selected_marker.center
-
- if right_button:
-
- pointer_delta_x, pointer_delta_y = (right_click[0] - marker_x) / (visu_frame.width/3), (marker_y - right_click[1]) / (visu_frame.width/3)
-
- if edit_trans:
-
- # Edit scene rotation
- if edit_coord == 0:
- aoi3D_scene_edit['rotation'] = numpy.array([pointer_delta_y, aoi3D_scene_edit['rotation'][1], aoi3D_scene_edit['rotation'][2]])
-
- elif edit_coord == 1:
- aoi3D_scene_edit['rotation'] = numpy.array([aoi3D_scene_edit['rotation'][0], pointer_delta_x, aoi3D_scene_edit['rotation'][2]])
-
- elif edit_coord == 2:
- aoi3D_scene_edit['rotation'] = numpy.array([aoi3D_scene_edit['rotation'][0], aoi3D_scene_edit['rotation'][1], -1*pointer_delta_y])
-
- else:
-
- # Edit scene translation
- if edit_coord == 0:
- aoi3D_scene_edit['translation'] = numpy.array([pointer_delta_x, aoi3D_scene_edit['translation'][1], aoi3D_scene_edit['translation'][2]])
-
- elif edit_coord == 1:
- aoi3D_scene_edit['translation'] = numpy.array([aoi3D_scene_edit['translation'][0], pointer_delta_y, aoi3D_scene_edit['translation'][2]])
-
- elif edit_coord == 2:
- aoi3D_scene_edit['translation'] = numpy.array([aoi3D_scene_edit['translation'][0], aoi3D_scene_edit['translation'][1], 2*pointer_delta_y])
-
- # Apply transformation
- aoi3D_scene_edited = aoi3D_scene.transform(aoi3D_scene_edit['translation'], aoi3D_scene_edit['rotation'])
-
- cv.rectangle(visu_frame.matrix, (0, 130), (460, 450), (127, 127, 127), -1)
-
- # Write rotation matrix
- R, _ = cv.Rodrigues(aoi3D_scene_edit['rotation'])
- cv.putText(visu_frame.matrix, f'Rotation matrix:', (20, 160), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
- cv.putText(visu_frame.matrix, f'{R[0][0]:.3f} {R[0][1]:.3f} {R[0][2]:.3f}', (40, 200), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv.LINE_AA)
- cv.putText(visu_frame.matrix, f'{R[1][0]:.3f} {R[1][1]:.3f} {R[1][2]:.3f}', (40, 240), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv.LINE_AA)
- cv.putText(visu_frame.matrix, f'{R[2][0]:.3f} {R[2][1]:.3f} {R[2][2]:.3f}', (40, 280), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv.LINE_AA)
-
- # Write translation vector
- T = aoi3D_scene_edit['translation']
- cv.putText(visu_frame.matrix, f'Translation vector:', (20, 320), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
- cv.putText(visu_frame.matrix, f'{T[0]:.3f}', (40, 360), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv.LINE_AA)
- cv.putText(visu_frame.matrix, f'{T[1]:.3f}', (40, 400), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv.LINE_AA)
- cv.putText(visu_frame.matrix, f'{T[2]:.3f}', (40, 440), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1, cv.LINE_AA)
-
- # DON'T APPLY CAMERA DISTORSION : it projects points which are far from the frame into it
- # This hack isn't realistic but as the gaze will mainly focus on centered AOI, where the distorsion is low, it is acceptable.
- aoi2D_video_scene = aoi3D_scene_edited.project(selected_marker.translation, selected_marker.rotation, aruco_camera.K)
-
- # Draw aoi scene
- aoi2D_video_scene.draw_raycast(visu_frame.matrix, gaze_position)
-
- # Write warning related to marker pose processing
- except UserWarning as e:
-
- cv.putText(visu_frame.matrix, f'Marker {selected_marker_id}: {e}', (20, 120), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv.LINE_AA)
-
- except KeyError:
-
- # Write error
- if selected_marker_id >= 0:
- cv.putText(visu_frame.matrix, f'Marker {selected_marker_id} not found', (20, 120), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1, cv.LINE_AA)
-
- # Draw focus area
- cv.rectangle(visu_frame.matrix, (int(visu_frame.width/6), 0), (int(visu_frame.width*(1-1/6)), int(visu_frame.height)), (255, 150, 150), 1)
-
- # Draw center
- cv.line(visu_frame.matrix, (int(visu_frame.width/2) - 50, int(visu_frame.height/2)), (int(visu_frame.width/2) + 50, int(visu_frame.height/2)), (255, 150, 150), 1)
- cv.line(visu_frame.matrix, (int(visu_frame.width/2), int(visu_frame.height/2) - 50), (int(visu_frame.width/2), int(visu_frame.height/2) + 50), (255, 150, 150), 1)
-
- # Draw pointer
- gaze_position.draw(visu_frame.matrix)
-
- # Write segment timing
- cv.rectangle(visu_frame.matrix, (0, 0), (550, 50), (63, 63, 63), -1)
- cv.putText(visu_frame.matrix, f'Segment time: {int(video_ts_ms)} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
-
- # Write selected marker id
- if selected_marker_id >= 0:
-
- cv.rectangle(visu_frame.matrix, (0, 50), (550, 90), (127, 127, 127), -1)
-
- # Select color
- if edit_coord == 0:
- color_axis = (0, 0, 255)
-
- elif edit_coord == 1:
- color_axis = (0, 255, 0)
-
- elif edit_coord == 2:
- color_axis = (255, 0, 0)
-
- if edit_trans:
- cv.putText(visu_frame.matrix, f'Rotate marker {selected_marker_id} around axis {edit_coord + 1}', (20, 80), cv.FONT_HERSHEY_SIMPLEX, 1, color_axis, 1, cv.LINE_AA)
- else:
- cv.putText(visu_frame.matrix, f'Translate marker {selected_marker_id} along axis {edit_coord + 1}', (20, 80), cv.FONT_HERSHEY_SIMPLEX, 1, color_axis, 1, cv.LINE_AA)
-
- # Write documentation
- else:
- cv.rectangle(visu_frame.matrix, (0, 50), (650, 250), (127, 127, 127), -1)
- cv.putText(visu_frame.matrix, f'> Left click on marker: select scene', (20, 80), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
- cv.putText(visu_frame.matrix, f'> T: translate, R: rotate', (20, 120), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
- cv.putText(visu_frame.matrix, f'> Shift + 0/1/2: select axis', (20, 160), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
- cv.putText(visu_frame.matrix, f'> Right click and drag: edit axis', (20, 200), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
- cv.putText(visu_frame.matrix, f'> Ctrl + S: save scene', (20, 240), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
-
- # Reset left_click
- left_click = (0, 0)
-
- if args.window:
-
- key_pressed = cv.waitKey(1)
-
- #if key_pressed != -1:
- # print(key_pressed)
-
- # Select previous frame with left arrow
- if key_pressed == 2:
- frame_index -= 1
-
- # Select next frame with right arrow
- if key_pressed == 3:
- frame_index += 1
-
- # Clip frame index
- if frame_index < 0:
- frame_index = 0
-
- # Edit rotation with r key
- if key_pressed == 114:
- edit_trans = True
-
- # Edit translation with t key
- if key_pressed == 116:
- edit_trans = False
-
- # Select coordinate to edit with Shift + 0, 1 or 2
- if key_pressed == 49 or key_pressed == 50 or key_pressed == 51:
- edit_coord = key_pressed - 49
-
- # Save selected marker edition using 'Ctrl + s'
- if key_pressed == 19:
-
- if selected_marker_id >= 0 and aoi3D_scene_edit != None:
-
- aoi_scene_filepath = args.marker_id_scene[f'{selected_marker_id}']
- aoi3D_scene_edited.save(aoi_scene_filepath)
-
- print(f'Saving scene related to marker #{selected_marker_id} into {aoi_scene_filepath}')
-
- # Close window using 'Esc' key
- if key_pressed == 27:
- break
-
- # Reload tracker configuration on 'c' key
- if key_pressed == 99:
- load_configuration_file()
- force_update = True
-
- # Display video
- cv.imshow(f'Segment {tobii_segment.id} ArUco marker editor', visu_frame.matrix)
-
- # Wait 1 second
- time.sleep(1)
-
- # Exit on 'ctrl+C' interruption
- except KeyboardInterrupt:
- pass
-
- # Stop frame display
- cv.destroyAllWindows()
-
-if __name__ == '__main__':
-
- main()
\ No newline at end of file
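
The deleted editor's mouse handling follows a standard OpenCV pattern worth keeping on record: a callback registered with cv.setMouseCallback mutates enclosing-scope state that the display loop then reads. A self-contained sketch of that pattern (window name and printout are illustrative only, not taken from the deleted script):

    import numpy
    import cv2 as cv

    pointer = (0, 0)

    def on_mouse_event(event, x, y, flags, param):
        # Track pointer position and report left clicks
        global pointer
        pointer = (x, y)
        if event == cv.EVENT_LBUTTONUP:
            print('left click at', pointer)

    frame = numpy.zeros((240, 320, 3), dtype=numpy.uint8)
    cv.namedWindow('editor')
    cv.setMouseCallback('editor', on_mouse_event)
    cv.imshow('editor', frame)
    cv.waitKey(0)  # press any key to close
    cv.destroyAllWindows()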
diff --git a/src/argaze/utils/tobii_segment_display.py b/src/argaze/utils/tobii_segment_display.py
index 8d8f037..dbef15f 100644
--- a/src/argaze/utils/tobii_segment_display.py
+++ b/src/argaze/utils/tobii_segment_display.py
@@ -93,10 +93,10 @@ def main():
# Ignore frame when gaze position 3D is not valid
if nearest_gaze_position_3d.validity == 0:
- gaze_accuracy_mm = numpy.tan(numpy.deg2rad(TobiiSpecifications.ACCURACY)) * nearest_gaze_position_3d.value[2]
+ gaze_precision_mm = numpy.tan(numpy.deg2rad(TobiiSpecifications.ACCURACY)) * nearest_gaze_position_3d.value[2]
tobii_camera_hfov_mm = numpy.tan(numpy.deg2rad(TobiiSpecifications.CAMERA_HFOV / 2)) * nearest_gaze_position_3d.value[2]
- gaze_position_pixel.accuracy = round(video_frame.width * float(gaze_accuracy_mm) / float(tobii_camera_hfov_mm))
+ gaze_position_pixel.precision = round(video_frame.width * float(gaze_precision_mm) / float(tobii_camera_hfov_mm))
# Draw gaze
gaze_position_pixel.draw(video_frame.matrix)
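
The hunk above converts Tobii's angular specification into a pixel radius at the measured gaze depth: tan(ACCURACY) * depth yields millimetres of precision, tan(CAMERA_HFOV / 2) * depth yields the camera's half field of view in millimetres, and their ratio scales the frame width. A standalone sketch with assumed constants (the real values live in TobiiSpecifications, which this diff does not show):

    import numpy

    ACCURACY = 0.5       # degrees, assumed value
    CAMERA_HFOV = 82.    # degrees, assumed value
    depth_mm = 1000.     # gaze position 3D depth in mm, assumed value
    frame_width = 1920   # scene camera frame width in px, assumed value

    gaze_precision_mm = numpy.tan(numpy.deg2rad(ACCURACY)) * depth_mm
    tobii_camera_hfov_mm = numpy.tan(numpy.deg2rad(CAMERA_HFOV / 2)) * depth_mm
    gaze_precision_px = round(frame_width * gaze_precision_mm / tobii_camera_hfov_mm)

    print(gaze_precision_px)  # about 19 px with these assumed values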
diff --git a/src/argaze/utils/tobii_segment_gaze_movements_export.py b/src/argaze/utils/tobii_segment_gaze_movements_export.py
index 9b6ee5c..fd36200 100644
--- a/src/argaze/utils/tobii_segment_gaze_movements_export.py
+++ b/src/argaze/utils/tobii_segment_gaze_movements_export.py
@@ -25,7 +25,7 @@ def main():
parser.add_argument('-a', '--aoi', metavar='AOI_NAME', type=str, default=None, help='aoi name where to project gaze', required=True)
parser.add_argument('-t', '--time_range', metavar=('START_TIME', 'END_TIME'), nargs=2, type=float, default=(0., None), help='start and end time (in second)')
parser.add_argument('-di', '--dispersion_threshold', metavar='DISPERSION_THRESHOLD', type=int, default=50, help='dispersion threshold in pixel')
- parser.add_argument('-du', '--duration_threshold', metavar='DURATION_THRESHOLD', type=int, default=100, help='duration threshold in millisecond')
+ parser.add_argument('-du', '--duration_threshold', metavar='DURATION_THRESHOLD', type=int, default=200, help='duration threshold in millisecond')
parser.add_argument('-o', '--output', metavar='OUT', type=str, default=None, help='destination folder path (segment folder by default)')
parser.add_argument('-w', '--window', metavar='DISPLAY', type=bool, default=True, help='enable window display', action=argparse.BooleanOptionalAction)
args = parser.parse_args()
@@ -102,7 +102,7 @@ def main():
# Access to timestamped gaze 3D positions data buffer
tobii_ts_gaze_positions_3d = tobii_segment_data['GazePosition3D']
- # Format tobii gaze position and accuracy in pixel and project it in aoi scene
+ # Format tobii gaze position and precision in pixel and project it in aoi scene
ts_gaze_positions = GazeFeatures.TimeStampedGazePositions()
# Gaze projection metrics
@@ -150,13 +150,13 @@ def main():
# Test gaze position 3d validity
if tobii_gaze_position_3d.validity == 0:
- gaze_accuracy_mm = numpy.sin(numpy.deg2rad(TobiiSpecifications.ACCURACY)) * tobii_gaze_position_3d.value[2]
+ gaze_precision_mm = numpy.sin(numpy.deg2rad(TobiiSpecifications.ACCURACY)) * tobii_gaze_position_3d.value[2]
tobii_camera_hfov_mm = numpy.sin(numpy.deg2rad(TobiiSpecifications.CAMERA_HFOV)) * tobii_gaze_position_3d.value[2]
- gaze_accuracy_px = round(tobii_segment_video.width * float(gaze_accuracy_mm) / float(tobii_camera_hfov_mm))
+ gaze_precision_px = round(tobii_segment_video.width * float(gaze_precision_mm) / float(tobii_camera_hfov_mm))
# Edit gaze position
- gaze_position = GazeFeatures.GazePosition(gaze_position_px, accuracy=gaze_accuracy_px)
+ gaze_position = GazeFeatures.GazePosition(gaze_position_px, precision=gaze_precision_px)
# Project gaze position into selected aois
if selected_aoi.contains_point(gaze_position.value):
@@ -239,7 +239,7 @@ def main():
# Export gaze metrics
# Consider only fixations > duration threshold and saccades < duration threshold
- # This is mostly usefull to filter first and last fixation/saccade as the time range can start anywhere
+ # This is mostly useful to filter first and last fixation/saccade as the time range can start anywhere
filtered_fixations = fixations_dataframe[fixations_dataframe.duration > args.duration_threshold*1e3]
filtered_saccades = saccades_dataframe[saccades_dataframe.duration < args.duration_threshold*1e3]
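
The duration threshold change above (100 ms to 200 ms) feeds this filtering step, which drops fixations shorter than the threshold and saccades longer than it so that the truncated first and last movements of an arbitrary time range are excluded. A minimal pandas sketch of the filter (column name taken from this diff; the 1e3 factor assumes durations stored in microseconds against a threshold given in milliseconds):

    import pandas

    duration_threshold = 200  # ms, the new default

    fixations_dataframe = pandas.DataFrame({'duration': [50e3, 250e3, 400e3]})  # µs
    saccades_dataframe = pandas.DataFrame({'duration': [30e3, 250e3]})          # µs

    filtered_fixations = fixations_dataframe[fixations_dataframe.duration > duration_threshold * 1e3]
    filtered_saccades = saccades_dataframe[saccades_dataframe.duration < duration_threshold * 1e3]

    print(len(filtered_fixations), len(filtered_saccades))  # 2 1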
diff --git a/src/argaze/utils/tobii_stream_aruco_plan_display.py b/src/argaze/utils/tobii_stream_aruco_plan_display.py
deleted file mode 100644
index 16fc8ef..0000000
--- a/src/argaze/utils/tobii_stream_aruco_plan_display.py
+++ /dev/null
@@ -1,360 +0,0 @@
-#!/usr/bin/env python
-
-import argparse
-import os, json
-import math
-import threading
-
-from argaze import DataStructures
-from argaze import GazeFeatures
-from argaze.TobiiGlassesPro2 import *
-from argaze.ArUcoMarkers import *
-from argaze.AreaOfInterest import *
-from argaze.utils import MiscFeatures
-
-import cv2 as cv
-import numpy
-
-def make_rotation_matrix(x, y, z):
-
- # Create rotation matrix around x axis
- c = numpy.cos(numpy.deg2rad(x))
- s = numpy.sin(numpy.deg2rad(x))
- Rx = numpy.array([[1, 0, 0], [0, c, -s], [0, s, c]])
-
- # Create rotation matrix around y axis
- c = numpy.cos(numpy.deg2rad(y))
- s = numpy.sin(numpy.deg2rad(y))
- Ry = numpy.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])
-
- # Create rotation matrix around z axis
- c = numpy.cos(numpy.deg2rad(z))
- s = numpy.sin(numpy.deg2rad(z))
- Rz = numpy.array([[c, -s, 0], [s, c, 0], [0, 0, 1]])
-
- # Return intrinsic rotation matrix
- return Rx.dot(Ry.dot(Rz))
-
-def main():
- """
- Track ArUcoPlan into Tobii Glasses Pro 2 camera video stream.
- """
-
- # Manage arguments
- parser = argparse.ArgumentParser(description=main.__doc__.split('-')[0])
- parser.add_argument('-t', '--tobii_ip', metavar='TOBII_IP', type=str, default=None, help='tobii glasses ip')
- parser.add_argument('-c', '--camera_calibration', metavar='CAM_CALIB', type=str, default=None, help='json camera calibration filepath')
- parser.add_argument('-p', '--aruco_tracker_configuration', metavar='TRACK_CONFIG', type=str, default=None, help='json aruco tracker configuration filepath')
- parser.add_argument('-i', '--imu_calibration', metavar='IMU_CALIB', type=str, default=None, help='json imu calibration filepath')
- parser.add_argument('-ac', '--aruco_plan', metavar='ARUCO_PLAN', type=str, help='json aruco plan description filepath')
- parser.add_argument('-s', '--aoi_scene', metavar='AOI_SCENE', type=str, help='obj aoi 3D scene description filepath')
- parser.add_argument('-w', '--window', metavar='DISPLAY', type=bool, default=True, help='enable window display', action=argparse.BooleanOptionalAction)
- args = parser.parse_args()
-
- # Create tobii controller (with auto discovery network process if no ip argument is provided)
- print('\nLooking for a Tobii Glasses Pro 2 device ...')
-
- try:
-
- tobii_controller = TobiiController.TobiiController(args.tobii_ip)
- print(f'Tobii Glasses Pro 2 device found at {tobii_controller.address} address.')
-
- except ConnectionError as e:
-
- print(e)
- exit()
-
- # Setup camera at 25 fps to work on Full HD video stream
- tobii_controller.set_scene_camera_freq_25()
-
- # Print current confirugration
- print(f'Tobii Glasses Pro 2 configuration:')
- for key, value in tobii_controller.get_configuration().items():
- print(f'\t{key}: {value}')
-
- # Enable tobii data stream
- tobii_data_stream = tobii_controller.enable_data_stream()
-
- # Enable tobii video stream
- tobii_video_stream = tobii_controller.enable_video_stream()
-
- # Load aruco plan description
- aruco_plan = ArUcoPlan.ArUcoPlan(args.aruco_plan)
- aruco_plan.print_cache()
-
- # Load AOI 3D scene centered onto aruco plan
- aoi3D_scene = AOI3DScene.AOI3DScene()
- aoi3D_scene.load(args.aoi_scene)
-
- print(f'\nAOI in {os.path.basename(args.aoi_scene)} scene related to ArPlan:')
- for aoi in aoi3D_scene.keys():
- print(f'\t{aoi}')
-
- # Create aruco camera
- aruco_camera = ArUcoCamera.ArUcoCamera()
-
- # Load calibration file
- if args.camera_calibration != None:
-
- aruco_camera.load_calibration_file(args.camera_calibration)
-
- else:
-
- raise UserWarning('.json camera calibration filepath required. Use -c option.')
-
- # Create aruco tracker
- aruco_tracker = ArUcoTracker.ArUcoTracker(aruco_plan.dictionary, aruco_plan.marker_size, aruco_camera)
-
- # Load specific configuration file
- if args.aruco_tracker_configuration != None:
-
- aruco_tracker.load_configuration_file(args.aruco_tracker_configuration)
-
- print(f'\nArUcoTracker configuration for markers detection:')
- aruco_tracker.print_configuration()
-
- # Create tobii imu handler to track head pose changes when arcuco plan pose can't be estimated
- # So, the resulting head pose is relative to last pose estimation
- tobii_imu = TobiiInertialMeasureUnit.TobiiInertialMeasureUnit()
-
- # Load optional imu calibration file
- if args.imu_calibration != None:
-
- tobii_imu.load_calibration_file(args.imu_calibration)
-
- # Init tobii imu lock
- tobii_imu_lock = threading.Lock()
-
- # TEST : DIFF ACC
- last_accl = numpy.zeros(3)
-
- # Init data timestamped in millisecond
- data_ts_ms = 0
-
- # Assess temporal performance
- loop_chrono = MiscFeatures.TimeProbe()
- loop_ps = 0
-
- def data_stream_callback(data_ts, data_object, data_object_type):
-
- nonlocal tobii_imu
- nonlocal tobii_imu_lock
- nonlocal data_ts_ms
-
- #TEST
- nonlocal last_accl
-
- data_ts_ms = data_ts / 1e3
-
- # Don't update imu when it is used
- if tobii_imu_lock.locked():
- return
-
- # Lock tobii imu updates
- tobii_imu_lock.acquire()
-
- match data_object_type:
-
- case 'Gyroscope':
-
- data_object = tobii_imu.apply_gyroscope_offset(data_object)
-
- tobii_imu.update_rotation(data_ts, data_object)
-
-
- case 'Accelerometer':
- pass
- '''
- print('raw accelerometer(m/s2)=', data_object.value)
-
- # TEST :
- diff_accl = last_accl - numpy.array(data_object.value)
- last_accl = numpy.array(data_object.value)
- print('\tdiff(cm/s2)=', 100 * numpy.linalg.norm(diff_accl))
-
- # TEST : ignore acceleration double
- if numpy.linalg.norm(diff_accl) > 0.:
-
- data_object = tobii_imu.apply_accelerometer_coefficients(data_object)
-
- print('corrected accelerometer(m/s2)=', data_object.value)
-
- print('current plumb=', tobii_imu.get_plumb())
-
- data_object = tobii_imu.apply_plumb(data_object)
-
- print('corrected accelerometer - gravity(m/s2)=', data_object.value)
- print('\tnorm(cm/s2)=', 100 * numpy.linalg.norm(data_object.value))
-
- tobii_imu.update_translation(data_ts, data_object)
- '''
- # Unlock tobii imu updates
- tobii_imu_lock.release()
-
- tobii_data_stream.reading_callback = data_stream_callback
-
- # Start streaming
- tobii_controller.start_streaming()
-
- # Live video stream capture loop
- try:
-
- # Assess loop performance
- loop_chrono = MiscFeatures.TimeProbe()
- fps = 0
-
- # Track aruco plan pose
- aruco_plan_tvec = numpy.zeros(3)
- aruco_plan_rvec = numpy.zeros(3)
- aruco_plan_success = False
- aruco_plan_validity = False
- aruco_plan_ts_ms = 0
-
- while tobii_video_stream.is_alive():
-
- # Read video stream
- video_ts, video_frame = tobii_video_stream.read()
- video_ts_ms = video_ts / 1e3
-
- # Copy video frame to edit visualisation on it without disrupting aruco tracking
- visu_frame = video_frame.copy()
-
- # Process video and data frame
- try:
-
- # Track markers with pose estimation
- aruco_tracker.track(video_frame.matrix)
- #aruco_tracker.draw_tracked_markers(visu_frame.matrix)
-
- # Estimate plan pose from tracked markers
- tvec, rvec, success, validity = aruco_plan.estimate_pose(aruco_tracker.tracked_markers)
-
- # Plan pose estimation succeed and is validated by 1 faces at least
- if success and validity >= 1:
-
- # Lock tobii imu updates
- tobii_imu_lock.acquire()
-
- # Reset head rotation, translation and translation speed (cm/s)
- # Note : head translation speed is also estimated thanks to accelerometer sensor (see upward)
- tobii_imu.reset_rotation()
- #tobii_imu.reset_translation(translation_speed = (tvec - aruco_plan_tvec) / (video_ts_ms - aruco_plan_ts_ms))
-
- # Create a rotation matrix to transform plan rotation from camera referential to imu referential
- F = make_rotation_matrix(*TobiiInertialMeasureUnit.CAMERA_TO_IMU_ROTATION_VECTOR)
- R, _ = cv.Rodrigues(rvec)
- rvec_flipped, _ = cv.Rodrigues(F.dot(R))
-
- # Update head plumb orientation with flipped plan orientation
- tobii_imu.rotate_plumb(rvec_flipped)
-
- # Unlock tobii imu updates
- tobii_imu_lock.release()
-
- # Store plan pose
- aruco_plan_tvec = tvec
- aruco_plan_rvec = rvec
- aruco_plan_success = success
- aruco_plan_validity = validity
- aruco_plan_ts_ms = video_ts_ms
-
- # Plan pose estimation fails
- elif aruco_plan_success:
-
- # Use tobii glasses inertial sensors to estimate plan pose from last estimated pose
-
- # Translate plan into imu referential
- imu_tvec = aruco_plan_tvec + numpy.array(TobiiInertialMeasureUnit.CAMERA_TO_IMU_TRANSLATION_VECTOR)
-
- # Translate plan according head translation
- imu_tvec = imu_tvec + tobii_imu.translation
-
- # Rotate plan around imu origin according head rotation
- imu_rvec = tobii_imu.rotation * numpy.array([-1., -1., 1.])
- imu_R = make_rotation_matrix(*imu_rvec)
- new_imu_tvec = imu_tvec.dot(imu_R)
-
- # Translate back plan into camera referential
- new_tvec = new_imu_tvec - numpy.array(TobiiInertialMeasureUnit.CAMERA_TO_IMU_TRANSLATION_VECTOR)
-
- # Rotate plan orientation (supposing plan top is up in )
- imu_rvec = tobii_imu.rotation * numpy.array([1., -1., 1.])
- imu_R = make_rotation_matrix(*imu_rvec)
-
- C, _ = cv.Rodrigues(aruco_plan_rvec)
- C = C.dot(imu_R)
- new_rvec, _ = cv.Rodrigues(C)
- #new_rvec = aruco_plan_rvec
-
- # Set plan pose estimation
- aruco_plan.translation = new_tvec
- aruco_plan.rotation = new_rvec
-
- else:
-
- raise UserWarning('Plan pose estimation fails.')
-
- # Project AOI 3 scene onto camera frame
-
- # DON'T APPLY CAMERA DISTORSION : it projects points which are far from the frame into it
- # This hack isn't realistic but as the gaze will mainly focus on centered AOI, where the distorsion is low, it is acceptable.
- aoi2D_scene = aoi3D_scene.project(aruco_plan.translation, aruco_plan.rotation, aruco_camera.K)
-
- # Draw projected scene
- #aoi2D_scene.draw(visu_frame.matrix)
-
- # Draw markers pose estimation
- #aruco_tracker.draw_tracked_markers(visu_frame.matrix)
-
- # Draw plan pose estimation (without camera distorsion)
- aruco_plan.draw(visu_frame.matrix, aruco_camera.K, ArUcoCamera.D0, draw_places=True)
-
- # Warn about plan pose validity
- if not aruco_plan_validity:
-
- raise UserWarning('Plan pose estimation is not validated.')
-
- # Write warning
- except UserWarning as w:
-
- cv.rectangle(visu_frame.matrix, (0, 100), (600, 150), (127, 127, 127), -1)
- cv.putText(visu_frame.matrix, str(w), (20, 140), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv.LINE_AA)
-
- # Assess loop performance
- lap_time, lap_counter, elapsed_time = loop_chrono.lap()
-
- # Update fps each 10 loops
- if lap_counter >= 10:
-
- loop_ps = 1e3 * lap_counter / elapsed_time
- loop_chrono.restart()
-
- # Draw center
- cv.line(visu_frame.matrix, (int(visu_frame.width/2) - 50, int(visu_frame.height/2)), (int(visu_frame.width/2) + 50, int(visu_frame.height/2)), (255, 150, 150), 1)
- cv.line(visu_frame.matrix, (int(visu_frame.width/2), int(visu_frame.height/2) - 50), (int(visu_frame.width/2), int(visu_frame.height/2) + 50), (255, 150, 150), 1)
-
- # Write stream timing
- cv.rectangle(visu_frame.matrix, (0, 0), (1100, 50), (63, 63, 63), -1)
- cv.putText(visu_frame.matrix, f'Data stream time: {int(data_ts_ms)} ms', (20, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
- cv.putText(visu_frame.matrix, f'Video delay: {int(data_ts_ms - video_ts_ms)} ms', (550, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
- cv.putText(visu_frame.matrix, f'Fps: {int(loop_ps)}', (950, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv.LINE_AA)
-
- cv.imshow(f'Stream ArUcoPlan', visu_frame.matrix)
-
- # Close window using 'Esc' key
- if cv.waitKey(1) == 27:
- break
-
- # Exit on 'ctrl+C' interruption
- except KeyboardInterrupt:
- pass
-
- # Stop frame display
- cv.destroyAllWindows()
-
- # Stop streaming
- tobii_controller.stop_streaming()
-
-if __name__ == '__main__':
-
- main()
\ No newline at end of file
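
The deleted make_rotation_matrix helper composed per-axis rotations as Rx . Ry . Rz, which corresponds to an intrinsic x-y-z Euler convention. A sketch verifying that reading against SciPy (SciPy is an assumption here; the deleted script used only numpy and cv2):

    import numpy
    from scipy.spatial.transform import Rotation

    def make_rotation_matrix(x, y, z):
        # Same composition as the deleted helper
        rx, ry, rz = numpy.deg2rad([x, y, z])
        c, s = numpy.cos(rx), numpy.sin(rx)
        Rx = numpy.array([[1, 0, 0], [0, c, -s], [0, s, c]])
        c, s = numpy.cos(ry), numpy.sin(ry)
        Ry = numpy.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])
        c, s = numpy.cos(rz), numpy.sin(rz)
        Rz = numpy.array([[c, -s, 0], [s, c, 0], [0, 0, 1]])
        return Rx.dot(Ry.dot(Rz))

    # Uppercase 'XYZ' selects intrinsic rotations in scipy
    R_ref = Rotation.from_euler('XYZ', [30, 20, 10], degrees=True).as_matrix()
    assert numpy.allclose(make_rotation_matrix(30, 20, 10), R_ref)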
diff --git a/src/argaze/utils/tobii_stream_display.py b/src/argaze/utils/tobii_stream_display.py
index 2611b35..34e09f1 100644
--- a/src/argaze/utils/tobii_stream_display.py
+++ b/src/argaze/utils/tobii_stream_display.py
@@ -109,10 +109,10 @@ def main():
# Ignore frame when gaze position 3D is not valid
if data_object.validity == 0:
- gaze_accuracy_mm = numpy.tan(numpy.deg2rad(TobiiSpecifications.ACCURACY)) * data_object.value[2]
+ gaze_precision_mm = numpy.tan(numpy.deg2rad(TobiiSpecifications.ACCURACY)) * data_object.value[2]
tobii_camera_hfov_mm = numpy.tan(numpy.deg2rad(TobiiSpecifications.CAMERA_HFOV / 2)) * data_object.value[2]
- gaze_position_px.accuracy = round(video_frame.width * float(gaze_accuracy_mm) / float(tobii_camera_hfov_mm))
+ gaze_position_px.precision = round(video_frame.width * float(gaze_precision_mm) / float(tobii_camera_hfov_mm))
tobii_data_stream.reading_callback = data_stream_callback