@@ -30,13 +30,22 @@ parameters = aruco.DetectorParameters_create()
 
 def find_marker(image, debug=True):
-    # Our operations on the frame come here
     gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
 
     if debug:
         image[:,:,0] = gray
         image[:,:,1] = gray
         image[:,:,2] = gray
 
     corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
+
+    if debug:
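+        # outline every rejected candidate in yellow; helpful when tuning the detector parameters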
+        for corner in rejectedImgPoints:
+            print(corner)
+            c = corner[0]
+            cv2.line(image, tuple(c[0]), tuple(c[1]), (0, 255, 255), 1)
+            cv2.line(image, tuple(c[1]), tuple(c[2]), (0, 255, 255), 1)
+            cv2.line(image, tuple(c[2]), tuple(c[3]), (0, 255, 255), 1)
+            cv2.line(image, tuple(c[3]), tuple(c[0]), (0, 255, 255), 1)
 
     markers = [None] * 4
@@ -44,12 +53,14 @@ def find_marker(image, debug=True):
         return markers
 
     for corner, id in zip(corners, ids.flatten()):
-        # draw the bounding box of the ArUCo detection
+        # calculate the average of all 4 corners
         c = corner[0]
         cX = sum(c[:,0]) / 4
         cY = sum(c[:,1]) / 4
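+        # (cX, cY) is the center of the marker in pixel coordinates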
 
         if debug:
+            # draw the bounding box of the ArUco detection
             cv2.line(image, tuple(c[0]), tuple(c[1]), (0, 255, 0), 2)
             cv2.line(image, tuple(c[1]), tuple(c[2]), (0, 255, 0), 2)
             cv2.line(image, tuple(c[2]), tuple(c[3]), (0, 255, 0), 2)
@@ -67,8 +78,9 @@ def measureDistances(image, debug=True):
     markers = find_marker(image, debug)
 
     if markers[0] and markers[1]:
-
+        # the pixel distance between marker 0 and marker 1
         distance = np.sqrt((markers[0][0]-markers[1][0])**2 + (markers[0][1]-markers[1][1])**2)
+        # the center point between markers 0 and 1
         centerX = (markers[0][0]+markers[1][0])/2
         centerY = (markers[0][1]+markers[1][1])/2
 
@@ -80,6 +92,8 @@
         cv2.putText(image, "%.2fpx" % (distance), (int(markers[0][0]), int(markers[0][1]+25)), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 3)
         cv2.putText(image, "%.2fpx" % (centerX) , (25, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 3)
 
+        # normalize pixels to a fraction (0-1) of the horizontal resolution
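+        # (image.shape is (height, width, channels), so shape[1] is the image width)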
         return centerX/image.shape[1], distance/image.shape[1]
 
     else:
@@ -97,16 +111,15 @@ class OpticalSensor():
         self.x_offset = float(conf["opt_sensor"]["x_offset"])
         self.fov = float(conf["opt_sensor"]["fov"])
         self.log_handler = logHandler.get_log_handler()
-        self._t = None
+        self.showImage = conf["opt_sensor"]["debug_image"] == "yes"
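+        # create the capture thread once up front; start() only launches it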
+        self._thread = threading.Thread(target=self._getFrames, args=())
         self.success = False
         self.running = True
-        self.showImage = True
 
     def start(self):
         self.log_handler.log_and_print("start optical sensor")
-        if not self._t:
-            self._t = threading.Thread(target=self._getFrames, args=())
-            self._t.start()
+        self._thread.start()
 
     def _getFrames(self):
         try:
@@ -125,7 +138,8 @@ class OpticalSensor():
             if position != None:
                 self.pass_to_gui(position + values)
             if self.showImage:
-                cv2.imshow("image", image)
+                cv2.imshow("Camera capture with ARUCO detection", image)
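+                # 'q' hides the preview window but leaves the capture loop running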
                 if cv2.waitKey(1) & 0xFF == ord('q'):
                     self.showImage = False
                     cv2.destroyAllWindows()
@@ -138,14 +152,23 @@ class OpticalSensor():
 
     def stop(self):
         self.running = False
-        if self._t:
-            self._t.join()
-            self.log_handler.log_and_print("stop optical sensor")
+        self._thread.join()
+        self.log_handler.log_and_print("stopped optical sensor")
 
     def calculate_position(self, values):
         centerX_perc, distance_perc = values
-
+        # distance (as a percentage of visible_width_at_y)
+        #     \ <--------> /<= beta
+        #      \    |     /  /
+        #       \  y|    /  /
+        #        \  |   /  /
+        # x_off   \__/
+        #<---------># <= camera with FoV/2 = alpha
+
+        # extrapolate the total visible width at distance y from the measured fiducial distance
         visible_width_at_y = self.target_distance / distance_perc
+        # 50% is the center of the camera image, so we shift by x_offset to align with the system
         x = self.x_offset + visible_width_at_y * (centerX_perc-0.5)
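+        # convert the half field of view (alpha) and its complement (beta) from degrees to radians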
         alpha = (self.fov/2) / 180 * math.pi
         beta = (90 - self.fov/2) / 180 * math.pi
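
For reference, a quick standalone check of the numbers this geometry produces. It is only a sketch of the lines visible in this hunk; the sample values are made up, and target_distance is assumed to be the known physical distance between marker 0 and marker 1 from the sensor config:

    import math

    # assumed sample configuration (not the project's real values)
    fov = 60.0              # camera field of view in degrees
    target_distance = 0.35  # physical distance between the two markers, e.g. in meters
    x_offset = 0.10         # camera offset from the system origin

    # one sample measurement from measureDistances(), both as fractions of image width
    centerX_perc, distance_perc = 0.6, 0.25

    visible_width_at_y = target_distance / distance_perc      # 0.35 / 0.25 = 1.4
    x = x_offset + visible_width_at_y * (centerX_perc - 0.5)  # 0.10 + 1.4 * 0.1 = 0.24
    alpha = (fov / 2) / 180 * math.pi                         # 30 degrees = 0.5236 rad
    beta = (90 - fov / 2) / 180 * math.pi                     # 60 degrees = 1.0472 rad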