opticalSensor.py 6.1 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184
  1. #!/bin/python3
  2. import time
  3. import math
  4. import threading
  5. import traceback
  6. import queue
  7. import numpy as np
  8. import cv2
  9. import cv2.aruco as aruco
  10. if __name__ != "__main__":
  11. from sensors.calibration import CalibrationStateMashine
  12. import logHandler
# 4x4-bit dictionary with 50 distinct marker ids; shared by marker generation and detection
aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)
  14. def saveMarkers():
  15. image = np.zeros((400,400), np.uint8)
  16. image[:,:] = (255)
  17. image[ 50: 50 + 100, 50: 50 + 100] = aruco_dict.drawMarker(0, 100)
  18. image[ 50: 50 + 100,-50 - 100:-50] = aruco_dict.drawMarker(1, 100)
  19. cv2.imwrite("markers.png", image)
# default detector tuning parameters, used by every aruco.detectMarkers call
parameters = aruco.DetectorParameters_create()
  21. def find_marker(image, debug=True):
  22. gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
  23. if debug:
  24. image[:,:,0] = gray
  25. image[:,:,1] = gray
  26. image[:,:,2] = gray
  27. corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
  28. if debug:
  29. for corner in rejectedImgPoints:
  30. print(corner)
  31. c = corner[0]
  32. cv2.line(image, tuple(c[0]), tuple(c[1]), (0, 255, 255), 1)
  33. cv2.line(image, tuple(c[1]), tuple(c[2]), (0, 255, 255), 1)
  34. cv2.line(image, tuple(c[2]), tuple(c[3]), (0, 255, 255), 1)
  35. cv2.line(image, tuple(c[3]), tuple(c[0]), (0, 255, 255), 1)
  36. markers = [None] * 4
  37. if not corners:
  38. return markers
  39. for corner, id in zip(corners, ids.flatten()):
  40. # calculate the average of all 4 corners
  41. c = corner[0]
  42. cX = sum(c[:,0]) / 4
  43. cY = sum(c[:,1]) / 4
  44. if debug:
  45. # draw the bounding box of the ArUCo detection
  46. cv2.line(image, tuple(c[0]), tuple(c[1]), (0, 255, 0), 2)
  47. cv2.line(image, tuple(c[1]), tuple(c[2]), (0, 255, 0), 2)
  48. cv2.line(image, tuple(c[2]), tuple(c[3]), (0, 255, 0), 2)
  49. cv2.line(image, tuple(c[3]), tuple(c[0]), (0, 255, 0), 2)
  50. cv2.putText(image, str(id), (int(c[0][0]), int(c[0][1]) - 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
  51. cv2.circle(image, (int(cX), int(cY)), 4, (0, 0, 255), -1)
  52. if id < 4:
  53. markers[id] = (cX, cY)
  54. return markers
  55. def measureDistances(image, debug=True):
  56. markers = find_marker(image, debug)
  57. if markers[0] and markers[1]:
  58. # the distance between marker 0 and marker 1
  59. distance = np.sqrt((markers[0][0]-markers[1][0])**2 + (markers[0][1]-markers[1][1])**2)
  60. # the center point between marker 0 and 1
  61. centerX = (markers[0][0]+markers[1][0])/2
  62. centerY = (markers[0][1]+markers[1][1])/2
  63. if debug:
  64. cv2.line(image, (int(markers[0][0]),int(markers[0][1])), (int(markers[1][0]),int(markers[1][1])), (255, 0, 0), 2)
  65. cv2.line(image, (0, 20), (int(centerX), 20), (255, 0, 0), 2)
  66. cv2.line(image, (int(centerX), int(centerY)), (int(centerX), 0), (0, 255, 0), 2)
  67. cv2.circle(image, (int(centerX), int(centerY)), 4, (0, 0, 255), -1)
  68. cv2.putText(image, "%.2fpx" % (distance), (int(markers[0][0]), int(markers[0][1]+25)), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 3)
  69. cv2.putText(image, "%.2fpx" % (centerX) , (25, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 3)
  70. # normalize pixels to percent of horizontal resolution
  71. return centerX/image.shape[1], distance/image.shape[1]
  72. else:
  73. return None
  74. class OpticalSensor():
  75. def __init__(self, conf):
  76. self.conf = conf
  77. self.queue = queue.Queue()
  78. self.calibration_state = CalibrationStateMashine()
  79. self.field_height = float(conf["field"]["y"])
  80. self.field_width = float(conf["field"]["x"])
  81. self.target_distance = float(conf["opt_sensor"]["target_distance"])
  82. self.x_offset = float(conf["opt_sensor"]["x_offset"])
  83. self.fov = float(conf["opt_sensor"]["fov"])
  84. self.log_handler = logHandler.get_log_handler()
  85. self.showImage = conf["opt_sensor"]["debug_image"] == "yes"
  86. self._thread = threading.Thread(target=self._getFrames, args=())
  87. self.success = False
  88. self.running = True
  89. def start(self):
  90. self.log_handler.log_and_print("start optical sensor")
  91. self._thread.start()
  92. def _getFrames(self):
  93. try:
  94. dev = int(self.conf["opt_sensor"]["capture_device"])
  95. except ValueError:
  96. dev = self.conf["opt_sensor"]["capture_device"]
  97. self.cap = cv2.VideoCapture(dev)
  98. while self.running:
  99. self.success, image = self.cap.read()
  100. if self.success:
  101. values = measureDistances(image, self.showImage)
  102. if values:
  103. self.calibrate(values)
  104. position = self.calculate_position(values)
  105. if position != None:
  106. self.pass_to_gui(position + values)
  107. if self.showImage:
  108. cv2.imshow("Camera capture with ARUCO detection", image)
  109. if cv2.waitKey(1) & 0xFF == ord('q'):
  110. self.showImage = False
  111. cv2.destroyAllWindows()
  112. else:
  113. self.log_handler.log_and_print("optical sensor: image capture error")
  114. time.sleep(15)
  115. def calibrate(self, values):
  116. pass
  117. def stop(self):
  118. self.running = False
  119. self._thread.join()
  120. self.log_handler.log_and_print("stopped optical sensor")
  121. def calculate_position(self, values):
  122. centerX_perc, distance_perc = values
  123. # distance (as a percentage of visible_width_at_y)
  124. # \ <--------> /<= beta
  125. # \ | / /
  126. # \ y| / /
  127. # \ |/ /
  128. # x_off \__/
  129. #<---------># <= camera with FoV/2 = alpha
  130. # extrapolate total width from measured fiducial distance
  131. visible_width_at_y = self.target_distance / distance_perc
  132. # 50% is is center of camera, so we shift it by x_offset to align with the system
  133. x = self.x_offset + visible_width_at_y * (centerX_perc-0.5)
  134. alpha = (self.fov/2) / 180 * math.pi
  135. beta = (90 - self.fov/2) / 180 * math.pi
  136. y = visible_width_at_y/2 / math.sin(alpha) * math.sin(beta)
  137. return (x, y)
  138. def pass_to_gui(self, data):
  139. self.queue.put(("data", data))
  140. if __name__ == "__main__":
  141. saveMarkers()
  142. cap = cv2.VideoCapture(1)
  143. while True:
  144. success, image = cap.read()
  145. print(measureDistances(image))
  146. cv2.imshow("image", image)
  147. if cv2.waitKey(1) & 0xFF == ord('q'):
  148. break