  1. #!/bin/python3
  2. import time
  3. import math
  4. import threading
  5. import traceback
  6. import queue
  7. import numpy as np
  8. import cv2
  9. import cv2.aruco as aruco
# When imported as a module (not run as a standalone script), pull in the
# project-local calibration state machine and log handler; the script path
# below only needs OpenCV.
if __name__ != "__main__":
    from sensors.calibration import CalibrationStateMashine
    import logHandler

# 4x4-bit marker dictionary with 50 ids; this module uses marker ids 0-3
aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)
  14. def saveMarkers():
  15. image = np.zeros((400,400), np.uint8)
  16. image[:,:] = (255)
  17. image[ 50: 50 + 100, 50: 50 + 100] = aruco_dict.drawMarker(0, 100)
  18. image[ 50: 50 + 100,-50 - 100:-50] = aruco_dict.drawMarker(1, 100)
  19. cv2.imwrite("markers.png", image)
# default detector parameters, shared by every find_marker() call
parameters = aruco.DetectorParameters_create()
  21. def find_marker(image, debug=True):
  22. gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
  23. if debug:
  24. image[:,:,0] = gray
  25. image[:,:,1] = gray
  26. image[:,:,2] = gray
  27. corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
  28. if debug:
  29. for corner in rejectedImgPoints:
  30. c = corner[0]
  31. cv2.line(image, tuple(c[0]), tuple(c[1]), (0, 255, 255), 1)
  32. cv2.line(image, tuple(c[1]), tuple(c[2]), (0, 255, 255), 1)
  33. cv2.line(image, tuple(c[2]), tuple(c[3]), (0, 255, 255), 1)
  34. cv2.line(image, tuple(c[3]), tuple(c[0]), (0, 255, 255), 1)
  35. markers = [None] * 4
  36. if not corners:
  37. return markers
  38. for corner, id in zip(corners, ids.flatten()):
  39. # calculate the average of all 4 corners
  40. c = corner[0]
  41. cX = sum(c[:,0]) / 4
  42. cY = sum(c[:,1]) / 4
  43. if debug:
  44. # draw the bounding box of the ArUCo detection
  45. cv2.line(image, tuple(c[0]), tuple(c[1]), (0, 255, 0), 2)
  46. cv2.line(image, tuple(c[1]), tuple(c[2]), (0, 255, 0), 2)
  47. cv2.line(image, tuple(c[2]), tuple(c[3]), (0, 255, 0), 2)
  48. cv2.line(image, tuple(c[3]), tuple(c[0]), (0, 255, 0), 2)
  49. cv2.putText(image, str(id), (int(c[0][0]), int(c[0][1]) - 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
  50. cv2.circle(image, (int(cX), int(cY)), 4, (0, 0, 255), -1)
  51. if id < 4:
  52. markers[id] = (cX, cY)
  53. return markers
  54. def measureDistances(image, debug=True):
  55. markers = find_marker(image, debug)
  56. if markers[0] and markers[1]:
  57. # the distance between marker 0 and marker 1
  58. distance = np.sqrt((markers[0][0]-markers[1][0])**2 + (markers[0][1]-markers[1][1])**2)
  59. # the center point between marker 0 and 1
  60. centerX = (markers[0][0]+markers[1][0])/2
  61. centerY = (markers[0][1]+markers[1][1])/2
  62. if debug:
  63. cv2.line(image, (int(markers[0][0]),int(markers[0][1])), (int(markers[1][0]),int(markers[1][1])), (255, 0, 0), 2)
  64. cv2.line(image, (0, 20), (int(centerX), 20), (255, 0, 0), 2)
  65. cv2.line(image, (int(centerX), int(centerY)), (int(centerX), 0), (0, 255, 0), 2)
  66. cv2.circle(image, (int(centerX), int(centerY)), 4, (0, 0, 255), -1)
  67. cv2.putText(image, "%.2fpx" % (distance), (int(markers[0][0]), int(markers[0][1]+25)), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 3)
  68. cv2.putText(image, "%.2fpx" % (centerX) , (25, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 3)
  69. # normalize pixels to percent of horizontal resolution
  70. return centerX/image.shape[1], distance/image.shape[1]
  71. else:
  72. return None
  73. class OpticalSensor():
  74. def __init__(self, conf):
  75. self.conf = conf
  76. self.queue = queue.Queue()
  77. self.calibration_state = CalibrationStateMashine()
  78. self.field_height = float(conf["field"]["y"])
  79. self.field_width = float(conf["field"]["x"])
  80. self.target_distance = float(conf["opt_sensor"]["target_distance"])
  81. self.x_offset = float(conf["opt_sensor"]["x_offset"])
  82. self.fov = float(conf["opt_sensor"]["fov"])
  83. self.log_handler = logHandler.get_log_handler()
  84. self.showImage = conf["opt_sensor"]["debug_image"] == "yes"
  85. self._thread = threading.Thread(target=self._getFrames, args=())
  86. self.success = False
  87. self.running = True
  88. def start(self):
  89. self.log_handler.log_and_print("start optical sensor")
  90. self._thread.start()
  91. def _getFrames(self):
  92. try:
  93. dev = int(self.conf["opt_sensor"]["capture_device"])
  94. except ValueError:
  95. dev = self.conf["opt_sensor"]["capture_device"]
  96. self.cap = cv2.VideoCapture(dev)
  97. while self.running:
  98. self.success, image = self.cap.read()
  99. if self.success:
  100. values = measureDistances(image, self.showImage)
  101. if values:
  102. self.calibrate(values)
  103. position = self.calculate_position(values)
  104. if position != None:
  105. self.pass_to_gui(position + values)
  106. if self.showImage:
  107. cv2.imshow("Camera capture with ARUCO detection", image)
  108. if cv2.waitKey(1) & 0xFF == ord('q'):
  109. self.showImage = False
  110. cv2.destroyAllWindows()
  111. else:
  112. self.log_handler.log_and_print("optical sensor: image capture error")
  113. time.sleep(15)
  114. def calibrate(self, values):
  115. pass
  116. def stop(self):
  117. self.running = False
  118. self._thread.join()
  119. self.log_handler.log_and_print("stopped optical sensor")
  120. def calculate_position(self, values):
  121. centerX_perc, distance_perc = values
  122. # distance (as a percentage of visible_width_at_y)
  123. # \ <--------> /<= beta
  124. # \ | / /
  125. # \ y| / /
  126. # \ |/ /
  127. # x_off \__/
  128. #<---------># <= camera with FoV/2 = alpha
  129. # extrapolate total width from measured fiducial distance
  130. visible_width_at_y = self.target_distance / distance_perc
  131. # 50% is is center of camera, so we shift it by x_offset to align with the system
  132. x = self.x_offset + visible_width_at_y * (centerX_perc-0.5)
  133. alpha = (self.fov/2) / 180 * math.pi
  134. beta = (90 - self.fov/2) / 180 * math.pi
  135. y = visible_width_at_y/2 / math.sin(alpha) * math.sin(beta)
  136. return (x, y)
  137. def pass_to_gui(self, data):
  138. self.queue.put(("data", data))
  139. if __name__ == "__main__":
  140. saveMarkers()
  141. cap = cv2.VideoCapture(1)
  142. while True:
  143. success, image = cap.read()
  144. print(measureDistances(image))
  145. cv2.imshow("image", image)
  146. if cv2.waitKey(1) & 0xFF == ord('q'):
  147. break