#!/usr/bin/env python3
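"""Optical position sensor.

Tracks two printed ArUco fiducials in the camera image and derives the
sensor's position on the field from their pixel distance and midpoint.
"""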

import time
import math
import threading
import queue

import numpy as np
import cv2
import cv2.aruco as aruco

# these project-local modules are only needed when running inside the application
if __name__ != "__main__":
    from sensors.calibration import CalibrationStateMashine
    import logHandler
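
# 4x4-bit marker dictionary with 50 ids; this module only uses ids 0-3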
aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)


def saveMarkers():
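    """Render markers 0 and 1 side by side into markers.png for printing."""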
    # white 400x400 canvas with two 100 px markers at a 50 px margin
    image = np.full((400, 400), 255, np.uint8)
    image[50:150, 50:150] = aruco_dict.drawMarker(0, 100)
    image[50:150, -150:-50] = aruco_dict.drawMarker(1, 100)
    cv2.imwrite("markers.png", image)


parameters = aruco.DetectorParameters_create()


def find_marker(image, debug=True):
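    """Detect ArUco markers and return a list of four center points,
    indexed by marker id; undetected ids stay None."""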
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    if debug:
        # show the detector's view: overwrite the color image with its grayscale version
        image[:, :, 0] = gray
        image[:, :, 1] = gray
        image[:, :, 2] = gray
    corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
    if debug:
        # outline rejected candidates in yellow
        for corner in rejectedImgPoints:
            cv2.polylines(image, [corner[0].astype(np.int32)], True, (0, 255, 255), 1)

    # center points indexed by marker id
    markers = [None] * 4
    if not corners:
        return markers
    for corner, marker_id in zip(corners, ids.flatten()):
        # the marker center is the average of its four corners
        c = corner[0]
        cX = c[:, 0].mean()
        cY = c[:, 1].mean()
        if debug:
            # draw the detection outline, the id label and the center point
            cv2.polylines(image, [c.astype(np.int32)], True, (0, 255, 0), 2)
            cv2.putText(image, str(marker_id), (int(c[0][0]), int(c[0][1]) - 15),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(image, (int(cX), int(cY)), 4, (0, 0, 255), -1)
        if marker_id < 4:
            markers[marker_id] = (cX, cY)
    return markers


def measureDistances(image, debug=True):
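    """Return (centerX, distance) between markers 0 and 1, both normalized
    to the horizontal resolution, or None if either marker is missing."""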
    markers = find_marker(image, debug)
    if markers[0] and markers[1]:
        # the pixel distance between marker 0 and marker 1
        distance = np.sqrt((markers[0][0] - markers[1][0]) ** 2 + (markers[0][1] - markers[1][1]) ** 2)
        # the center point between marker 0 and marker 1
        centerX = (markers[0][0] + markers[1][0]) / 2
        centerY = (markers[0][1] + markers[1][1]) / 2
        if debug:
            cv2.line(image, (int(markers[0][0]), int(markers[0][1])),
                     (int(markers[1][0]), int(markers[1][1])), (255, 0, 0), 2)
            cv2.line(image, (0, 20), (int(centerX), 20), (255, 0, 0), 2)
            cv2.line(image, (int(centerX), int(centerY)), (int(centerX), 0), (0, 255, 0), 2)
            cv2.circle(image, (int(centerX), int(centerY)), 4, (0, 0, 255), -1)
            cv2.putText(image, "%.2fpx" % distance, (int(markers[0][0]), int(markers[0][1] + 25)),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 3)
            cv2.putText(image, "%.2fpx" % centerX, (25, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 3)

        # normalize pixels to a fraction of the horizontal resolution
        return centerX / image.shape[1], distance / image.shape[1]
    else:
        return None


class OpticalSensor:
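    """Camera-based position sensor. Runs its own capture thread and
    pushes computed positions into self.queue for the GUI."""
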
    def __init__(self, conf):
        self.conf = conf
        self.queue = queue.Queue()
        self.calibration_state = CalibrationStateMashine()
        self.field_height = float(conf["field"]["y"])
        self.field_width = float(conf["field"]["x"])
        self.target_distance = float(conf["opt_sensor"]["target_distance"])
        self.x_offset = float(conf["opt_sensor"]["x_offset"])
        self.fov = float(conf["opt_sensor"]["fov"])
        self.log_handler = logHandler.get_log_handler()
        self.showImage = conf["opt_sensor"]["debug_image"] == "yes"
        self._thread = threading.Thread(target=self._getFrames)
        self.success = False
        self.running = True

    def start(self):
        self.log_handler.log_and_print("start optical sensor")
        self._thread.start()

    def _getFrames(self):
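        """Capture loop: read frames, measure the marker distances and
        push the resulting position to the GUI."""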
        # the capture device may be a numeric index or a device path/URL
        try:
            dev = int(self.conf["opt_sensor"]["capture_device"])
        except ValueError:
            dev = self.conf["opt_sensor"]["capture_device"]
        self.cap = cv2.VideoCapture(dev)
        while self.running:
            self.success, image = self.cap.read()
            if self.success:
                values = measureDistances(image, self.showImage)
                if values:
                    self.calibrate(values)
                    position = self.calculate_position(values)
                    if position is not None:
                        self.pass_to_gui(position + values)
                if self.showImage:
                    cv2.imshow("Camera capture with ARUCO detection", image)
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        self.showImage = False
                        cv2.destroyAllWindows()
            else:
                self.log_handler.log_and_print("optical sensor: image capture error")
                time.sleep(15)

    def calibrate(self, values):
        # calibration is not implemented yet
        pass

    def stop(self):
        self.running = False
        self._thread.join()
        self.log_handler.log_and_print("stopped optical sensor")

    def calculate_position(self, values):
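        """Convert the normalized measurements into field coordinates (x, y)."""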
        centerX_perc, distance_perc = values
        #           distance (as a fraction of visible_width_at_y)
        #        \   <-------->   / <= beta
        #         \      |       /
        #          \    y|      /
        #           \    |     /
        #    x_off   \   |    /
        # <---------> \__|__/  <= camera with FoV/2 = alpha
        #
        # extrapolate the total visible width from the measured fiducial distance
        visible_width_at_y = self.target_distance / distance_perc
        # 50% is the center of the camera, so shift by x_offset to align with the system
        x = self.x_offset + visible_width_at_y * (centerX_perc - 0.5)
        # right triangle: y = (visible_width_at_y / 2) * sin(beta) / sin(alpha)
        alpha = math.radians(self.fov / 2)
        beta = math.radians(90 - self.fov / 2)
        y = visible_width_at_y / 2 / math.sin(alpha) * math.sin(beta)
        return (x, y)
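
    # Worked example with assumed numbers (not from the project config):
    # fov = 90 deg gives alpha = beta = 45 deg, so y = visible_width_at_y / 2;
    # the camera then sees a strip exactly as wide as it is far away.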

    def pass_to_gui(self, data):
        # the GUI consumes ("data", (x, y, centerX, distance)) tuples
        self.queue.put(("data", data))
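

# Minimal usage sketch (assumed config values; the real ones come from the
# application's configuration):
#
#   conf = {"field": {"x": "2.0", "y": "1.0"},
#           "opt_sensor": {"capture_device": "0", "target_distance": "0.3",
#                          "x_offset": "0.0", "fov": "60", "debug_image": "no"}}
#   sensor = OpticalSensor(conf)
#   sensor.start()
#   ...
#   sensor.stop()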

if __name__ == "__main__":
    # stand-alone test: generate the marker sheet, then show live detections
    saveMarkers()
    cap = cv2.VideoCapture(1)
    while True:
        success, image = cap.read()
        if not success:
            print("image capture error")
            break
        print(measureDistances(image))
        cv2.imshow("image", image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()