123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168 |
- #!/bin/python3
- import time
- import math
- import threading
- import traceback
- import queue
- import numpy as np
- import cv2
- import cv2.aruco as aruco
- if __name__ != "__main__":
- from sensors.calibration import CalibrationStateMashine
- import logHandler
# 4x4-bit ArUco dictionary with 50 distinct marker ids; shared by both the
# marker generator (saveMarkers) and the detector (find_marker).
aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)
def saveMarkers():
    """Render ArUco markers 0 and 1 onto a white canvas and write markers.png.

    Both markers sit 50 px in from the top edge: marker 0 at the left,
    marker 1 at the right.
    """
    size = 100   # marker side length in pixels
    margin = 50  # distance from the canvas border
    canvas = np.full((400, 400), 255, np.uint8)
    canvas[margin:margin + size, margin:margin + size] = aruco_dict.drawMarker(0, size)
    canvas[margin:margin + size, -margin - size:-margin] = aruco_dict.drawMarker(1, size)
    cv2.imwrite("markers.png", canvas)
# Default ArUco detector parameters, created once at import and reused for
# every detectMarkers call.
parameters = aruco.DetectorParameters_create()
def find_marker(image, debug=True):
    """Detect ArUco markers 0-3 in a BGR frame and return their centers.

    Args:
        image: BGR frame; when debug is True it is overwritten in place with
            the equalized grayscale image plus detection overlays.
        debug: draw bounding boxes, ids and center dots onto the frame.

    Returns:
        A list of 4 entries indexed by marker id; each entry is an (cX, cY)
        pixel-coordinate tuple or None when that marker was not detected.
    """
    # Equalize the histogram to make detection robust against lighting changes.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    im_bw = cv2.equalizeHist(gray)
    if debug:
        # Replace all three channels so the debug window shows the
        # preprocessed image the detector actually sees.
        image[:, :, 0] = im_bw
        image[:, :, 1] = im_bw
        image[:, :, 2] = im_bw
    # Rejected candidates are not needed here.
    corners, ids, _ = aruco.detectMarkers(im_bw, aruco_dict, parameters=parameters)

    markers = [None] * 4
    if not corners:
        return markers
    for corner, marker_id in zip(corners, ids.flatten()):
        # corner has shape (1, 4, 2); c is the 4x2 array of corner points.
        c = corner[0]
        # Centroid of the four corner points (numpy column sums).
        cX = c[:, 0].sum() / 4
        cY = c[:, 1].sum() / 4
        if debug:
            # Draw the bounding box of the ArUco detection.
            cv2.line(image, tuple(c[0]), tuple(c[1]), (0, 255, 0), 2)
            cv2.line(image, tuple(c[1]), tuple(c[2]), (0, 255, 0), 2)
            cv2.line(image, tuple(c[2]), tuple(c[3]), (0, 255, 0), 2)
            cv2.line(image, tuple(c[3]), tuple(c[0]), (0, 255, 0), 2)
            cv2.putText(image, str(marker_id), (int(c[0][0]), int(c[0][1]) - 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(image, (int(cX), int(cY)), 4, (0, 0, 255), -1)
        # Only ids 0-3 are used by this sensor; ignore any others.
        if marker_id < 4:
            markers[marker_id] = (cX, cY)
    return markers
def measureDistances(image, debug=True):
    """Measure the pixel distance between markers 0 and 1.

    Returns (centerX, distance), both normalized by the image width,
    or None when either marker is missing.
    """
    markers = find_marker(image, debug)
    left, right = markers[0], markers[1]
    if not (left and right):
        return None

    dx = left[0] - right[0]
    dy = left[1] - right[1]
    distance = np.sqrt(dx ** 2 + dy ** 2)
    centerX = (left[0] + right[0]) / 2
    centerY = (left[1] + right[1]) / 2

    if debug:
        # Visualize the measured segment, its midpoint and the center offset.
        cv2.line(image, (int(left[0]), int(left[1])), (int(right[0]), int(right[1])), (255, 0, 0), 2)
        cv2.line(image, (0, 20), (int(centerX), 20), (255, 0, 0), 2)
        cv2.line(image, (int(centerX), int(centerY)), (int(centerX), 0), (0, 255, 0), 2)
        cv2.circle(image, (int(centerX), int(centerY)), 4, (0, 0, 255), -1)
        cv2.putText(image, "%.2fpx" % (distance), (int(left[0]), int(left[1] + 25)), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 3)
        cv2.putText(image, "%.2fpx" % (centerX), (25, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 3)

    width = image.shape[1]
    return centerX / width, distance / width
class OpticalSensor():
    """Camera-based position sensor.

    Grabs frames from a capture device on a background thread, measures the
    ArUco marker pair with measureDistances(), converts the normalized
    readings into field coordinates and pushes them onto a queue for the GUI.
    """

    def __init__(self, conf):
        """Read sensor geometry from the config mapping; no I/O happens here."""
        self.conf = conf
        self.queue = queue.Queue()
        self.calibration_state = CalibrationStateMashine()
        self.field_height = float(conf["field"]["y"])
        self.field_width = float(conf["field"]["x"])
        # Real-world distance between the two markers (same unit as field size).
        self.target_distance = float(conf["opt_sensor"]["target_distance"])
        # Horizontal offset of the camera from the field origin.
        self.x_offset = float(conf["opt_sensor"]["x_offset"])
        # Horizontal field of view of the camera in degrees.
        self.fov = float(conf["opt_sensor"]["fov"])
        self.log_handler = logHandler.get_log_handler()
        self._t = None           # capture thread, created lazily in start()
        self.success = False     # last cap.read() status
        self.running = True      # cleared by stop() to end the capture loop
        self.showImage = True    # debug window toggle ('q' disables it)

    def start(self):
        """Start the background capture thread (idempotent)."""
        self.log_handler.log_and_print("start optical sensor")
        if not self._t:
            self._t = threading.Thread(target=self._getFrames, args=())
            self._t.start()

    def _getFrames(self):
        """Capture loop: read frames, measure, and forward positions to the GUI."""
        # The device may be given as an index ("0") or a device path/URL.
        try:
            dev = int(self.conf["opt_sensor"]["capture_device"])
        except ValueError:
            dev = self.conf["opt_sensor"]["capture_device"]
        self.cap = cv2.VideoCapture(dev)
        try:
            while self.running:
                self.success, image = self.cap.read()
                if self.success:
                    values = measureDistances(image, self.showImage)
                    if values:
                        self.calibrate(values)
                        position = self.calculate_position(values)
                        if position is not None:
                            self.pass_to_gui(position + values)
                    if self.showImage:
                        cv2.imshow("image", image)
                        if cv2.waitKey(1) & 0xFF == ord('q'):
                            self.showImage = False
                            cv2.destroyAllWindows()
                else:
                    # Capture failed (device missing/busy); back off before retrying.
                    self.log_handler.log_and_print("optical sensor: image capture error")
                    time.sleep(15)
        finally:
            # Release the device even if the loop exits via an exception.
            self.cap.release()

    def calibrate(self, values):
        """Feed a measurement into the calibration state machine (not implemented)."""
        pass

    def stop(self):
        """Signal the capture loop to end and wait for the thread to finish."""
        self.running = False
        if self._t:
            self._t.join()
        self.log_handler.log_and_print("stop optical sensor")

    def calculate_position(self, values):
        """Convert normalized (centerX, distance) into field (x, y) coordinates.

        The marker pair has a known real-world width (target_distance); its
        apparent normalized width gives the visible field width at that depth,
        from which x (lateral) and y (depth, via the FOV triangle) follow.
        """
        centerX_perc, distance_perc = values
        visible_width_at_y = self.target_distance / distance_perc
        x = self.x_offset + visible_width_at_y * (centerX_perc - 0.5)
        alpha = (self.fov / 2) / 180 * math.pi
        beta = (90 - self.fov / 2) / 180 * math.pi
        # Law of sines on the half-FOV triangle yields the distance to the markers.
        y = visible_width_at_y / 2 / math.sin(alpha) * math.sin(beta)
        return (x, y)

    def pass_to_gui(self, data):
        """Queue a ("data", payload) tuple for the GUI thread."""
        self.queue.put(("data", data))
if __name__ == "__main__":
    # Standalone debug mode: regenerate the marker sheet, then show live
    # measurements from camera 1 until 'q' is pressed.
    saveMarkers()
    cap = cv2.VideoCapture(1)
    try:
        while True:
            success, image = cap.read()
            if not success:
                # A failed read yields image=None, which would crash
                # measureDistances/imshow; stop instead.
                print("image capture error")
                break
            print(measureDistances(image))
            cv2.imshow("image", image)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        # Always release the device and close the debug window.
        cap.release()
        cv2.destroyAllWindows()
|