#!/bin/python3
import time
import threading
import traceback

import numpy as np
import cv2
import cv2.aruco as aruco

aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)
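# NOTE (added): Dictionary_get() and the Dictionary.drawMarker() calls below use the
# legacy aruco bindings (opencv-contrib-python < 4.7). On OpenCV >= 4.7 the rough
# equivalents would be (a sketch only, not verified against this script):
#   aruco_dict = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)
#   marker_img = aruco.generateImageMarker(aruco_dict, 0, 100)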
def saveMarkers():
    # Render a 400x400 white sheet with markers 0-3 in the four corners,
    # each 100x100 px with a 50 px margin, and save it for printing.
    image = np.zeros((400, 400), np.uint8)
    image[:, :] = 255

    image[ 50: 50 + 100,  50: 50 + 100] = aruco_dict.drawMarker(0, 100)
    image[-50 - 100:-50,  50: 50 + 100] = aruco_dict.drawMarker(1, 100)
    image[ 50: 50 + 100, -50 - 100:-50] = aruco_dict.drawMarker(2, 100)
    image[-50 - 100:-50, -50 - 100:-50] = aruco_dict.drawMarker(3, 100)

    cv2.imwrite("markers.png", image)
parameters = aruco.DetectorParameters_create()
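# NOTE (added): DetectorParameters_create() and aruco.detectMarkers() below are also
# the legacy API. On OpenCV >= 4.7 one would build a detector object instead, roughly
# (again a sketch, not tested here):
#   parameters = aruco.DetectorParameters()
#   detector = aruco.ArucoDetector(aruco_dict, parameters)
#   corners, ids, rejected = detector.detectMarkers(gray)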
def find_marker(image):
    # Detect ArUco markers in the frame and return the centre points of
    # markers 0-3 (None for any marker that was not seen).
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)

    markers = [None] * 4
    if not corners:
        return markers
    for corner, marker_id in zip(corners, ids.flatten()):
        c = corner[0]
        # cv2 drawing functions expect integer pixel coordinates
        pts = [(int(x), int(y)) for x, y in c]

        # Draw the bounding box of the ArUco detection
        cv2.line(image, pts[0], pts[1], (0, 255, 0), 2)
        cv2.line(image, pts[1], pts[2], (0, 255, 0), 2)
        cv2.line(image, pts[2], pts[3], (0, 255, 0), 2)
        cv2.line(image, pts[3], pts[0], (0, 255, 0), 2)
        cv2.putText(image, str(marker_id), (pts[0][0], pts[0][1] - 15),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

        # Marker centre = mean of its four corners
        cX = sum(c[:, 0]) / 4
        cY = sum(c[:, 1]) / 4
        cv2.circle(image, (int(cX), int(cY)), 4, (0, 0, 255), -1)
        if marker_id < 4:
            markers[marker_id] = (cX, cY)
    return markers
def measureDistances(image):
    # Distance (in pixels) between the centres of markers 0 and 1,
    # or None if either marker is missing from the frame.
    markers = find_marker(image)
    if markers[0] and markers[1]:
        cv2.line(image, (int(markers[0][0]), int(markers[0][1])),
                 (int(markers[1][0]), int(markers[1][1])), (255, 0, 0), 2)

        # Euclidean distance and the horizontal (x-axis) separation
        length = np.sqrt((markers[0][0] - markers[1][0]) ** 2 + (markers[0][1] - markers[1][1]) ** 2)
        middle = abs(markers[0][0] - markers[1][0])
        return middle, length
    else:
        return None
class OpticalSensor():
    def __init__(self):
        self.cap = cv2.VideoCapture(0)
        self._t = None
        self.values = None

    def start(self):
        # Start the background capture thread (only once).
        if not self._t:
            self._t = threading.Thread(target=self._getFrames, args=())
            self._t.daemon = True  # thread dies when the main thread (the only non-daemon thread) exits
            self._t.start()

    def _getFrames(self):
        # Continuously grab frames and update the latest measurement.
        while True:
            success, image = self.cap.read()
            if success:
                self.values = measureDistances(image)
                print("opt:", self.values)

    def calibrate(self, x, y):
        pass

    def read(self):
        return self.values
if __name__ == "__main__":
    # opt = OpticalSensor()
    # opt.start()
    # while True:
    #     time.sleep(1)

    cap = cv2.VideoCapture(0)
    while True:
        success, image = cap.read()
        if not success:
            continue
        markers = find_marker(image)
        if markers[0] and markers[1]:
            cv2.line(image, (int(markers[0][0]), int(markers[0][1])),
                     (int(markers[1][0]), int(markers[1][1])), (255, 0, 0), 2)
            length = np.sqrt((markers[0][0] - markers[1][0]) ** 2 + (markers[0][1] - markers[1][1]) ** 2)
            cv2.putText(image, "%.2fpx" % length, (int(markers[0][0]), int(markers[0][1])),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 0), 3)
        cv2.imshow("image", image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
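# NOTE (added): a hedged sketch, not part of the original script. `length` above is a
# pixel distance; if the physical side length of a printed marker is known, it can be
# scaled to real-world units, assuming the markers and the measured segment lie in
# roughly the same plane facing the camera. MARKER_SIDE_MM is a hypothetical example
# value, and side_px would be one marker edge measured inside find_marker(),
# e.g. np.linalg.norm(c[1] - c[0]):
#   MARKER_SIDE_MM = 40.0
#   length_mm = length * MARKER_SIDE_MM / side_px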