# camera_calibration.py
"""
Calibrate a camera from still images of an ArUco marker grid board.

This code assumes that images used for calibration are of the same arUco
marker board provided with code.
"""
import cv2
from cv2 import aruco
import yaml
import numpy as np
from pathlib import Path
from tqdm import tqdm
# Root directory of repo for relative path specification.
root = Path(__file__).parent.absolute()
# Set this flag True for calibrating camera and False for validating results real time.
calibrate_camera = True
# Set path to the images.
calib_imgs_path = root.joinpath("aruco_data")
# For validating results, show aruco board to camera.
aruco_dict = aruco.getPredefinedDictionary( aruco.DICT_6X6_1000 )  # NOTE: set this to the dictionary format of our markers
# Provide length of the marker's side.
markerLength = 3.75 # Here, measurement unit is centimetre.
# Provide separation between markers.
markerSeparation = 0.5 # Here, measurement unit is centimetre.
# Create the arUco board: 4 x 5 markers with the sizes configured above.
board = aruco.GridBoard_create(4, 5, markerLength, markerSeparation, aruco_dict) # NOTE(review): the GridBoard could be imported from opticalSensor.py instead of being recreated here — confirm
'''uncomment following block to draw and show the board'''
#img = board.draw((864,1080))
#cv2.imshow("aruco", img)
arucoParams = aruco.DetectorParameters_create()
  28. if calibrate_camera == True:
  29. img_list = []
  30. calib_fnms = calib_imgs_path.glob('*.jpg') # auf png bzgl. unserer Marker
  31. print('Using ...', end='')
  32. for idx, fn in enumerate(calib_fnms):
  33. print(idx, '', end='')
  34. img = cv2.imread( str(root.joinpath(fn) ))
  35. img_list.append( img )
  36. h, w, c = img.shape
  37. print('Calibration images')
  38. counter, corners_list, id_list = [], [], []
  39. first = True
  40. for im in tqdm(img_list):
  41. img_gray = cv2.cvtColor(im,cv2.COLOR_RGB2GRAY)
  42. corners, ids, rejectedImgPoints = aruco.detectMarkers(img_gray, aruco_dict, parameters=arucoParams)
  43. if first == True:
  44. corners_list = corners
  45. id_list = ids
  46. first = False
  47. else:
  48. corners_list = np.vstack((corners_list, corners))
  49. id_list = np.vstack((id_list,ids))
  50. counter.append(len(ids))
  51. print('Found {} unique markers'.format(np.unique(ids)))
  52. counter = np.array(counter)
  53. print ("Calibrating camera .... Please wait...")
  54. #mat = np.zeros((3,3), float)
  55. ret, mtx, dist, rvecs, tvecs = aruco.calibrateCameraAruco(corners_list, id_list, counter, board, img_gray.shape, None, None )
  56. print("Camera matrix is \n", mtx, "\n And is stored in calibration.yaml file along with distortion coefficients : \n", dist)
  57. data = {'camera_matrix': np.asarray(mtx).tolist(), 'dist_coeff': np.asarray(dist).tolist()}
  58. with open("calibration.yaml", "w") as f: # !!!
  59. yaml.dump(data, f)
  60. else:
  61. camera = cv2.VideoCapture(0)
  62. ret, img = camera.read()
  63. with open('calibration.yaml') as f: # !!!
  64. loadeddict = yaml.load(f)
  65. mtx = loadeddict.get('camera_matrix')
  66. dist = loadeddict.get('dist_coeff')
  67. mtx = np.array(mtx)
  68. dist = np.array(dist)
  69. ret, img = camera.read()
  70. img_gray = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)
  71. h, w = img_gray.shape[:2]
  72. newcameramtx, roi=cv2.getOptimalNewCameraMatrix(mtx,dist,(w,h),1,(w,h))
  73. pose_r, pose_t = [], []
  74. while True:
  75. ret, img = camera.read()
  76. img_aruco = img
  77. im_gray = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)
  78. h, w = im_gray.shape[:2]
  79. dst = cv2.undistort(im_gray, mtx, dist, None, newcameramtx)
  80. corners, ids, rejectedImgPoints = aruco.detectMarkers(dst, aruco_dict, parameters=arucoParams)
  81. #cv2.imshow("original", img_gray)
  82. if corners == None:
  83. print ("pass")
  84. else:
  85. ret, rvec, tvec = aruco.estimatePoseBoard(corners, ids, board, newcameramtx, dist) # For a board
  86. print ("Rotation ", rvec, "Translation", tvec)
  87. if ret != 0:
  88. img_aruco = aruco.drawDetectedMarkers(img, corners, ids, (0,255,0))
  89. img_aruco = aruco.drawAxis(img_aruco, newcameramtx, dist, rvec, tvec, 10) # axis length 100 can be changed according to your requirement
  90. if cv2.waitKey(0) & 0xFF == ord('q'):
  91. break;
  92. cv2.imshow("World co-ordinate frame axes", img_aruco)
  93. cv2.destroyAllWindows()