Browse Source

Files bzgl optischer Kalibrierung

wokoeck_ch 2 years ago
parent
commit
2bfdd525dd

+ 21 - 0
raspberry-pi/sensors/camera_calibration-master/LICENSE

@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2020 Abhishek Padalkar
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

+ 18 - 0
raspberry-pi/sensors/camera_calibration-master/README.md

@@ -0,0 +1,18 @@
+# Calibrate camera with arUco markers using opencv and python. 
+(Code developed and tested on opencv 3.3.1)
+
+
+# camera_calibration
+Code and resources for camera calibration using arUco markers and opencv 
+
+1. Print the aruco marker board provided. (You can generate your own board, see the code "camera_calibration.py")
+2. Take around 50 images of the printed board pasted on a flat card-board, from different angles. Use the data_generation node for this.
+3. Set path to store images first.
+(See sample images of arUco board in aruco_data folder)
+
+## Calibrating camera
+1. Measure the length of the side of an individual marker and the spacing between two markers.
+2. Input above data (length and spacing) in "camera_calibration.py".
+3. Set path to stored images of aruco marker.
+4. Set calibrate_camera flag to true.
+

BIN
raspberry-pi/sensors/camera_calibration-master/aruco_marker_board.pdf


+ 115 - 0
raspberry-pi/sensors/camera_calibration-master/camera_calibration.py

@@ -0,0 +1,115 @@
"""
Calibrate a camera using the arUco marker board provided with this code,
or validate an existing calibration against a live camera feed.

With ``calibrate_camera = True`` the script reads board images from
``aruco_data``, computes the camera matrix and distortion coefficients,
and stores them in ``calibration.yaml``.  With ``False`` it loads that
file and draws the detected board pose on the live camera stream.

This code assumes that the images used for calibration show the same
arUco marker board that is provided with the code.
"""

import cv2
from cv2 import aruco
import yaml
import numpy as np
from pathlib import Path
from tqdm import tqdm

# Root directory of the repo for relative path specification.
root = Path(__file__).parent.absolute()

# Set this flag True for calibrating the camera and False for validating
# the results in real time.
calibrate_camera = True

# Path to the stored calibration images.
calib_imgs_path = root.joinpath("aruco_data")

# Marker dictionary -- must match the format of the printed markers.
aruco_dict = aruco.getPredefinedDictionary(aruco.DICT_6X6_1000)

# Length of one marker side.  Measurement unit is centimetre.
markerLength = 3.75

# Separation between neighbouring markers.  Measurement unit is centimetre.
markerSeparation = 0.5

# Create the 4x5 arUco grid board matching the printed sheet.
board = aruco.GridBoard_create(4, 5, markerLength, markerSeparation, aruco_dict)

# Uncomment the following block to draw and show the board:
# img = board.draw((864, 1080))
# cv2.imshow("aruco", img)

arucoParams = aruco.DetectorParameters_create()

if calibrate_camera:
    img_list = []
    calib_fnms = calib_imgs_path.glob('*.jpg')  # adjust to *.png for our markers
    print('Using ...', end='')
    for idx, fn in enumerate(calib_fnms):
        print(idx, '', end='')
        img = cv2.imread(str(root.joinpath(fn)))
        if img is None:
            # Skip unreadable files instead of crashing on img.shape below.
            continue
        img_list.append(img)
        h, w, c = img.shape
    print('Calibration images')

    counter, corners_list, id_list = [], [], []
    first = True
    for im in tqdm(img_list):
        img_gray = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            img_gray, aruco_dict, parameters=arucoParams)
        if ids is None:
            # No markers detected in this image; skip it, otherwise the
            # np.vstack calls below would fail on None.
            continue
        if first:
            corners_list = corners
            id_list = ids
            first = False
        else:
            corners_list = np.vstack((corners_list, corners))
            id_list = np.vstack((id_list, ids))
        counter.append(len(ids))
    # Report the accumulated ids, not just those of the last image
    # (the original passed `ids`, which only covered the final frame).
    print('Found {} unique markers'.format(np.unique(id_list)))

    counter = np.array(counter)
    print("Calibrating camera .... Please wait...")
    ret, mtx, dist, rvecs, tvecs = aruco.calibrateCameraAruco(
        corners_list, id_list, counter, board, img_gray.shape, None, None)

    print("Camera matrix is \n", mtx, "\n And is stored in calibration.yaml file along with distortion coefficients : \n", dist)
    data = {'camera_matrix': np.asarray(mtx).tolist(), 'dist_coeff': np.asarray(dist).tolist()}
    with open("calibration.yaml", "w") as f:
        yaml.dump(data, f)

else:
    camera = cv2.VideoCapture(0)
    ret, img = camera.read()

    # safe_load avoids executing arbitrary YAML tags and silences the
    # PyYAML "load without Loader" deprecation warning.
    with open('calibration.yaml') as f:
        loadeddict = yaml.safe_load(f)
    mtx = np.array(loadeddict.get('camera_matrix'))
    dist = np.array(loadeddict.get('dist_coeff'))

    ret, img = camera.read()
    img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    h, w = img_gray.shape[:2]
    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))

    pose_r, pose_t = [], []
    while True:
        ret, img = camera.read()
        img_aruco = img
        im_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        h, w = im_gray.shape[:2]
        dst = cv2.undistort(im_gray, mtx, dist, None, newcameramtx)
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            dst, aruco_dict, parameters=arucoParams)
        # cv2.imshow("original", img_gray)
        if len(corners) == 0:
            # NOTE: the original tested `corners == None`, which is always
            # False for a list, so this branch never ran.
            print("pass")
        else:
            # Estimate the pose of the whole board from all detected markers.
            ret, rvec, tvec = aruco.estimatePoseBoard(corners, ids, board, newcameramtx, dist)
            print("Rotation ", rvec, "Translation", tvec)
            if ret != 0:
                img_aruco = aruco.drawDetectedMarkers(img, corners, ids, (0, 255, 0))
                # Axis length 10 can be changed according to your requirement.
                img_aruco = aruco.drawAxis(img_aruco, newcameramtx, dist, rvec, tvec, 10)

            if cv2.waitKey(0) & 0xFF == ord('q'):
                break
        cv2.imshow("World co-ordinate frame axes", img_aruco)

cv2.destroyAllWindows()

+ 28 - 0
raspberry-pi/sensors/camera_calibration-master/data_generation.py

@@ -0,0 +1,28 @@
'''This script is for generating calibration data.
1. Adjust ``path`` below to the directory where images should be stored.
2. Press 'c' to capture an image and display it.
3. Press any button to continue.
4. Press 'q' (after a capture) to quit.
'''

import cv2
from pathlib import Path

camera = cv2.VideoCapture(0)
ret, img = camera.read()

# Directory where the captured images are stored.  The original hard-coded
# absolute path was commented out, which made `name = path + ...` raise a
# NameError on the first loop iteration; default to an ``aruco_data``
# folder next to this script instead.
path = str(Path(__file__).parent.absolute() / "aruco_data") + "/"

count = 0
while True:
    name = path + str(count) + ".jpg"
    ret, img = camera.read()
    cv2.imshow("img", img)

    if cv2.waitKey(20) & 0xFF == ord('c'):
        cv2.imwrite(name, img)
        cv2.imshow("img", img)
        count += 1
        if cv2.waitKey(0) & 0xFF == ord('q'):
            break

+ 11 - 0
raspberry-pi/sensors/camera_calibration-master/importante.txt

@@ -0,0 +1,11 @@
+### bzgl.Optische Kalibrierung ###
+
+-   da es schon funktioniert hat, müssen (nur) einige Parameter angepasst werden bzw. der Code so umgeschrieben werden, sodass die distortion coefficients, sowie
+    die camera matrix, nicht in einer .yaml, sondern in der config.ini gespeichert werden
+-   dafür den ConfigParser verwenden
+    Links:  https://docs.python.org/3/library/configparser.html
+            https://wiki.python.org/moin/ConfigParserExamples
+            https://learntutorials.net/de/python/topic/9186/configparser
+-   Wie es genau funktioniert steht in den Comments beider .py-Dateien
+-   Wie man es benutzt, steht in der README.md
+-   (Wenn möglich) data_generation.py & camera_calibration.py als eine datei (nicht pflicht)

BIN
raspberry-pi/sensors/camera_calibration-master/markers.png