Merge branch 'master' into testing

subDesTagesMitExtraKaese · 3 years ago
commit c53814138c

+ 2 - 0
.gitignore

@@ -1,3 +1,5 @@
 .vscode/c_cpp_properties.json
+raspberry-pi/.venv
+raspberry-pi/.vscode/settings.json
 
 __pycache__

+ 49 - 0
raspberry-pi/gui/graph.py

@@ -0,0 +1,49 @@
+import tkinter as tk
+import time, math, noise
+from PIL import Image, ImageDraw, ImageTk
+
+class Graph(tk.Canvas):
+  def __init__(self, root, **kwargs):
+    self.root = root
+    tk.Canvas.__init__(self, root, **kwargs)
+    self.height = self.winfo_reqheight()
+    self.width = self.winfo_reqwidth()
+    self.coord = [(10, 50), (20, 50), (100, 100)]
+    
+    self.drawBackground()
+    self.canvas = self.bg.copy()
+    self.image = self.create_image(0, 0, image=None, anchor='nw')
+
+    self.bind("<Configure>", self.on_resize)
+    self.n = 0
+
+  def drawBackground(self):
+    self.bg = Image.new('RGB', (self.width, self.height), (0,20,0))
+    draw = ImageDraw.Draw(self.bg)
+    draw.line([(0, self.height/2), (self.width, self.height/2)], (0,127,127), 1)
+    draw.line([(self.width/2, 0), (self.width/2, self.height)], (0,127,127), 1)
+
+
+  def on_resize(self,event):
+    # track the new size, clamped to a 400x400 minimum
+    self.width = max(400, event.width-4)
+    self.height = max(400, event.height-4)
+    # resize the canvas widget and the off-screen image to match
+    self.config(width=self.width, height=self.height)
+    self.canvas = self.canvas.resize((self.width, self.height))
+    self.drawBackground()
+    self.canvas = Image.blend(self.canvas, self.bg, 0.1)
+
+  def update(self):
+    self.coord = [self.coord[-1]]
+    for i in range(30):
+      self.coord.append(((noise.pnoise1(self.n)+1)*self.width/2, (noise.pnoise1(self.n*1.3+3)+1)*self.height/2))
+
+      self.n += 0.001
+
+    self.canvas = Image.blend(self.canvas, self.bg, 1/250)
+    draw = ImageDraw.Draw(self.canvas)
+    draw.line(self.coord, fill=(100, 255, 100, 255), width=int(self.height/100), joint='curve')
+
+    self.photo = ImageTk.PhotoImage(self.canvas)
+    self.itemconfig(self.image, image=self.photo)

+ 30 - 0
raspberry-pi/gui/mainWindow.py

@@ -0,0 +1,30 @@
+import tkinter as tk
+import time
+
+from graph import Graph
+
+class MainWindow(tk.Frame):
+  def __init__(self, root):
+    self.root = root
+    tk.Frame.__init__(self, root)
+
+    self.graph = Graph(self)
+    self.graph.pack(fill=tk.BOTH, side=tk.LEFT, expand=True)
+
+    self.controls = tk.Frame(self)
+    self.controls.pack(fill=tk.BOTH, side=tk.RIGHT)
+
+    l = tk.Label(self.controls, text="your widgets go here...", anchor="c")
+    l.pack(side="top", fill="both", expand=True)
+
+  def update(self):
+    self.graph.update()
+    self.root.after(30, self.update)
+
+if __name__ == "__main__":
+  root = tk.Tk()
+  root.title("Tracking System")
+  view = MainWindow(root)
+  view.pack(side="top", fill="both", expand=True)
+  view.update()
+  root.mainloop()

+ 13 - 0
raspberry-pi/install.sh

@@ -0,0 +1,13 @@
+#!/bin/bash
+
+sudo apt update                               || exit 1
+sudo apt upgrade -y                           || exit 1
+
+sudo apt install python3 python3-pip -y       || exit 1
+
+python3 -m pip install --upgrade pip          || exit 1
+python3 -m pip install pyserial               || exit 1
+python3 -m pip install opencv-contrib-python  || exit 1
+python3 -m pip install pillow                 || exit 1
+python3 -m pip install noise                  || exit 1
+sudo apt install python3-tk -y                || exit 1

+ 21 - 1
raspberry-pi/main.py

@@ -1,3 +1,23 @@
-from sensors import *
+import sensors
+import time
+
+distance = 450 #in mm
+
+def main():
+    ac_sensor = sensors.AcusticSensor()
+    ac_sensor.start()
+    ac_sensor.calibrate(distance)
+    try:
+        while True:
+            x,y = ac_sensor.calculate_position()
+            print(x,y)
+            time.sleep(2)
+    except KeyboardInterrupt:
+        print("exit")
+    except Exception as e:
+        print("Error: ",e)
+
+main()
+
 
 

+ 78 - 0
raspberry-pi/markerDetection.py

@@ -0,0 +1,78 @@
+#!/usr/bin/env python3
+
+import numpy as np
+import cv2
+import cv2.aruco as aruco
+
+aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)
+
+def saveMarkers():
+  image = np.zeros((400,400), np.uint8)
+  image[:,:] = (255)
+
+  image[ 50: 50 + 100, 50: 50 + 100] = aruco_dict.drawMarker(0, 100)
+  image[-50 - 100:-50, 50: 50 + 100] = aruco_dict.drawMarker(1, 100)
+  image[ 50: 50 + 100,-50 - 100:-50] = aruco_dict.drawMarker(2, 100)
+  image[-50 - 100:-50,-50 - 100:-50] = aruco_dict.drawMarker(3, 100)
+
+  cv2.imwrite("markers.png", image)
+
+parameters =  aruco.DetectorParameters_create()
+
+def find_marker(image):
+  # convert to grayscale and run ArUco detection on the frame
+  gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+  corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
+  
+  markers = [None] * 4
+
+  if not corners:
+    return markers
+
+  for corner, id in zip(corners, ids.flatten()):
+    # draw the bounding box of the ArUco detection
+    c = corner[0]
+    cv2.line(image, tuple(c[0]), tuple(c[1]), (0, 255, 0), 2)
+    cv2.line(image, tuple(c[1]), tuple(c[2]), (0, 255, 0), 2)
+    cv2.line(image, tuple(c[2]), tuple(c[3]), (0, 255, 0), 2)
+    cv2.line(image, tuple(c[3]), tuple(c[0]), (0, 255, 0), 2)
+    cv2.putText(image, str(id), (int(c[0][0]), int(c[0][1]) - 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
+
+    cX = sum(c[:,0]) / 4
+    cY = sum(c[:,1]) / 4
+    cv2.circle(image, (int(cX), int(cY)), 4, (0, 0, 255), -1)
+
+    if id < 4:
+      markers[id] = (cX, cY)
+
+  return markers
+
+def measureDistances(image):
+  markers = find_marker(image)
+
+  if markers[0] and markers[1]:
+    cv2.line(image, (int(markers[0][0]),int(markers[0][1])), (int(markers[1][0]),int(markers[1][1])), (255, 0, 0), 2)
+
+    length = np.sqrt((markers[0][0]-markers[1][0])**2 + (markers[0][1]-markers[1][1])**2)
+    middle = abs(markers[0][0]-markers[1][0])
+
+    return middle, length
+
+  else:
+    return None
+
+if __name__ == "__main__":
+  cap = cv2.VideoCapture(0) 
+  while True:
+    success, image = cap.read()
+    markers = find_marker(image)
+
+    if markers[0] and markers[1]:
+      cv2.line(image, (int(markers[0][0]),int(markers[0][1])), (int(markers[1][0]),int(markers[1][1])), (255, 0, 0), 2)
+      length = np.sqrt((markers[0][0]-markers[1][0])**2 + (markers[0][1]-markers[1][1])**2)
+
+      cv2.putText(image, "%.2fpx" % (length), (int(markers[0][0]), int(markers[0][1])), cv2.FONT_HERSHEY_SIMPLEX,
+        1.0, (255, 255, 0), 3)
+    cv2.imshow("image", image)
+    if cv2.waitKey(1) & 0xFF == ord('q'):
+      break

BIN
raspberry-pi/markers.png
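
The binary markers.png added above appears to be the sheet that saveMarkers() writes: four DICT_4X4_50 markers (ids 0-3), one per corner of a 400x400 white image. Assuming the opencv-contrib-python build installed by install.sh (which still exposes the legacy aruco.Dictionary_get API), it can be regenerated from the raspberry-pi directory with:

  import markerDetection         # module-level code loads DICT_4X4_50 and the detector parameters
  markerDetection.saveMarkers()  # writes markers.png into the current working directory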


+ 67 - 5
raspberry-pi/sensors.py

@@ -1,11 +1,17 @@
 from connection import ArduinoSlave
+import markerDetection
 import time
+import statistics
+import math
+import threading, cv2
 
 conn = ArduinoSlave()
 
 class AcusticSensor:
   def __init__(self):
-    pass
+    self.sonic_speed_left = 0
+    self.sonic_speed_right = 0
+    self.sensor_distance = 450 #in mm
 
   def start(self):
     if not conn.isConnected():
@@ -15,11 +21,40 @@ class AcusticSensor:
   def _readCb(self, raw):
     print("acc: ", conn.getAcusticRTTs())
 
-  def calibrate(self, x, y):
-    pass
+  def calibrate(self, distance):
+    print("Move gondel to far left corner")
+    input()
+    speed = list()
+    for i in range(100):
+        time_val = [1.312,0]
+        speed.append(distance/time_val[0])
+    self.sonic_speed_left = statistics.mean(speed)
+    print("sonic speed left:",self.sonic_speed_left)
+
+    print("Move gondel to far right corner")
+    input()
+    speed = list()
+    for i in range(100):
+        time_val = [0,1.312]
+        speed.append(distance/time_val[1])
+    self.sonic_speed_right = statistics.mean(speed)
+    print("sonic speed right:",self.sonic_speed_right)
 
   def read(self):
-    return (0, 0)
+    return conn.getAcusticRTTs()
+
+  def calculate_position(self):
+    if not self.sonic_speed_right or not self.sonic_speed_left:
+      print("sensor not calibrated! calibrate now!")
+      return (0,0)
+    else:
+      time_val = self.read()
+      distance_left = time_val[0] * self.sonic_speed_left / 1000 # seconds -> mseconds
+      distance_right = time_val[1] * self.sonic_speed_right / 1000
+      print(distance_left,distance_right)
+      x = (self.sensor_distance**2 - distance_right**2 + distance_left**2) / (2*self.sensor_distance)
+      y = math.sqrt(distance_left**2 - x**2)
+      return (x,y)
 
 class MagneticSensor:
   def __init__(self):
@@ -37,12 +72,39 @@ class MagneticSensor:
     pass
 
   def read(self):
-    return (0, 0)
+    return conn.getMagneticField()
+
+class OpticalSensor():
+  def __init__(self):
+    self.cap = cv2.VideoCapture(0)
+    self._t  = None
+    self.values = None
+
+  def start(self):
+    if not self._t:
+      self._t = threading.Thread(target=self._getFrames, args=())
+      self._t.daemon = True  # thread dies when main thread (only non-daemon thread) exits.
+      self._t.start()
+
+  def _getFrames(self):
+    while True:
+      success, image = self.cap.read()
+      if success:
+        self.values = markerDetection.measureDistances(image)
+        print("opt:", self.values)
+
+  def calibrate(self, x, y):
+    pass
+
+  def read(self):
+    return self.values
 
 if __name__ == "__main__":
   acc = AcusticSensor()
   acc.start()
   mag = MagneticSensor()
   mag.start()
+  opt = OpticalSensor()
+  opt.start()
   while True:
     time.sleep(1)
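
For reference, the geometry behind AcusticSensor.calculate_position above is plain two-circle trilateration: with the left transducer at the origin, the right one at (sensor_distance, 0) and the measured ranges distance_left and distance_right, intersecting x^2 + y^2 = distance_left^2 with (x - sensor_distance)^2 + y^2 = distance_right^2 yields exactly the x and y computed in the method. A minimal standalone sketch with hypothetical readings in mm (no Arduino connection required):

  import math

  def trilaterate(distance_left, distance_right, sensor_distance=450):
    # left sensor at (0, 0), right sensor at (sensor_distance, 0); all lengths in mm
    x = (sensor_distance**2 - distance_right**2 + distance_left**2) / (2 * sensor_distance)
    y = math.sqrt(max(distance_left**2 - x**2, 0))  # clamp so a noisy reading cannot raise a domain error
    return (x, y)

  print(trilaterate(320, 330))  # a gondola roughly centred between the 450 mm spaced sensors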