Team 3038 Code Release: Vision Tracking
This year, if possible, we are going to be offboarding our vision tracking onto a Raspberry Pi. We have two programs we run. The calibration program lets us hook the camera to our laptop and adjust the HSV values until they are just right, which makes it extremely easy to adjust to different lighting conditions. The code on the Raspberry Pi is set up to track the object and relay the values over NetworkTables.
Calibration Code:
Code:
import cv2
import numpy as np

# no-op callback required by createTrackbar
def nothing(x):
    pass

# capture video
cap = cv2.VideoCapture(0)
cv2.namedWindow('mask')

# create trackbars for the lower and upper HSV bounds
cv2.createTrackbar('Hue Lower','mask',0,180,nothing)
cv2.createTrackbar('Saturation Lower','mask',0,255,nothing)
cv2.createTrackbar('Value Lower','mask',0,255,nothing)
cv2.createTrackbar('Hue Upper','mask',0,180,nothing)
cv2.createTrackbar('Saturation Upper','mask',0,255,nothing)
cv2.createTrackbar('Value Upper','mask',0,255,nothing)

while True:
    # get the camera feed
    ret, frame = cap.read()
    # get the values from the trackbars (names must match the ones created above)
    hl = cv2.getTrackbarPos('Hue Lower','mask')
    sl = cv2.getTrackbarPos('Saturation Lower','mask')
    vl = cv2.getTrackbarPos('Value Lower','mask')
    hu = cv2.getTrackbarPos('Hue Upper','mask')
    su = cv2.getTrackbarPos('Saturation Upper','mask')
    vu = cv2.getTrackbarPos('Value Upper','mask')
    # put the trackbar values into the bound arrays
    lower_bound = np.array([hl,sl,vl])
    upper_bound = np.array([hu,su,vu])
    # perform a color conversion from BGR to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # mask the values based on the trackbar positions
    mask = cv2.inRange(hsv,lower_bound,upper_bound)
    # display the mask as the output
    cv2.imshow('mask',mask)
    # break the while loop if the Esc key is pressed
    k = cv2.waitKey(1) & 0xFF
    if k == 27:
        break

# release the camera and close all windows
cap.release()
cv2.destroyAllWindows()
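The tuned bounds have to be copied into the Raspberry Pi script by hand. One optional addition (not part of the script above) is to print the final trackbar positions when Esc is pressed, so the arrays can be pasted straight into lower_bound and upper_bound in the Pi script:
Code:
    # sketch: replaces the plain break above, still inside the while loop
    k = cv2.waitKey(1) & 0xFF
    if k == 27:
        # print the tuned bounds in a copy/paste friendly form
        print('lower_bound = np.array([%d,%d,%d])' % (hl, sl, vl))
        print('upper_bound = np.array([%d,%d,%d])' % (hu, su, vu))
        break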
Raspberry Pi Code:
Code:
import cv2
import numpy as np
from networktables import NetworkTable
import logging

logging.basicConfig(level=logging.DEBUG)

# connect to the robot as a NetworkTables client
NetworkTable.setIPAddress("192.168.2.171")
NetworkTable.setClientMode()
NetworkTable.initialize()
sd = NetworkTable.getTable("SmartDashboard")

# tuning placeholders (the loop below currently uses hard-coded values)
h_min = 0
h_max = 0
s_min = 0
s_max = 0
v_min = 0
v_max = 0
kernel_size = 3
erode_iterations = 1
dilate_iterations = 1

# camera geometry
camera_width = 320
camera_height = 240
center_x = 160
center_y = 120
angle_of_camera = 68.5
# degrees of rotation per pixel of horizontal error
degrees_per_x_error = -angle_of_camera / camera_width

cap = cv2.VideoCapture(0)
#cap = cv2.VideoCapture("http://axis-camera.local/view/viewer_index.shtml?id=$
i = 0
while True:
    # get the camera feed
    ret, frame = cap.read()
    # resize the camera frame
    # frame = cv2.resize(frame,(320,240))
    # HSV bounds found with the calibration program
    lower_bound = np.array([42,0,235])
    upper_bound = np.array([67,255,255])
    # perform a color conversion from BGR to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # mask the values based on the bounds
    mask = cv2.inRange(hsv,lower_bound,upper_bound)
    # erode to remove small noise
    kernel = np.ones((2,2),np.uint8)
    erosion = cv2.erode(mask,kernel,iterations = 1)
    # dilate to fill the target back in
    dilate = cv2.dilate(erosion,kernel,iterations = 4)
    # contour filtering
    # CHAIN_APPROX_SIMPLE assumed for the contour approximation flag
    image, contours, hierarchy = cv2.findContours(dilate,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    contourMax = 0
    contourMaxPerimeter = 0
    contourX = 0
    contourY = 0
    for contour in contours:
        M = cv2.moments(contour)
        if (M['m00'] != 0):
            cx = int(M['m10']/M['m00'])
            cy = int(M['m01']/M['m00'])
            area = cv2.contourArea(contour)
            perimeter = cv2.arcLength(contour,True)
            # keep the contour with the largest perimeter
            if perimeter > contourMaxPerimeter:
                contourMax = contour
                contourMaxPerimeter = perimeter
                contourX = cx
                contourY = cy
    # sd.putNumber('Contour',i)
    # i += 1
    # publish the tracking results to the robot
    sd.putDouble('Contour X',contourX)
    sd.putDouble('Contour Y',contourY)
    sd.putDouble('X Error',center_x - contourX)
    sd.putDouble('Y Error',center_y - contourY)
    sd.putDouble('Rotate Error',degrees_per_x_error * (center_x - contourX))
    sd.putDouble('Perimeter', contourMaxPerimeter)
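The values published above can be read back out of the SmartDashboard table by the robot code or any other NetworkTables client. As a minimal sketch (assuming a second client, for example a driver-station laptop, connecting to the same robot address and key names used in the script above), the published values can be checked like this:
Code:
import time
from networktables import NetworkTable

# same robot address the Pi script connects to (assumed to be the NetworkTables server)
NetworkTable.setIPAddress("192.168.2.171")
NetworkTable.setClientMode()
NetworkTable.initialize()
sd = NetworkTable.getTable("SmartDashboard")

while True:
    # 0.0 is returned until the Pi publishes a value
    rotate_error = sd.getNumber('Rotate Error', 0.0)
    perimeter = sd.getNumber('Perimeter', 0.0)
    print('Rotate Error: %.2f deg  Perimeter: %.1f' % (rotate_error, perimeter))
    time.sleep(0.1)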