|
@@ -0,0 +1,237 @@
|
|
|
+#!/usr/bin/python
|
|
|
+import argparse
|
|
|
+
|
|
|
+import tag
|
|
|
+
|
|
|
+import sys
|
|
|
+import cv2
|
|
|
+import time
|
|
|
+import numpy as np
|
|
|
+import os
|
|
|
+from numpy.linalg import norm
|
|
|
+from cmarkers import getCMarkers, get_transform_matrix_points
|
|
|
+
|
|
|
##
# Opens a video capture device for the given camera id and
# returns it. (NOTE(review): the original header claimed
# 800x600 @ 30 FPS, but no resolution/FPS is configured here.)
##
def open_camera(cam_id = 1):
    return cv2.VideoCapture(cam_id)
|
|
|
+
|
|
|
##
# Gets a frame from an open video device, or returns None
# if the capture could not be made.
#
# Parameters:
#  * device: an open capture object exposing read() -> (ok, frame)
##
def get_frame(device):
    ret, img = device.read()
    if not ret:  # failed to capture
        # Fixed: original used Python 2 `print >> sys.stderr, ...`,
        # a syntax error under Python 3 (the file otherwise uses print()).
        print("Error capturing from video device.", file=sys.stderr)
        return None
    return img
|
|
|
+
|
|
|
##
# Closes all OpenCV windows and releases the video capture device
# before exit.
#
# Parameters:
#  * cam_id: camera index (legacy fallback path).
#  * device: optional open capture object; if given, IT is released.
#
# Fixed: the original constructed a brand-new VideoCapture(cam_id)
# and released that temporary, never the device actually in use.
# Passing the open device releases the right handle; the old
# behavior is kept as a fallback for existing callers.
##
def cleanup(cam_id = 0, device = None):
    cv2.destroyAllWindows()
    if device is not None:
        device.release()
    else:
        cv2.VideoCapture(cam_id).release()
|
|
|
+
|
|
|
##
# Creates a new RGB image of the specified size, initially
# filled with black.
#
# Returns an (height, width, 3) uint8 ndarray of zeros.
#
# Fixed: original called `numpy.zeros`, but the file imports the
# module as `np` — every call raised NameError.
##
def new_rgb_image(width, height):
    image = np.zeros( (height, width, 3), np.uint8)
    return image
|
|
|
+
|
|
|
# Global list holding the 4 board-corner points selected by the user.
corner_point_list = []

##
# OpenCV mouse callback: on every left-button press in the window
# it is registered on, records the click position (x, y) into the
# global corner_point_list.
##
def mouse_click_callback(event, x, y, flags, param):
    if event != cv2.EVENT_LBUTTONDOWN:
        return
    corner_point_list.append((x, y))
|
|
|
+
|
|
|
##
# Computes the integer (x, y) centroid of each contour via image
# moments. Contours with zero area (m00 == 0, e.g. degenerate
# single-point contours) are skipped — the original divided by
# m00 unconditionally and crashed on them.
##
def find_centers(contours):
    centers = []
    for contour in contours:
        moments = cv2.moments(contour, True)
        # Guard against division by zero for degenerate contours.
        if moments['m00'] == 0:
            continue
        center = (moments['m10']/moments['m00'], moments['m01']/moments['m00'])
        # Convert floating point contour center into an integer so that
        # we can display it later.
        center = (int(round(center[0])), int(round(center[1])))
        centers.append(center)
    return centers
|
|
|
+
|
|
|
##
# Thresholds a grayscale image at 127 and returns its contours.
#
# Fixed: cv2.findContours returns (image, contours, hierarchy) in
# OpenCV 3.x but (contours, hierarchy) in 4.x; indexing [-2] yields
# the contour list under both versions.
##
def find_contours(image):
    ret, thresh = cv2.threshold(image, 127, 255, 0)
    contours = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2]
    return contours
|
|
|
+
|
|
|
##
# Computes a perspective transform matrix by capturing a single
# frame from a video source and displaying it to the user for
# corner selection.
#
# Parameters:
# * dev: Video Device (from open_camera())
# * board_size: A tuple/list with 2 elements containing the width and height (respectively) of the gameboard (in arbitrary units, like inches)
# * dpi: Scaling factor for elements of board_size
# * calib_file: Optional. If specified, the perspective transform matrix is saved under this filename.
#   This file can be loaded later to bypass the calibration step (assuming nothing has moved).
#
# Returns the 3x3 transform matrix, or None if fewer than 4 points
# were selected before a key was pressed.
##
def get_transform_matrix(dev, board_size, dpi, calib_file = None):
    # Read a frame from the video device and display it to the user.
    img = get_frame(dev)
    cv2.imshow("Calibrate", img)

    # Register the mouse callback on this window. When the user clicks
    # anywhere in the "Calibrate" window, mouse_click_callback() fires
    # and appends the point to the global corner_point_list.
    cv2.setMouseCallback("Calibrate", mouse_click_callback)

    # Wait until the user has selected 4 points or pressed a key.
    while True:
        if len(corner_point_list) >= 4:
            print ("Got 4 points: "+str(corner_point_list))
            break
        # Any keypress aborts the selection loop.
        if cv2.waitKey(10) >= 0:
            break

    # Close the calibration window.
    cv2.destroyWindow("Calibrate")

    if len(corner_point_list) >= 4:
        # src is the 4 points selected on the original image, in the
        # order [TOP_LEFT, BOTTOM_LEFT, TOP_RIGHT, BOTTOM_RIGHT].
        # Fixed: slice to exactly 4 points — getPerspectiveTransform
        # requires a 4x2 array, and a fifth click registered before the
        # loop noticed would previously make this call fail.
        src = np.array(corner_point_list[:4], np.float32)

        # dest is where these 4 points should land on the rectangular
        # board (same order as src).
        dest = np.array( [ (0, 0), (0, board_size[1]*dpi), (board_size[0]*dpi, 0), (board_size[0]*dpi, board_size[1]*dpi) ], np.float32)

        # Calculate the perspective transform matrix.
        trans = cv2.getPerspectiveTransform(src, dest)

        # If we were given a calibration filename, save this matrix to a file.
        if calib_file:
            np.savetxt(calib_file, trans)
        return trans
    else:
        return None
|
|
|
+
|
|
|
##
# Detects bright, green-dominant dots in a BGR image and returns
# their integer centroids (via find_contours/find_centers).
##
def get_green_dots(img_orig):
    b, g, r = cv2.split(img_orig)
    # A pixel counts as "green" when green exceeds both red and blue
    # by a margin of 6 and is reasonably bright (g > 120).
    # NOTE: g - 6 wraps around for uint8 pixels with g < 6, but those
    # pixels are rejected by the g > 120 test anyway.
    gg = g - 6
    green = ((gg > r) & (gg > b) & (g > 120)).astype(np.uint8)*255
    # Morphological opening removes small noise specks from the mask.
    kernel = np.ones((4,4), np.uint8)
    opening = cv2.morphologyEx(green, cv2.MORPH_OPEN, kernel)
    # Fixed: dropped the unused `img = cv2.bitwise_and(...)` result
    # the original computed and discarded.
    contours = find_contours(opening)
    return find_centers(contours)
|
|
|
+
|
|
|
#####################################################
### Calibration Example ###
# Main loop: grabs frames, rectifies the board view with a
# perspective transform, detects markers, reports their positions
# to a TagServer, and overlays debug graphics.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    cam_id = 0

    # NOTE: store_false means these options default to True and the
    # flag turns them OFF; kept as-is for backward compatibility
    # even though the help text reads the other way around.
    parser.add_argument('-a',
                        '--automatic',
                        action='store_false',
                        help='Enable auto detect green dots (default=False)')
    parser.add_argument('-n',
                        '--nocamera',
                        action='store_false',
                        help='Disable camera, players need to send the position (default=False)')

    args = parser.parse_args()
    manual = args.automatic
    camera = args.nocamera

    # Fixed: the original opened the camera unconditionally BEFORE
    # parsing arguments, then overwrote that handle with None —
    # leaking an open device even when --nocamera was passed.
    # Open the capture device only when it will actually be used.
    dev = None
    if camera:
        dev = open_camera(cam_id)

    # The size of the board in inches, measured between the two
    # robot boundaries:
    board_size = [84, 84]
    point = []

    # Number of pixels to display per inch in the final transformed image.
    dpi = 10

    # Size (in pixels) of the transformed image
    transform_size = (int(board_size[0]*dpi), int(board_size[1]*dpi))

    # Calculate the perspective transform matrix interactively when
    # manual calibration is requested and a camera is present.
    transform = None
    if manual and camera:
        transform = get_transform_matrix(dev, board_size, dpi)

    server = tag.TagServer(10318, board_size)

    while True:
        img_orig = np.zeros(transform_size)
        if camera:
            img_orig = get_frame(dev)
        else:
            time.sleep(0.01)

        if img_orig is not None:  # if we did get an image
            img = img_orig
            if camera:
                if not manual:
                    # Auto-calibration: recompute the transform from the
                    # detected green corner dots on every frame.
                    centers = get_green_dots(img_orig)
                    transform = get_transform_matrix_points(centers, board_size, dpi)

                # Apply the transformation matrix to rectify the board view.
                img = cv2.warpPerspective(img_orig, transform, dsize=transform_size)
                markers = getCMarkers(img)

                for idd, p, head in markers:
                    # Report position in board units (pixels / dpi).
                    server.updatePosition(idd, p[0]/dpi, p[1]/dpi, head)
                    p = (int(p[0]), int(p[1]))
                    cv2.circle(img, p, 30, (0,255,0))
                    cv2.putText(img, str(idd), p, cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 0, 255))

            # Overlay each robot's debug lines (board units -> pixels).
            for robot in server.robots():
                for line in robot.lines():
                    p1 = (line[0]*dpi).astype(int)
                    p2 = (line[1]*dpi).astype(int)
                    cv2.line(img, (p1[0], p1[1]), (p2[0], p2[1]), (255, 255, 0))
            cv2.imshow("warped", img)

        else:  # if we failed to capture (camera disconnected?), then quit
            break

        if cv2.waitKey(2) >= 0:
            server.stop()
            break

    cleanup(cam_id)
|