# server.py
  1. #!/usr/bin/python
  2. import argparse
  3. import tag
  4. import sys
  5. import cv2
  6. import time
  7. import numpy as np
  8. import os
  9. from numpy.linalg import norm
  10. from cmarkers import getCMarkers, get_transform_matrix_points
  11. ##
  12. # Opens a video capture device with a resolution of 800x600
  13. # at 30 FPS.
  14. ##
  15. def open_camera(cam_id = 1):
  16. cap = cv2.VideoCapture(cam_id)
  17. return cap
  18. ##
  19. # Gets a frame from an open video device, or returns None
  20. # if the capture could not be made.
  21. ##
  22. def get_frame(device):
  23. ret, img = device.read()
  24. if (ret == False): # failed to capture
  25. print >> sys.stderr, "Error capturing from video device."
  26. return None
  27. return img
  28. ##
  29. # Closes all OpenCV windows and releases video capture device
  30. # before exit.
  31. ##
  32. def cleanup(cam_id = 0):
  33. cv2.destroyAllWindows()
  34. cv2.VideoCapture(cam_id).release()
  35. ##
  36. # Creates a new RGB image of the specified size, initially
  37. # filled with black.
  38. ##
  39. def new_rgb_image(width, height):
  40. image = numpy.zeros( (height, width, 3), numpy.uint8)
  41. return image
# Global variable containing the 4 points selected by the user in the corners of the board
# (filled in click order by mouse_click_callback; consumed by get_transform_matrix).
corner_point_list = []
  44. ##
  45. # This function is called by OpenCV when the user clicks
  46. # anywhere in a window displaying an image.
  47. ##
  48. def mouse_click_callback(event, x, y, flags, param):
  49. if event == cv2.EVENT_LBUTTONDOWN:
  50. # print ("Click at (%d,%d)" % (x,y))
  51. corner_point_list.append( (x,y) )
  52. def find_centers(contours):
  53. centers = []
  54. for contour in contours:
  55. moments = cv2.moments(contour, True)
  56. center = (moments['m10']/moments['m00'] , moments['m01']/moments['m00'])
  57. # Convert floating point contour center into an integer so that
  58. # we can display it later.
  59. center = (int(round(center[0])),int(round(center[1])))
  60. centers.append(center)
  61. return centers
  62. def find_contours(image):
  63. ret,thresh = cv2.threshold(image,127,255,0)
  64. image, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
  65. return contours
##
# Computes a perspective transform matrix by capturing a single
# frame from a video source and displaying it to the user for
# corner selection.
#
# Parameters:
# * dev: Video Device (from open_camera())
# * board_size: A tuple/list with 2 elements containing the width and height (respectively) of the gameboard (in arbitrary units, like inches)
# * dpi: Scaling factor for elements of board_size
# * calib_file: Optional. If specified, the perspective transform matrix is saved under this filename.
# This file can be loaded later to bypass the calibration step (assuming nothing has moved).
#
# Returns the 3x3 perspective transform matrix, or None if the user
# dismissed the window (key press) before selecting 4 points.
##
def get_transform_matrix(dev, board_size, dpi, calib_file = None):
    # Read a frame from the video device
    img = get_frame(dev)
    # Displace image to user
    cv2.imshow("Calibrate", img)
    # Register the mouse callback on this window. When
    # the user clicks anywhere in the "Calibrate" window,
    # the function mouse_click_callback() is called (defined above).
    # Clicks accumulate in the module-level corner_point_list.
    cv2.setMouseCallback("Calibrate", mouse_click_callback)
    # Wait until the user has selected 4 points
    while True:
        # If the user has selected all 4 points, exit loop.
        if (len(corner_point_list) >= 4):
            print ("Got 4 points: "+str(corner_point_list))
            break
        # If the user hits a key, exit loop, otherwise remain.
        # (waitKey also services the GUI event loop so clicks register.)
        if (cv2.waitKey(10) >= 0):
            break;
    # Close the calibration window:
    cv2.destroyWindow("Calibrate")
    # If the user selected 4 points
    if (len(corner_point_list) >= 4):
        # Do calibration
        # src is a list of 4 points on the original image selected by the user
        # in the order [TOP_LEFT, BOTTOM_LEFT, TOP_RIGHT, BOTTOM_RIGHT]
        src = np.array(corner_point_list, np.float32)
        # dest is a list of where these 4 points should be located on the
        # rectangular board (in the same order):
        dest = np.array( [ (0, 0), (0, board_size[1]*dpi), (board_size[0]*dpi, 0), (board_size[0]*dpi, board_size[1]*dpi) ], np.float32)
        # Calculate the perspective transform matrix
        trans = cv2.getPerspectiveTransform(src, dest)
        # If we were given a calibration filename, save this matrix to a file
        if calib_file:
            np.savetxt(calib_file, trans)
        return trans
    else:
        return None
  115. def get_green_dots(img_orig):
  116. # # Threshold the HSV image to get only blue colors
  117. # mask = cv2.inRange(img_orig, lower_blue, upper_blue)
  118. b,g,r = cv2.split(img_orig)
  119. gg = g - 6
  120. green = ((gg > r) & (gg > b) & (g > 120)).astype(np.uint8)*255
  121. # green = cv2.cvtColor(green, cv2.COLOR_GRAY2BGR)
  122. kernel = np.ones((4,4),np.uint8)
  123. opening = cv2.morphologyEx(green, cv2.MORPH_OPEN, kernel)
  124. img = cv2.bitwise_and(img_orig,img_orig,mask = opening)
  125. contours = find_contours(opening)
  126. return find_centers(contours)
  127. #####################################################
  128. ### Calibration Example ###
  129. if __name__ == "__main__":
  130. parser = argparse.ArgumentParser()
  131. cam_id = 0
  132. dev = open_camera(cam_id)
  133. parser.add_argument('-a',
  134. '--automatic',
  135. action='store_false',
  136. help='Enable auto detect green dots (default=False)')
  137. parser.add_argument('-n',
  138. '--nocamera',
  139. action='store_false',
  140. help='Disable camera, players need to send the position (default=False)')
  141. args = parser.parse_args()
  142. manual = args.automatic
  143. camera = args.nocamera
  144. dev = None
  145. if camera:
  146. dev = open_camera(cam_id)
  147. # The size of the board in inches, measured between the two
  148. # robot boundaries:
  149. board_size = [84, 84]
  150. point = []
  151. # Number of pixels to display per inch in the final transformed image. This
  152. # was selected somewhat arbitrarily (I chose 17 because it fit on my screen):
  153. dpi = 10
  154. # Size (in pixels) of the transformed image
  155. transform_size = (int(board_size[0]*dpi), int(board_size[1]*dpi))
  156. # Calculate the perspective transform matrix
  157. transform = None
  158. if manual and camera:
  159. transform = get_transform_matrix(dev, board_size, dpi)
  160. # transform = get_transform_matrix_points([(327, 75), (280, 475), (805, 141), (805, 568)], board_size, dpi)
  161. server = tag.TagServer(10318, board_size)
  162. while True:
  163. img_orig = np.zeros(transform_size)
  164. if camera:
  165. img_orig = get_frame(dev)
  166. else:
  167. time.sleep(0.01)
  168. if img_orig is not None: # if we did get an image
  169. img = img_orig
  170. if camera:
  171. # Apply the transformation matrix to skew the image and display it
  172. if not manual:
  173. centers = get_green_dots(img_orig)
  174. transform = get_transform_matrix_points(centers, board_size, dpi)
  175. img = cv2.warpPerspective(img_orig, transform, dsize=transform_size)
  176. markers = getCMarkers(img)
  177. for i in markers:
  178. idd, p, head = i
  179. server.updatePosition(idd, p[0]/dpi, p[1]/dpi, head)
  180. p = (int(p[0]), int(p[1]))
  181. cv2.circle(img, p, 30, (0,255,0))
  182. cv2.putText(img, str(idd), p, cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 0, 255))
  183. for robot in server.robots():
  184. for line in robot.lines():
  185. p1 = (line[0]*dpi).astype(int)
  186. p2 = (line[1]*dpi).astype(int)
  187. cv2.line(img, (p1[0], p1[1]), (p2[0], p2[1]), (255, 255, 0))
  188. cv2.imshow("warped", img)
  189. else: # if we failed to capture (camera disconnected?), then quit
  190. break
  191. if (cv2.waitKey(2) >= 0):
  192. server.stop()
  193. break
  194. cleanup(cam_id)