# server.py
  1. #!/usr/bin/python
  2. import argparse
  3. import tag
  4. import sys
  5. import cv2
  6. import time
  7. import numpy as np
  8. import os
  9. from cmarkers import getCMarkers, get_transform_matrix_points
  10. ##
  11. # Opens a video capture device with a resolution of 800x600
  12. # at 30 FPS.
  13. ##
  14. def open_camera(cam_id = 1):
  15. cap = cv2.VideoCapture(cam_id)
  16. return cap
  17. ##
  18. # Gets a frame from an open video device, or returns None
  19. # if the capture could not be made.
  20. ##
  21. def get_frame(device):
  22. ret, img = device.read()
  23. if (ret == False): # failed to capture
  24. print >> sys.stderr, "Error capturing from video device."
  25. return None
  26. return img
  27. ##
  28. # Closes all OpenCV windows and releases video capture device
  29. # before exit.
  30. ##
  31. def cleanup(cam_id = 0):
  32. cv2.destroyAllWindows()
  33. cv2.VideoCapture(cam_id).release()
  34. ##
  35. # Creates a new RGB image of the specified size, initially
  36. # filled with black.
  37. ##
  38. def new_rgb_image(width, height):
  39. image = numpy.zeros( (height, width, 3), numpy.uint8)
  40. return image
  41. # Global variable containing the 4 points selected by the user in the corners of the board
  42. corner_point_list = []
  43. ##
  44. # This function is called by OpenCV when the user clicks
  45. # anywhere in a window displaying an image.
  46. ##
  47. def mouse_click_callback(event, x, y, flags, param):
  48. if event == cv2.EVENT_LBUTTONDOWN:
  49. # print ("Click at (%d,%d)" % (x,y))
  50. corner_point_list.append( (x,y) )
  51. def find_centers(contours):
  52. centers = []
  53. for contour in contours:
  54. moments = cv2.moments(contour, True)
  55. center = (moments['m10']/moments['m00'] , moments['m01']/moments['m00'])
  56. # Convert floating point contour center into an integer so that
  57. # we can display it later.
  58. center = (int(round(center[0])),int(round(center[1])))
  59. centers.append(center)
  60. return centers
  61. def find_contours(image):
  62. ret,thresh = cv2.threshold(image,127,255,0)
  63. image, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
  64. return contours
##
# Computes a perspective transform matrix by capturing a single
# frame from a video source and displaying it to the user for
# corner selection.
#
# Parameters:
# * dev: Video Device (from open_camera())
# * board_size: A tuple/list with 2 elements containing the width and height (respectively) of the gameboard (in arbitrary units, like inches)
# * dpi: Scaling factor for elements of board_size
# * calib_file: Optional. If specified, the perspective transform matrix is saved under this filename.
# This file can be loaded later to bypass the calibration step (assuming nothing has moved).
#
# Returns the 3x3 perspective transform matrix, or None if fewer than
# 4 points were selected.
##
def get_transform_matrix(dev, board_size, dpi, calib_file = None):
    # Read a frame from the video device
    img = get_frame(dev)
    # Display the image to the user
    cv2.imshow("Calibrate", img)
    # Register the mouse callback on this window. When
    # the user clicks anywhere in the "Calibrate" window,
    # the function mouse_click_callback() is called (defined above)
    # and appends each click to the global corner_point_list.
    cv2.setMouseCallback("Calibrate", mouse_click_callback)
    # Wait until the user has selected 4 points
    while True:
        # If the user has selected all 4 points, exit loop.
        if (len(corner_point_list) >= 4):
            print ("Got 4 points: "+str(corner_point_list))
            break
        # If the user hits a key, exit loop, otherwise remain.
        # (waitKey also pumps the GUI event loop so clicks are delivered.)
        if (cv2.waitKey(10) >= 0):
            break;
    # Close the calibration window:
    cv2.destroyWindow("Calibrate")
    # If the user selected 4 points
    if (len(corner_point_list) >= 4):
        # Do calibration
        # src is a list of 4 points on the original image selected by the user
        # in the order [TOP_LEFT, BOTTOM_LEFT, TOP_RIGHT, BOTTOM_RIGHT]
        src = np.array(corner_point_list, np.float32)
        # dest is a list of where these 4 points should be located on the
        # rectangular board (in the same order):
        dest = np.array( [ (0, 0), (0, board_size[1]*dpi), (board_size[0]*dpi, 0), (board_size[0]*dpi, board_size[1]*dpi) ], np.float32)
        # Calculate the perspective transform matrix
        trans = cv2.getPerspectiveTransform(src, dest)
        # If we were given a calibration filename, save this matrix to a file
        if calib_file:
            np.savetxt(calib_file, trans)
        return trans
    else:
        return None
  114. def get_green_dots(img_orig):
  115. # # Threshold the HSV image to get only blue colors
  116. # mask = cv2.inRange(img_orig, lower_blue, upper_blue)
  117. b,g,r = cv2.split(img_orig)
  118. gg = g - 6
  119. green = ((gg > r) & (gg > b) & (g > 120)).astype(np.uint8)*255
  120. # green = cv2.cvtColor(green, cv2.COLOR_GRAY2BGR)
  121. kernel = np.ones((4,4),np.uint8)
  122. opening = cv2.morphologyEx(green, cv2.MORPH_OPEN, kernel)
  123. img = cv2.bitwise_and(img_orig,img_orig,mask = opening)
  124. contours = find_contours(opening)
  125. return find_centers(contours)
#####################################################
### Calibration Example ###
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    cam_id = 1
    dev = None
    # NOTE(review): action='store_false' means both options default to True
    # and passing -a / -n turns them *off*, which contradicts the
    # "(default=False)" wording in the help strings - confirm intent.
    parser.add_argument('-a',
                        '--automatic',
                        action='store_false',
                        help='Enable auto detect green dots (default=False)')
    parser.add_argument('-n',
                        '--nocamera',
                        action='store_false',
                        help='Disable camera, players need to send the position (default=False)')
    args = parser.parse_args()
    # manual is True unless -a was given; camera is True unless -n was given.
    manual = args.automatic
    camera = args.nocamera
    dev = None
    if camera:
        dev = open_camera(cam_id)
    # The size of the board in inches, measured between the two
    # robot boundaries:
    board_size = [90, 90]
    point = []
    # Number of pixels to display per inch in the final transformed image. This
    # was selected somewhat arbitrarily (I chose 17 because it fit on my screen):
    dpi = 10
    # Size (in pixels) of the transformed image
    transform_size = (int(board_size[0]*dpi), int(board_size[1]*dpi))
    # Calculate the perspective transform matrix
    transform = None
    if manual and camera:
        #transform = get_transform_matrix(dev, board_size, dpi)
        # Hard-coded corner points from a previous calibration session.
        transform = get_transform_matrix_points([(74, 20), (76, 399), (445, 4), (530, 368)]
                                                , board_size, dpi)
    server = tag.TagServer(10318, board_size)
    while True:
        # Start from a black board-sized canvas; replaced by a camera frame
        # when a camera is in use.
        img_orig = np.zeros([transform_size[0], transform_size[1], 3])
        if camera:
            img_orig = get_frame(dev)
        else:
            time.sleep(0.1)
        if img_orig is not None: # if we did get an image
            img = img_orig
            if camera:
                # Apply the transformation matrix to skew the image and display it
                if not manual:
                    # Auto mode: re-calibrate every frame from the detected
                    # green dots.
                    centers = get_green_dots(img_orig)
                    transform = get_transform_matrix_points(centers, board_size, dpi)
                img = cv2.warpPerspective(img_orig, transform, dsize=transform_size)
                # Detect coded markers and report each one's position
                # (converted back to board units) and heading to the server.
                markers = getCMarkers(img)
                for i in markers:
                    idd, p, head = i
                    server.updatePosition(idd, p[0]/dpi, p[1]/dpi, head)
                    p = (int(p[0]), int(p[1]))
                    cv2.circle(img, p, 30, (0,255,0))
                    cv2.putText(img, str(idd), p, cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 0, 255))
            # Draw every robot's lines; the robot that is currently "tag"
            # is drawn in red, the others in cyan.
            for robot in server.robots():
                for line in robot.lines():
                    p1 = (line[0]*dpi).astype(int)
                    p2 = (line[1]*dpi).astype(int)
                    color = (255, 255, 0)
                    if robot == server.tag:
                        color = (0, 0, 255)
                    cv2.line(img, (p1[0], p1[1]), (p2[0], p2[1]), color)
            if server.paused:
                cv2.putText(img, "PAUSE", (0,30), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255))
            cv2.imshow("warped", img)
        else: # if we failed to capture (camera disconnected?), then quit
            break
        # Keyboard controls: s = start game, p = pause/stop game, q = quit.
        k = cv2.waitKey(33)
        if k == 115: # s
            server.startGame()
        elif k == 112: # p
            server.stopGame()
        elif k == 113: # q
            server.stop()
            break
    if camera:
        cleanup(cam_id)