# server.py — camera calibration and tag-game position server (OpenCV).
  1. #!/usr/bin/python
  2. import argparse
  3. import tag
  4. import sys
  5. import cv2
  6. import time
  7. import numpy as np
  8. import os
  9. from cmarkers import getCMarkers, get_transform_matrix_points
  10. ##
  11. # Opens a video capture device with a resolution of 800x600
  12. # at 30 FPS.
  13. ##
  14. def open_camera(cam_id = 1):
  15. cap = cv2.VideoCapture(cam_id)
  16. return cap
  17. ##
  18. # Gets a frame from an open video device, or returns None
  19. # if the capture could not be made.
  20. ##
  21. def get_frame(device):
  22. ret, img = device.read()
  23. if (ret == False): # failed to capture
  24. print >> sys.stderr, "Error capturing from video device."
  25. return None
  26. return img
  27. ##
  28. # Closes all OpenCV windows and releases video capture device
  29. # before exit.
  30. ##
  31. def cleanup(cam_id = 0):
  32. cv2.destroyAllWindows()
  33. cv2.VideoCapture(cam_id).release()
  34. # Global variable containing the 4 points selected by the user in the corners of the board
  35. corner_point_list = []
  36. ##
  37. # This function is called by OpenCV when the user clicks
  38. # anywhere in a window displaying an image.
  39. ##
  40. def mouse_click_callback(event, x, y, flags, param):
  41. if event == cv2.EVENT_LBUTTONDOWN:
  42. # print ("Click at (%d,%d)" % (x,y))
  43. corner_point_list.append( (x,y) )
  44. def find_centers(contours):
  45. centers = []
  46. for contour in contours:
  47. moments = cv2.moments(contour, True)
  48. center = (moments['m10']/moments['m00'] , moments['m01']/moments['m00'])
  49. # Convert floating point contour center into an integer so that
  50. # we can display it later.
  51. center = (int(round(center[0])),int(round(center[1])))
  52. centers.append(center)
  53. return centers
  54. def find_contours(image):
  55. ret,thresh = cv2.threshold(image,127,255,0)
  56. image, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
  57. return contours
  58. ##
  59. # Computes a perspective transform matrix by capturing a single
  60. # frame from a video source and displaying it to the user for
  61. # corner selection.
  62. #
  63. # Parameters:
  64. # * dev: Video Device (from open_camera())
  65. # * board_size: A tuple/list with 2 elements containing the width and height (respectively) of the gameboard (in arbitrary units, like inches)
  66. # * dpi: Scaling factor for elements of board_size
  67. # * calib_file: Optional. If specified, the perspective transform matrix is saved under this filename.
  68. # This file can be loaded later to bypass the calibration step (assuming nothing has moved).
  69. ##
  70. def get_transform_matrix(dev, board_size, dpi, calib_file = None):
  71. # Read a frame from the video device
  72. img = get_frame(dev)
  73. # Displace image to user
  74. cv2.imshow("Calibrate", img)
  75. # Register the mouse callback on this window. When
  76. # the user clicks anywhere in the "Calibrate" window,
  77. # the function mouse_click_callback() is called (defined above)
  78. cv2.setMouseCallback("Calibrate", mouse_click_callback)
  79. # Wait until the user has selected 4 points
  80. while True:
  81. # If the user has selected all 4 points, exit loop.
  82. if (len(corner_point_list) >= 4):
  83. print ("Got 4 points: "+str(corner_point_list))
  84. break
  85. # If the user hits a key, exit loop, otherwise remain.
  86. if (cv2.waitKey(10) >= 0):
  87. break;
  88. # Close the calibration window:
  89. cv2.destroyWindow("Calibrate")
  90. # If the user selected 4 points
  91. if (len(corner_point_list) >= 4):
  92. # Do calibration
  93. # src is a list of 4 points on the original image selected by the user
  94. # in the order [TOP_LEFT, BOTTOM_LEFT, TOP_RIGHT, BOTTOM_RIGHT]
  95. src = np.array(corner_point_list, np.float32)
  96. # dest is a list of where these 4 points should be located on the
  97. # rectangular board (in the same order):
  98. dest = np.array( [ (0, 0), (0, board_size[1]*dpi), (board_size[0]*dpi, 0), (board_size[0]*dpi, board_size[1]*dpi) ], np.float32)
  99. print(corner_point_list)
  100. # Calculate the perspective transform matrix
  101. trans = cv2.getPerspectiveTransform(src, dest)
  102. # If we were given a calibration filename, save this matrix to a file
  103. if calib_file:
  104. np.savetxt(calib_file, trans)
  105. return trans
  106. else:
  107. return None
  108. #####################################################
  109. ### Calibration Example ###
  110. if __name__ == "__main__":
  111. parser = argparse.ArgumentParser()
  112. cam_id = 1
  113. dev = None
  114. parser.add_argument('-n',
  115. '--nocamera',
  116. action='store_false',
  117. help='Disable camera, players need to send the position (default=False)')
  118. args = parser.parse_args()
  119. camera = args.nocamera
  120. dev = None
  121. if camera:
  122. dev = open_camera(cam_id)
  123. # The size of the board in inches, measured between the two
  124. # robot boundaries:
  125. board_size = [215, 185]
  126. point = []
  127. # Number of pixels to display per inch in the final transformed image. This
  128. # was selected somewhat arbitrarily (I chose 17 because it fit on my screen):
  129. dpi = 5
  130. # Size (in pixels) of the transformed image
  131. transform_size = (int(board_size[0]*dpi), int(board_size[1]*dpi))
  132. # Calculate the perspective transform matrix
  133. transform = None
  134. if camera:
  135. # transform = get_transform_matrix(dev, board_size, dpi)
  136. transform = get_transform_matrix_points([(340, 10), (78, 1066), (1502, 6), (1806, 1045)], board_size, dpi)
  137. server = tag.TagServer(10318, board_size)
  138. while True:
  139. img_orig = np.zeros([transform_size[0], transform_size[1], 3])
  140. if camera:
  141. img_orig = get_frame(dev)
  142. else:
  143. time.sleep(0.5)
  144. start = time.time()
  145. if img_orig is not None: # if we did get an image
  146. img = img_orig
  147. if camera:
  148. img = cv2.warpPerspective(img_orig, transform, dsize=transform_size)
  149. e1 = cv2.getTickCount()
  150. # your code execution
  151. markers = getCMarkers(img)
  152. e2 = cv2.getTickCount()
  153. tt = (e2 - e1)/ cv2.getTickFrequency()
  154. # print("mk", tt)
  155. for i in markers:
  156. idd, p, head = i
  157. server.updatePosition(idd, p[0]/dpi, p[1]/dpi, head)
  158. p = (int(p[0]), int(p[1]))
  159. cv2.circle(img, p, 30, (0,255,0))
  160. cv2.putText(img, str(idd), p, cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 0, 255))
  161. for robot in server.robots():
  162. for line in robot.lines():
  163. p1 = (line[0]*dpi).astype(int)
  164. p2 = (line[1]*dpi).astype(int)
  165. color = (255, 255, 0)
  166. if robot == server.tag:
  167. color = (0, 0, 255)
  168. cv2.line(img, (p1[0], p1[1]), (p2[0], p2[1]), color)
  169. if server.paused:
  170. cv2.putText(img, "PAUSE", (0,30), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255))
  171. # res = cv2.resize(img,None,fx=.5, fy=.5)
  172. end = time.time()
  173. seconds = end - start
  174. fps = 1.0 / seconds
  175. start = time.time()
  176. cv2.putText(img, str(fps), (0,60), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255))
  177. res = img
  178. cv2.imshow("warped", res)
  179. else: # if we failed to capture (camera disconnected?), then quit
  180. break
  181. k = cv2.waitKey(1)
  182. if k == 115: # s
  183. server.startGame()
  184. elif k == 112: # p
  185. server.stopGame()
  186. elif k == 113: # q
  187. server.stop()
  188. break
  189. if camera:
  190. cleanup(cam_id)