# main.py
  1. #!/usr/bin/python
  2. import argparse
  3. import tag
  4. import sys
  5. import cv2
  6. import time
  7. import numpy as np
  8. import os
  9. from cmarkers import getCMarkers, get_transform_matrix_points
  10. # open camera
  11. def open_camera(cam_id = 1):
  12. cap = cv2.VideoCapture(cam_id)
  13. return cap
  14. # get frame
  15. def get_frame(device):
  16. ret, img = device.read()
  17. if (ret == False): # failed to capture
  18. print >> sys.stderr, "Error capturing from video device."
  19. return None
  20. return img
  21. # close camera
  22. def cleanup(cam_id = 0):
  23. cv2.destroyAllWindows()
  24. cv2.VideoCapture(cam_id).release()
# 4 points selected by the user in the corners of the board
# (filled in by mouse_click_callback, consumed by get_transform_matrix)
corner_point_list = []
  27. ##
  28. # This function is called by OpenCV when the user clicks
  29. # anywhere in a window displaying an image.
  30. ##
  31. def mouse_click_callback(event, x, y, flags, param):
  32. if event == cv2.EVENT_LBUTTONDOWN:
  33. # print ("Click at (%d,%d)" % (x,y))
  34. corner_point_list.append( (x,y) )
  35. def find_centers(contours):
  36. centers = []
  37. for contour in contours:
  38. moments = cv2.moments(contour, True)
  39. center = (moments['m10']/moments['m00'] , moments['m01']/moments['m00'])
  40. # Convert floating point contour center into an integer so that
  41. # we can display it later.
  42. center = (int(round(center[0])),int(round(center[1])))
  43. centers.append(center)
  44. return centers
  45. def find_contours(image):
  46. ret,thresh = cv2.threshold(image,127,255,0)
  47. image, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
  48. return contours
##
# Computes a perspective transform matrix by capturing a single
# frame from a video source and displaying it to the user for
# corner selection.
#
# Parameters:
# * dev: Video Device (from open_camera())
# * board_size: A tuple/list with 2 elements containing the width and height
#   (respectively) of the gameboard (in arbitrary units, like inches)
# * dpi: Scaling factor for elements of board_size
# * calib_file: Optional. If specified, the perspective transform matrix is
#   saved under this filename. This file can be loaded later to bypass the
#   calibration step (assuming nothing has moved).
##
def get_transform_matrix(dev, board_size, dpi, calib_file = None):
    """Interactively compute (and optionally save) a perspective transform.

    Shows one camera frame in a "Calibrate" window, waits for the user to
    click the 4 board corners (collected via the module-level
    corner_point_list), and returns the 3x3 transform matrix — or None if
    fewer than 4 points were selected before a key press.
    """
    # Read a frame from the video device
    img = get_frame(dev)
    # Display image to user
    cv2.imshow("Calibrate", img)
    # Register the mouse callback on this window. When
    # the user clicks anywhere in the "Calibrate" window,
    # the function mouse_click_callback() is called (defined above)
    cv2.setMouseCallback("Calibrate", mouse_click_callback)
    # Wait until the user has selected 4 points
    while True:
        # If the user has selected all 4 points, exit loop.
        if (len(corner_point_list) >= 4):
            print ("Got 4 points: "+str(corner_point_list))
            break
        # If the user hits a key, exit loop, otherwise remain.
        if (cv2.waitKey(10) >= 0):
            break;
    # Close the calibration window:
    cv2.destroyWindow("Calibrate")
    # If the user selected 4 points
    if (len(corner_point_list) >= 4):
        # Do calibration
        # src is a list of 4 points on the original image selected by the user
        # in the order [TOP_LEFT, BOTTOM_LEFT, TOP_RIGHT, BOTTOM_RIGHT]
        src = np.array(corner_point_list, np.float32)
        # dest is a list of where these 4 points should be located on the
        # rectangular board (in the same order):
        a = [ (0, 0), (0, board_size[1]*dpi), (board_size[0]*dpi, 0), (board_size[0]*dpi, board_size[1]*dpi) ]
        dest = np.array(a, np.float32)
        print(corner_point_list)
        # Calculate the perspective transform matrix
        trans = cv2.getPerspectiveTransform(src, dest)
        # If we were given a calibration filename, save this matrix to a file
        if calib_file:
            np.savetxt(calib_file, trans)
        return trans
    else:
        return None
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    cam_id = 1
    dev = None
    parser.add_argument('-n',
                        '--nocamera',
                        action='store_false',
                        help='Disable camera, players need to send the position (default=False)')
    args = parser.parse_args()
    # NOTE: action='store_false' makes args.nocamera default to True, so
    # `camera` is True unless -n/--nocamera is passed on the command line.
    camera = args.nocamera
    dev = None
    if camera:
        dev = open_camera(cam_id)
    # The size of the board in inches, measured between the two
    # robot boundaries:
    board_size = [185, 215]
    point = []
    if camera:
        # Swap width/height when reading from the camera.
        board_size = [board_size[1], board_size[0]]
    # Number of pixels to display per inch in the final transformed image. This
    # was selected somewhat arbitrarily (I chose 17 because it fit on my screen):
    dpi = 5
    # Size (in pixels) of the transformed image
    transform_size = (int(board_size[0]*dpi), int(board_size[1]*dpi))
    # Calculate the perspective transform matrix
    transform = None
    if camera:
        # transform = get_transform_matrix(dev, board_size, dpi)
        # Corner points are hard-coded from a previous calibration run.
        transform = get_transform_matrix_points([(340, 10), (78, 1066), (1502, 6), (1806, 1045)], board_size, dpi)
    server = tag.TagServer(10318, board_size)
    # Main loop: grab a frame (or a black canvas in no-camera mode),
    # detect markers, push positions to the server, and draw the overlay.
    while True:
        # Blank canvas used when no camera is present.
        # NOTE(review): transform_size is (width, height) but np.zeros takes
        # (rows=height, cols=width) — the canvas may be transposed; confirm.
        img_orig = np.zeros([transform_size[0], transform_size[1], 3])
        if camera:
            img_orig = get_frame(dev)
        else:
            time.sleep(0.1)
        start = time.time()
        if img_orig is not None: # if we did get an image
            img = img_orig
            if camera:
                # Rectify the raw camera frame onto the board plane.
                img = cv2.warpPerspective(img_orig, transform, dsize=transform_size)
            e1 = cv2.getTickCount()
            # your code execution
            markers = getCMarkers(img)
            e2 = cv2.getTickCount()
            tt = (e2 - e1)/ cv2.getTickFrequency()
            # print("mk", tt)
            # Report each detected marker (id, position, heading) to the
            # server (positions converted back from pixels to board units)
            # and draw it on the overlay.
            for i in markers:
                idd, p, head = i
                server.updatePosition(idd, p[0]/dpi, p[1]/dpi, head)
                p = (int(p[0]), int(p[1]))
                cv2.circle(img, p, 30, (0,255,0))
                cv2.putText(img, str(idd), p, cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 0, 255))
            # Draw each robot's line segments; the tagged robot is red.
            for robot in server.robots():
                for line in robot.lines():
                    p1 = (line[0]*dpi).astype(int)
                    p2 = (line[1]*dpi).astype(int)
                    color = (255, 255, 0)
                    if robot == server.tag:
                        color = (0, 0, 255)
                    cv2.line(img, (p1[0], p1[1]), (p2[0], p2[1]), color)
            if server.paused:
                cv2.putText(img, "PAUSE", (0,30), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255))
            # res = cv2.resize(img,None,fx=.5, fy=.5)
            end = time.time()
            seconds = end - start
            fps = 1.0 / seconds
            start = time.time()
            if camera:
                cv2.putText(img, str(fps), (0,60), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255))
            # NOTE(review): original indentation was lost; the display is
            # assumed to run in both camera and no-camera modes — confirm.
            res = img
            cv2.imshow("warped", res)
        else: # if we failed to capture (camera disconnected?), then quit
            break
        # Keyboard controls: s = start game, p = pause/stop game, q = quit.
        k = cv2.waitKey(1)
        if k == 115: # s
            server.startGame()
        elif k == 112: # p
            server.stopGame()
        elif k == 113: # q
            server.stop()
            break
    if camera:
        cleanup(cam_id)