# main.py
  1. #!/usr/bin/python
  2. import argparse
  3. import tag
  4. import sys
  5. import cv2
  6. import time
  7. import numpy as np
  8. import os
  9. from cmarkers import getCMarkers, get_transform_matrix_points
  10. # open camera
  11. def open_camera(cam_id = 1):
  12. cap = cv2.VideoCapture(cam_id)
  13. return cap
  14. # get frame
  15. def get_frame(device):
  16. ret, img = device.read()
  17. if (ret == False): # failed to capture
  18. print >> sys.stderr, "Error capturing from video device."
  19. return None
  20. return img
  21. # close camera
  22. def cleanup(cam_id = 0):
  23. cv2.destroyAllWindows()
  24. cv2.VideoCapture(cam_id).release()
  25. # 4 points selected by the user in the corners of the board
  26. corner_point_list = []
  27. ##
  28. # This function is called by OpenCV when the user clicks
  29. # anywhere in a window displaying an image.
  30. ##
  31. def mouse_click_callback(event, x, y, flags, param):
  32. if event == cv2.EVENT_LBUTTONDOWN:
  33. # print ("Click at (%d,%d)" % (x,y))
  34. corner_point_list.append( (x,y) )
  35. def find_centers(contours):
  36. centers = []
  37. for contour in contours:
  38. moments = cv2.moments(contour, True)
  39. center = (moments['m10']/moments['m00'] , moments['m01']/moments['m00'])
  40. # Convert floating point contour center into an integer so that
  41. # we can display it later.
  42. center = (int(round(center[0])),int(round(center[1])))
  43. centers.append(center)
  44. return centers
  45. def find_contours(image):
  46. ret,thresh = cv2.threshold(image,127,255,0)
  47. image, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
  48. return contours
##
# Computes a perspective transform matrix by capturing a single
# frame from a video source and displaying it to the user for
# corner selection.
#
# Parameters:
# * dev: Video Device (from open_camera())
# * board_size: A tuple/list with 2 elements containing the width and
#   height (respectively) of the gameboard (in arbitrary units, like inches)
# * dpi: Scaling factor for elements of board_size
# * calib_file: Optional. If specified, the perspective transform matrix
#   is saved under this filename. This file can be loaded later to bypass
#   the calibration step (assuming nothing has moved).
#
# Returns the 3x3 transform matrix, or None if the user aborted before
# selecting 4 points.
##
def get_transform_matrix(dev, board_size, dpi, calib_file = None):
    # Read a frame from the video device
    img = get_frame(dev)
    # Display image to user, with the required click order overlaid
    text = "[TOP_LEFT, BOTTOM_LEFT, TOP_RIGHT, BOTTOM_RIGHT]"
    cv2.putText(img, text, (0,30), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255))
    cv2.imshow("Calibrate", img)
    # Register the mouse callback on this window. When the user clicks
    # anywhere in the "Calibrate" window, mouse_click_callback() (defined
    # above) appends the click position to the global corner_point_list.
    cv2.setMouseCallback("Calibrate", mouse_click_callback)
    # Wait until the user has selected 4 points
    while True:
        # If the user has selected all 4 points, exit loop.
        if (len(corner_point_list) >= 4):
            print ("Got 4 points: "+str(corner_point_list))
            break
        # If the user hits a key, exit loop, otherwise remain.
        if (cv2.waitKey(10) >= 0):
            break;
    # Close the calibration window:
    cv2.destroyWindow("Calibrate")
    # If the user selected 4 points
    if (len(corner_point_list) >= 4):
        # Do calibration
        # src is a list of 4 points on the original image selected by the user
        # in the order [TOP_LEFT, BOTTOM_LEFT, TOP_RIGHT, BOTTOM_RIGHT]
        src = np.array(corner_point_list, np.float32)
        # dest is a list of where these 4 points should be located on the
        # rectangular board (in the same order), in pixels (units * dpi):
        a = [ (0, 0), (0, board_size[1]*dpi), (board_size[0]*dpi, 0), (board_size[0]*dpi, board_size[1]*dpi) ]
        dest = np.array(a, np.float32)
        print(corner_point_list)
        # Calculate the perspective transform matrix
        trans = cv2.getPerspectiveTransform(src, dest)
        # If we were given a calibration filename, save this matrix to a file
        if calib_file:
            np.savetxt(calib_file, trans)
        return trans
    else:
        return None
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    cam_id = 1
    dev = None
    # NOTE(review): store_false means passing -n sets args.nocamera to
    # False, i.e. camera mode is ON by default and -n disables it.
    parser.add_argument('-n',
                        '--nocamera',
                        action='store_false',
                        help='Disable camera, players need to send the position (default=False)')
    args = parser.parse_args()
    camera = args.nocamera
    dev = None
    if camera:
        dev = open_camera(cam_id)
    # The size of the board in inches, measured between the two
    # robot boundaries:
    board_size = [185, 215]
    # Wall segments of the arena map as [x1, y1, x2, y2] in board units.
    maplines = [
        [40, 40, 40, 80],
        [40, 80, 60, 80],
        [60, 80, 60, 40],
        [60, 40, 40, 40],
        [40+70, 40+30, 40+70, 80+30],
        [40+70, 80+30, 60+70, 80+30],
        [60+70, 80+30, 60+70, 40+30],
        [60+70, 40+30, 40+70, 40+30]
    ]
    point = []
    if camera:
        # Swap width/height to match the camera's orientation.
        board_size = [board_size[1], board_size[0]]
    # Number of pixels to display per inch in the final transformed image.
    # This was selected somewhat arbitrarily (it fit on the screen):
    dpi = 5
    # Size (in pixels) of the transformed image
    transform_size = (int(board_size[0]*dpi), int(board_size[1]*dpi))
    # Calculate the perspective transform matrix
    transform = None
    if camera:
        # transform = get_transform_matrix(dev, board_size, dpi)
        # Hard-coded corner points from a previous manual calibration.
        transform = get_transform_matrix_points([(340, 10), (78, 1066), (1502, 6), (1806, 1045)], board_size, dpi)
    server = tag.TagServer(10318, board_size, maplines)
    while True:
        img_orig = None
        if camera:
            # grab one frame from the camera
            img_orig = get_frame(dev)
        else:
            # no camera: start from a black image and throttle the loop
            img_orig = np.zeros([transform_size[0], transform_size[1], 3])
            time.sleep(0.1)
        start = time.time()
        if img_orig is not None:  # if we did get an image
            img = img_orig
            if camera:
                # Rectify the camera frame onto board pixel coordinates.
                img = cv2.warpPerspective(img_orig, transform, dsize=transform_size)
            e1 = cv2.getTickCount()
            # Detect the markers (robots) in the rectified image.
            markers = getCMarkers(img)
            e2 = cv2.getTickCount()
            tt = (e2 - e1)/ cv2.getTickFrequency()
            # print("mk", tt)
            for i in markers:
                idd, p, head = i
                # Report positions to the server in board units (pixels / dpi).
                server.updatePosition(idd, p[0]/dpi, p[1]/dpi, head)
                p = (int(p[0]), int(p[1]))
                cv2.circle(img, p, 30, (0,255,0))
                cv2.putText(img, str(idd), p, cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 0, 255))
            # Draw the arena walls (board units scaled to pixels by dpi).
            for line in maplines:
                color = (255, 255, 255)
                line = np.array(line)*dpi
                cv2.line(img, (line[0], line[1]), (line[2], line[3]), color)
            # Draw each robot's lines; the current "tag" robot is drawn red.
            for robot in server.robots():
                for line in robot.lines():
                    p1 = (line[0]*dpi).astype(int)
                    p2 = (line[1]*dpi).astype(int)
                    color = (255, 255, 0)
                    if robot == server.tag:
                        color = (0, 0, 255)
                    cv2.line(img, (p1[0], p1[1]), (p2[0], p2[1]), color)
            if server.paused:
                cv2.putText(img, "PAUSE", (0,30), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255))
            # res = cv2.resize(img,None,fx=.5, fy=.5)
            end = time.time()
            seconds = end - start
            fps = 1.0 / seconds
            start = time.time()
            if camera:
                cv2.putText(img, str(fps), (0,60), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255))
            res = img
            cv2.imshow("warped", res)
        else: # if we failed to capture (camera disconnected?), then quit
            break
        # Keyboard controls: s = start game, p = pause, q = quit.
        k = cv2.waitKey(1)
        if k == 115: # s
            server.startGame()
        elif k == 112: # p
            server.stopGame()
        elif k == 113: # q
            server.stop()
            break
    if camera:
        cleanup(cam_id)