cmarkers.py

import cv2
import numpy as np
import math
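
# getCMarkers() looks for square fiducial markers made of three dark dots:
# the frame is thresholded, candidate dots are found as blobs, a dot with two
# similarly sized neighbours at a right angle defines a marker square, which
# is rectified and its 4-bit ID decoded from a 4x4 cell grid (getNumber).
# Each detected marker is returned as [id, centre, angle].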

def getCMarkers(img):
    e1 = cv2.getTickCount()
    markers = []
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img3 = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 15, 10)
    # print("threshold", (cv2.getTickCount() - e1) / cv2.getTickFrequency())
    # Close small gaps, then dilate so each marker dot becomes one solid blob.
    kernel = np.ones((2, 2), np.uint8)
    img2 = cv2.morphologyEx(img3, cv2.MORPH_CLOSE, kernel)
    # print("close", (cv2.getTickCount() - e1) / cv2.getTickFrequency())
    # cv2.imshow("m3", img2)
    # kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    # img2 = cv2.dilate(img2, kernel, iterations=2)
    kernel = np.ones((2, 2), np.uint8)
    img2 = cv2.dilate(img2, kernel, iterations=1)
    # cv2.imshow("m3", img2)
    # print("dilate", (cv2.getTickCount() - e1) / cv2.getTickFrequency())
    # Set up SimpleBlobDetector parameters.
    params = cv2.SimpleBlobDetector_Params()
    # # Change thresholds
    # params.minThreshold = 240
    # params.maxThreshold = 255
    # params.thresholdStep = 1
    # Filter by area.
    params.filterByArea = True
    params.minArea = 5
    params.minDistBetweenBlobs = 1
    # Filter by circularity.
    params.filterByCircularity = True
    params.minCircularity = 0.5
    # Filter by convexity.
    params.filterByConvexity = True
    params.minConvexity = 0.70
    # Filter by inertia.
    params.filterByInertia = True
    params.minInertiaRatio = 0.2
    params.filterByColor = False
    # Create a detector with the parameters and detect blobs.
    detector = cv2.SimpleBlobDetector_create(params)
    keypoints = detector.detect(img2)
    # print("blob", (cv2.getTickCount() - e1) / cv2.getTickFrequency())
    # Keep only dots that have at least two similarly sized neighbours within
    # roughly 4.5 diameters: these are candidate marker corner points.
    k = []
    kk = []
    # print(len(keypoints))
    for point in keypoints:
        count = []
        for p in keypoints:
            if p is point:
                continue
            dist2 = (p.pt[0] - point.pt[0]) ** 2 + (p.pt[1] - point.pt[1]) ** 2
            if dist2 <= (point.size * 4.5) ** 2 and abs(point.size / p.size - 1) <= 0.3:
                count.append(p.pt)
        if len(count) >= 2:
            k.append((point.pt, count))
            kk.append(point)
    # print("near", (cv2.getTickCount() - e1) / cv2.getTickFrequency())
    for point in k:
        p, near = point
        # Maximum allowed deviation of the angle between the two arms from 90 degrees.
        midistance = math.pi / 30.0
        bottom = None
        rigth = None
        for p1 in near:
            for p2 in near:
                if p1 == p2:
                    continue
                u = np.array([p1[0] - p[0], p1[1] - p[1]])
                v = np.array([p2[0] - p[0], p2[1] - p[1]])
                angle = math.atan2(np.linalg.det([u, v]), np.dot(u, v))
                if abs(angle - math.pi / 2.0) < midistance:
                    bottom = p1
                    rigth = p2
        if bottom is None:
            # No pair of neighbours forms a right angle around this dot.
            continue
        # Arm vectors of the matched pair.
        u = np.array([bottom[0] - p[0], bottom[1] - p[1]])
        v = np.array([rigth[0] - p[0], rigth[1] - p[1]])
        # Fourth corner of the marker square, then grow the quad by 1/6 of each
        # arm so the whole code grid falls inside it.
        conner = rigth + u
        addu = u / 6.0
        addv = v / 6.0
        conners = [p - addu - addv, bottom + addu - addv, rigth - addu + addv, conner + addu + addv]
        # Rectify the marker to a 100x100 patch and decode its ID.
        trans = get_transform_matrix_points(conners, [10, 10], 10)
        code = cv2.warpPerspective(gray, trans, dsize=(100, 100))
        # code = cv2.adaptiveThreshold(code, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 25, 1)
        number = getNumber(code, 160)
        if number is False:
            continue
        # cv2.imshow("m2", code)
        # Marker orientation: signed angle between the v arm and the image y axis.
        uu = np.array([0, 1])
        angle = math.atan2(np.linalg.det([v, uu]), np.dot(v, uu))
        mid = p + u * 0.5 + v * 0.5
        if number != 0:
            markers.append([number, mid, angle])
    # Draw detected blobs as red circles; DRAW_RICH_KEYPOINTS makes the circle
    # size correspond to the size of the blob.
    img2 = cv2.drawKeypoints(img2, kk, np.array([]), (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    # cv2.imshow("m3", img2)
    return markers

def getNumber(img, threshold):
    # Split the rectified 100x100 marker into a 4x4 grid of cells and take the
    # mean intensity of each cell.
    vsplit = np.vsplit(img, 4)
    mean = []
    for vs in vsplit:
        m = []
        hsplit = np.hsplit(vs, 4)
        for hs in hsplit:
            m.append(np.mean(hs))
        mean.append(m)
    # print(np.array(mean).astype(np.uint8))
    # print((mean[0][0] + mean[3][3]) / 2.0)
    # The passed-in threshold is replaced by an adaptive one, taken between the
    # dark top-left dot and the bright bottom-right corner.
    threshold = (mean[0][0] + mean[3][3]) / 2.0 * 0.85
    # print(threshold)
    mean = np.array(mean) >= threshold
    # Three corner cells must be dark (the locator dots) and the remaining
    # border cells bright, otherwise this patch is not a marker.
    valid = mean[0, 0] == False
    valid = valid and mean[0, 3] == False
    valid = valid and mean[3, 0] == False
    valid = valid and mean[1, 0] == True
    valid = valid and mean[0, 1] == True
    valid = valid and mean[2, 0] == True
    valid = valid and mean[0, 2] == True
    valid = valid and mean[3, 3] == True
    valid = valid and mean[1, 3] == True
    valid = valid and mean[3, 1] == True
    valid = valid and mean[2, 3] == True
    valid = valid and mean[3, 2] == True
    if not valid:
        return False
    # Decode the four inner cells as a 4-bit ID: a dark cell sets its bit.
    number = 0
    if not mean[1, 1]:
        number += 1
    if not mean[1, 2]:
        number += 2
    if not mean[2, 1]:
        number += 4
    if not mean[2, 2]:
        number += 8
    return number
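
# Minimal sanity-check sketch for getNumber: it builds a synthetic 100x100
# marker patch for an assumed 4-bit code and checks that it decodes back.
# The helper name and the default code value are arbitrary, not part of the
# original detector.
def _selftest_getNumber(code_bits=5):
    patch = np.full((100, 100), 255, np.uint8)
    cells = np.ones((4, 4), np.uint8)            # 1 = bright, 0 = dark
    cells[0, 0] = cells[0, 3] = cells[3, 0] = 0  # the three locator dots
    # Dark inner cells encode the bits, matching the weights in getNumber.
    for bit, (r, c) in enumerate([(1, 1), (1, 2), (2, 1), (2, 2)]):
        if code_bits & (1 << bit):
            cells[r, c] = 0
    for r in range(4):
        for c in range(4):
            patch[r * 25:(r + 1) * 25, c * 25:(c + 1) * 25] = 255 * cells[r, c]
    return getNumber(patch, 160) == code_bits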

def get_transform_matrix_points(corners, board_size, dpi):
    # Build a perspective transform that maps the 4 given corner points onto a
    # rectangular board of board_size units at dpi pixels per unit.
    if len(corners) == 4:
        # src is the list of 4 points on the original image,
        # in the order [TOP_LEFT, BOTTOM_LEFT, TOP_RIGHT, BOTTOM_RIGHT].
        src = np.array(corners, np.float32)
        # dest is where these 4 points should land on the rectangular board
        # (in the same order):
        dest = np.array([(0, 0),
                         (0, board_size[1] * dpi),
                         (board_size[0] * dpi, 0),
                         (board_size[0] * dpi, board_size[1] * dpi)], np.float32)
        # Calculate the perspective transform matrix.
        trans = cv2.getPerspectiveTransform(src, dest)
        return trans
    else:
        return None
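
# Usage sketch: run the detector on a single image. "frame.png" is a
# placeholder path (an assumption); any BGR frame, e.g. one read from
# cv2.VideoCapture, works the same way.
if __name__ == "__main__":
    frame = cv2.imread("frame.png")
    if frame is None:
        print("could not read frame.png")
    else:
        for number, mid, angle in getCMarkers(frame):
            # mid is the marker centre in pixels, angle its orientation in radians.
            print("marker %d at (%.1f, %.1f), angle %.2f rad" % (number, mid[0], mid[1], angle))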