# cmarkers.py — detection of square "C" fiducial markers in camera images.
import cv2
import numpy as np
import math
from numpy.linalg import norm
  5. def getCMarkers (img):
  6. markers = []
  7. gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
  8. img3 = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 125, 10)
  9. kernel = np.ones((6,6),np.uint8)
  10. img2 = cv2.morphologyEx(img3, cv2.MORPH_CLOSE, kernel)
  11. # kernel = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3))
  12. # img2 = cv2.dilate(img2,kernel,iterations = 2)
  13. kernel = np.ones((3,3),np.uint8)
  14. img2 = cv2.dilate(img2,kernel, 3)
  15. # cv2.imshow("m2", img2)
  16. # Setup SimpleBlobDetector parameters.
  17. params = cv2.SimpleBlobDetector_Params()
  18. params.filterByInertia = False
  19. params.filterByConvexity = False
  20. # # Change thresholds
  21. # params.minThreshold = 240
  22. # params.maxThreshold = 255
  23. # params.thresholdStep = 1
  24. # Filter by Area.
  25. params.filterByArea = True
  26. params.minArea = 20
  27. params.minDistBetweenBlobs = 1
  28. # Filter by Circularity
  29. params.filterByCircularity = True
  30. params.minCircularity = 0.2
  31. # # Filter by Convexity
  32. # params.filterByConvexity = True
  33. # params.minConvexity = 0.90
  34. # Filter by Inertia
  35. params.filterByInertia = True
  36. params.minInertiaRatio = 0.2
  37. # Create a detector with the parameters
  38. detector = cv2.SimpleBlobDetector_create(params)
  39. # Detect blobs.
  40. keypoints = detector.detect(img2)
  41. # img2 = cv2.drawKeypoints(img2, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
  42. # Draw detected blobs as red circles.
  43. # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures
  44. # the size of the circle corresponds to the size of blob
  45. k = []
  46. for point in keypoints:
  47. count = []
  48. for p in keypoints:
  49. if p == point:
  50. continue
  51. elif (p.pt[0]-point.pt[0])**2+(p.pt[1]-point.pt[1])**2 <= (point.size*4.5)**2 and (abs(point.size/p.size-1) <= 0.3):
  52. count.append(p.pt)
  53. if len(count) >= 2:
  54. k.append((point.pt, count))
  55. for point in k:
  56. p, near = point
  57. # distance open the angre and 90 degree
  58. midistance = math.pi/12.5
  59. bottom = []
  60. rigth = []
  61. for p1 in near:
  62. for p2 in near:
  63. if p1 == p2:
  64. continue
  65. u = np.array([p1[0]-p[0], p1[1]-p[1]])
  66. v = np.array([p2[0]-p[0], p2[1]-p[1]])
  67. c = np.dot(u,v)/norm(u)/norm(v)
  68. angle = np.arccos(c)
  69. if abs(angle-math.pi/2.0) < midistance:
  70. bottom = p1
  71. rigth = p2
  72. midistance = abs(angle-math.pi/2.0)
  73. if midistance == math.pi/12.5:
  74. continue
  75. u = np.array([bottom[0]-p[0], bottom[1]-p[1]])
  76. v = np.array([rigth[0]-p[0], rigth[1]-p[1]])
  77. conner = rigth+u
  78. addu = u*1.0/6.0
  79. addv = v*1.0/6.0
  80. conners = [p-addu-addv, bottom+addu-addv, rigth-addu+addv, conner+addu+addv]
  81. trans = get_transform_matrix_points(conners, [10, 10], 10)
  82. code = cv2.warpPerspective(gray, trans, dsize=(100, 100))
  83. # code = cv2.erode(code, kernel,iterations = 1)
  84. # cv2.imshow("m2", code)
  85. vsplit = np.vsplit(code, 4)
  86. mean = []
  87. for vs in vsplit:
  88. m = []
  89. hsplit = np.hsplit(vs, 4)
  90. for hs in hsplit:
  91. m.append(np.mean(hs))
  92. mean.append(m)
  93. # print(np.array(mean).astype(np.uint8))
  94. mean = np.array(mean) >= 100.0
  95. valid = mean[0, 0] == False
  96. valid = valid and mean[0, 3] == False
  97. valid = valid and mean[0, 3] == False
  98. valid = valid and mean[1, 0] == True
  99. valid = valid and mean[0, 1] == True
  100. valid = valid and mean[2, 0] == True
  101. valid = valid and mean[0, 2] == True
  102. valid = valid and mean[3, 3] == True
  103. valid = valid and mean[1, 3] == True
  104. valid = valid and mean[3, 1] == True
  105. valid = valid and mean[2, 3] == True
  106. valid = valid and mean[3, 2] == True
  107. if valid == False:
  108. continue
  109. number = 0
  110. if not mean[1, 1]:
  111. number += 1
  112. if not mean[1, 2]:
  113. number += 2
  114. if not mean[2, 1]:
  115. number += 4
  116. if not mean[2, 2]:
  117. number += 8
  118. uu = np.array([0, 1])
  119. vv = np.array([p[0]-bottom[0], p[1]-bottom[1]])
  120. c = np.dot(uu,vv)/norm(uu)/norm(vv)
  121. angle = np.arccos(c)
  122. mid = p+u*0.5+v*0.5
  123. if number != 0:
  124. markers.append([number, mid, angle])
  125. return markers
  126. def get_transform_matrix_points(corners, board_size, dpi):
  127. # Read a frame from the video device
  128. # Close the calibration window:
  129. # cv2.destroyWindow("Calibrate")
  130. # If the user selected 4 points
  131. if (len(corners) == 4):
  132. # Do calibration
  133. # src is a list of 4 points on the original image selected by the user
  134. # in the order [TOP_LEFT, BOTTOM_LEFT, TOP_RIGHT, BOTTOM_RIGHT]
  135. src = np.array(corners, np.float32)
  136. # dest is a list of where these 4 points should be located on the
  137. # rectangular board (in the same order):
  138. dest = np.array( [ (0, 0), (0, board_size[1]*dpi), (board_size[0]*dpi, 0), (board_size[0]*dpi, board_size[1]*dpi) ], np.float32)
  139. # Calculate the perspective transform matrix
  140. trans = cv2.getPerspectiveTransform(src, dest)
  141. # If we were given a calibration filename, save this matrix to a file
  142. return trans
  143. else:
  144. return None