# cmarkers.py
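"""Detection and decoding of square blob-based markers ("C markers", after getCMarkers).

Overview of the pipeline implemented below:
  1. Adaptive-threshold the frame and detect circular blobs (the marker's corner dots).
  2. Keep blobs that have two similar-sized neighbours forming a near right angle.
  3. Warp the enclosed square to a 100x100 patch and decode a 4x4 cell grid.
  4. Return a list of [number, center, angle] entries, one per decoded marker.
"""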
import cv2
import numpy as np
import math
from numpy.linalg import norm

def getCMarkers(img):
    markers = []
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img3 = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                 cv2.THRESH_BINARY, 125, 10)

    # Close small gaps, then dilate so the marker dots become solid blobs.
    kernel = np.ones((6, 6), np.uint8)
    img2 = cv2.morphologyEx(img3, cv2.MORPH_CLOSE, kernel)
    # kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    # img2 = cv2.dilate(img2, kernel, iterations=2)
    kernel = np.ones((3, 3), np.uint8)
    img2 = cv2.dilate(img2, kernel, iterations=2)

    # Set up SimpleBlobDetector parameters.
    params = cv2.SimpleBlobDetector_Params()
    params.filterByInertia = False
    params.filterByConvexity = False
    # # Change thresholds
    # params.minThreshold = 240
    # params.maxThreshold = 255
    # params.thresholdStep = 1
    # Filter by area.
    params.filterByArea = True
    params.minArea = 20
    params.minDistBetweenBlobs = 1
    # Filter by circularity.
    params.filterByCircularity = True
    params.minCircularity = 0.2
    # # Filter by convexity
    # params.filterByConvexity = True
    # params.minConvexity = 0.90
    # Filter by inertia.
    params.filterByInertia = True
    params.minInertiaRatio = 0.2

    # Create a detector with the parameters and detect blobs.
    detector = cv2.SimpleBlobDetector_create(params)
    keypoints = detector.detect(img2)

    # Keep only blobs that have at least two neighbouring blobs of similar size
    # within 4.5 times the blob size: candidate corner dots of a marker.
    k = []
    kk = []
    for point in keypoints:
        count = []
        for p in keypoints:
            if p == point:
                continue
            elif ((p.pt[0] - point.pt[0]) ** 2 + (p.pt[1] - point.pt[1]) ** 2 <= (point.size * 4.5) ** 2
                  and abs(point.size / p.size - 1) <= 0.3):
                count.append(p.pt)
        if len(count) >= 2:
            k.append((point.pt, count))
            kk.append(point)

    # Draw candidate blobs as red circles; DRAW_RICH_KEYPOINTS makes the drawn
    # circle size match the blob size.
    img2 = cv2.drawKeypoints(img2, kk, np.array([]), (0, 0, 255),
                             cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    cv2.imshow("m3", img2)

    for point in k:
        p, near = point
        # Among the neighbours of p, pick the pair whose angle at p is closest
        # to 90 degrees; midistance tracks the smallest deviation from 90
        # degrees and starts at the acceptance tolerance (pi/10).
        midistance = math.pi / 10.0
        bottom = []
        right = []
        for p1 in near:
            for p2 in near:
                if p1 == p2:
                    continue
                u = np.array([p1[0] - p[0], p1[1] - p[1]])
                v = np.array([p2[0] - p[0], p2[1] - p[1]])
                c = np.dot(u, v) / norm(u) / norm(v)
                angle = np.arccos(c)
                if abs(angle - math.pi / 2.0) < midistance:
                    bottom = p1
                    right = p2
                    midistance = abs(angle - math.pi / 2.0)
        if midistance == math.pi / 10:
            # No nearly perpendicular pair found around this blob.
            continue

        u = np.array([bottom[0] - p[0], bottom[1] - p[1]])
        v = np.array([right[0] - p[0], right[1] - p[1]])
        if u[1] > v[1]:
            u, v = v, u
        corner = right + u
        # Grow the quadrilateral by one sixth of each side so the warped patch
        # also contains the marker's border cells.
        addu = u * 1.0 / 6.0
        addv = v * 1.0 / 6.0
        corners = [p - addu - addv, bottom + addu - addv, right - addu + addv, corner + addu + addv]

        # Rectify the marker to a 100x100 patch and decode it.
        trans = get_transform_matrix_points(corners, [10, 10], 10)
        code = cv2.warpPerspective(gray, trans, dsize=(100, 100))
        # code = cv2.erode(code, kernel, iterations=1)
        cv2.imshow("m2", code)

        number = getNumber(code, 150)
        if number is False or number == 0:
            # Skip patches that fail validation and the empty code 0.
            continue

        # Marker angle: rotation of the bottom->p edge relative to the image's
        # vertical axis; marker centre: middle of the quadrilateral.
        uu = np.array([0, 1])
        vv = np.array([p[0] - bottom[0], p[1] - bottom[1]])
        c = np.dot(uu, vv) / norm(uu) / norm(vv)
        angle = np.arccos(c)
        mid = p + u * 0.5 + v * 0.5
        markers.append([number, mid, angle])
    return markers
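
# Apparent layout of the rectified 100x100 code patch, as read by getNumber on
# a 4x4 grid of cell means (D = dark cell, B = bright cell, +n = inner cell
# that adds n to the marker number when dark):
#
#     D  B  B  D
#     B  +1 +2 B
#     B  +4 +8 B
#     D  B  B  B
#
# The three dark corners correspond to the blob dots used to locate the marker;
# the fourth corner (bottom-right) has no dot.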

def getNumber(img, threshold):
    # Split the patch into a 4x4 grid and compute the mean intensity of each cell.
    vsplit = np.vsplit(img, 4)
    mean = []
    for vs in vsplit:
        m = []
        hsplit = np.hsplit(vs, 4)
        for hs in hsplit:
            m.append(np.mean(hs))
        mean.append(m)
    # print(np.array(mean).astype(np.uint8))
    # print(mean)
    mean = np.array(mean) >= threshold

    # Validate the fixed border pattern: dark corner dots at (0, 0), (0, 3)
    # and (3, 0), every other border cell bright.
    valid = (not mean[0, 0] and not mean[0, 3] and not mean[3, 0]
             and mean[0, 1] and mean[0, 2] and mean[1, 0] and mean[2, 0]
             and mean[1, 3] and mean[2, 3] and mean[3, 1] and mean[3, 2]
             and mean[3, 3])
    if not valid:
        return False

    # Decode the four inner cells as bits: a dark cell sets its bit.
    number = 0
    if not mean[1, 1]:
        number += 1
    if not mean[1, 2]:
        number += 2
    if not mean[2, 1]:
        number += 4
    if not mean[2, 2]:
        number += 8
    return number

def get_transform_matrix_points(corners, board_size, dpi):
    if len(corners) == 4:
        # src is the list of 4 source points on the original image,
        # in the order [TOP_LEFT, BOTTOM_LEFT, TOP_RIGHT, BOTTOM_RIGHT].
        src = np.array(corners, np.float32)
        # dest is where these 4 points should land on the rectified board
        # (same order), with board_size measured in units of dpi pixels.
        dest = np.array([(0, 0),
                         (0, board_size[1] * dpi),
                         (board_size[0] * dpi, 0),
                         (board_size[0] * dpi, board_size[1] * dpi)], np.float32)
        # Calculate the perspective transform matrix.
        trans = cv2.getPerspectiveTransform(src, dest)
        return trans
    else:
        return None
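
# Minimal usage sketch (an assumption, not part of the original module): grab
# frames from the default webcam, run getCMarkers on each one, and print the
# decoded markers. Press 'q' to quit. Requires a GUI-enabled OpenCV build,
# since getCMarkers itself calls cv2.imshow for its debug windows.
if __name__ == "__main__":
    cap = cv2.VideoCapture(0)  # assumed camera index
    if not cap.isOpened():
        raise SystemExit("Could not open the camera")
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        for number, center, angle in getCMarkers(frame):
            print("marker %d at (%.1f, %.1f), angle %.2f rad" % (number, center[0], center[1], angle))
            cv2.circle(frame, (int(center[0]), int(center[1])), 5, (0, 255, 0), -1)
        cv2.imshow("markers", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()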