# cmarkers.py — detection and decoding of square "C-marker" fiducials in camera images.
import cv2
import numpy as np
import math

# Table of expected bright/dark cell patterns for marker validation.
# NOTE(review): `validcheck` is not referenced anywhere in the visible file —
# getNumber() hard-codes its own border checks instead. Confirm whether this
# table is dead code or used by a caller outside this file.
validcheck = [[False, True, False, False],
              [False, True, True, False],
              [False, False, True, False],
              [True, False, True, True],
              [True, False, True, False],
              [True, True, False, False],
              [True, True, False, True],
              [True, False, False, True]]
  12. def getCMarkers (img):
  13. markers = []
  14. gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
  15. img3 = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 125, 10)
  16. kernel = np.ones((6,6),np.uint8)
  17. img2 = cv2.morphologyEx(img3, cv2.MORPH_CLOSE, kernel)
  18. # kernel = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3))
  19. # img2 = cv2.dilate(img2,kernel,iterations = 2)
  20. kernel = np.ones((3,3),np.uint8)
  21. img2 = cv2.dilate(img2,kernel, 2)
  22. # Setup SimpleBlobDetector parameters.
  23. params = cv2.SimpleBlobDetector_Params()
  24. params.filterByInertia = False
  25. params.filterByConvexity = False
  26. # # Change thresholds
  27. # params.minThreshold = 240
  28. # params.maxThreshold = 255
  29. # params.thresholdStep = 1
  30. # Filter by Area.
  31. params.filterByArea = True
  32. params.minArea = 20
  33. params.minDistBetweenBlobs = 1
  34. # Filter by Circularity
  35. params.filterByCircularity = True
  36. params.minCircularity = 0.2
  37. # # Filter by Convexity
  38. # params.filterByConvexity = True
  39. # params.minConvexity = 0.90
  40. # Filter by Inertia
  41. params.filterByInertia = True
  42. params.minInertiaRatio = 0.2
  43. # Create a detector with the parameters
  44. detector = cv2.SimpleBlobDetector_create(params)
  45. # Detect blobs.
  46. keypoints = detector.detect(img2)
  47. # Draw detected blobs as red circles.
  48. # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures
  49. # the size of the circle corresponds to the size of blob
  50. k = []
  51. kk = []
  52. for point in keypoints:
  53. count = []
  54. for p in keypoints:
  55. if p == point:
  56. continue
  57. elif (p.pt[0]-point.pt[0])**2+(p.pt[1]-point.pt[1])**2 <= (point.size*4.5)**2 and (abs(point.size/p.size-1) <= 0.3):
  58. count.append(p.pt)
  59. if len(count) >= 2:
  60. k.append((point.pt, count))
  61. kk.append(point)
  62. for point in k:
  63. p, near = point
  64. # distance open the angre and 90 degree
  65. midistance = math.pi/10.0
  66. bottom = []
  67. rigth = []
  68. for p1 in near:
  69. for p2 in near:
  70. if p1 == p2:
  71. continue
  72. u = np.array([p1[0]-p[0], p1[1]-p[1]])
  73. v = np.array([p2[0]-p[0], p2[1]-p[1]])
  74. angle = np.math.atan2(np.linalg.det([u,v]),np.dot(u,v))
  75. if abs(angle-math.pi/2.0) < math.pi/10.0:
  76. bottom = p1
  77. rigth = p2
  78. conner = rigth+u
  79. addu = u/6.0
  80. addv = v/6.0
  81. conners = [p-addu-addv, bottom+addu-addv, rigth-addu+addv, conner+addu+addv]
  82. trans = get_transform_matrix_points(conners, [10, 10], 10)
  83. code = cv2.warpPerspective(gray, trans, dsize=(100, 100))
  84. number = getNumber(code, 150)
  85. if number == False:
  86. continue
  87. #cv2.imshow("m2", code)
  88. uu = np.array([0, 1])
  89. angle = np.math.atan2(np.linalg.det([v,uu]),np.dot(v,uu))
  90. mid = p+u*0.5+v*0.5
  91. if number != 0:
  92. markers.append([number, mid, angle])
  93. #img2 = cv2.drawKeypoints(img2, kk, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
  94. #cv2.imshow("m3", img2)
  95. return markers
  96. def getNumber(img, threshold):
  97. vsplit = np.vsplit(img, 4)
  98. mean = []
  99. for vs in vsplit:
  100. m = []
  101. hsplit = np.hsplit(vs, 4)
  102. for hs in hsplit:
  103. m.append(np.mean(hs))
  104. mean.append(m)
  105. # print(np.array(mean).astype(np.uint8))
  106. # print(mean)
  107. mean = np.array(mean) >= threshold
  108. valid = mean[0, 0] == False
  109. valid = valid and mean[0, 3] == False
  110. valid = valid and mean[0, 3] == False
  111. valid = valid and mean[1, 0] == True
  112. valid = valid and mean[0, 1] == True
  113. valid = valid and mean[2, 0] == True
  114. valid = valid and mean[0, 2] == True
  115. valid = valid and mean[3, 3] == True
  116. valid = valid and mean[1, 3] == True
  117. valid = valid and mean[3, 1] == True
  118. valid = valid and mean[2, 3] == True
  119. valid = valid and mean[3, 2] == True
  120. if valid == False:
  121. return False
  122. number = 0
  123. if not mean[1, 1]:
  124. number += 1
  125. if not mean[1, 2]:
  126. number += 2
  127. if not mean[2, 1]:
  128. number += 4
  129. if not mean[2, 2]:
  130. number += 8
  131. return number
  132. def get_transform_matrix_points(corners, board_size, dpi):
  133. # Read a frame from the video device
  134. # Close the calibration window:
  135. # cv2.destroyWindow("Calibrate")
  136. # If the user selected 4 points
  137. if (len(corners) == 4):
  138. # Do calibration
  139. # src is a list of 4 points on the original image selected by the user
  140. # in the order [TOP_LEFT, BOTTOM_LEFT, TOP_RIGHT, BOTTOM_RIGHT]
  141. src = np.array(corners, np.float32)
  142. # dest is a list of where these 4 points should be located on the
  143. # rectangular board (in the same order):
  144. dest = np.array( [ (0, 0), (0, board_size[1]*dpi), (board_size[0]*dpi, 0), (board_size[0]*dpi, board_size[1]*dpi) ], np.float32)
  145. # Calculate the perspective transform matrix
  146. trans = cv2.getPerspectiveTransform(src, dest)
  147. # If we were given a calibration filename, save this matrix to a file
  148. return trans
  149. else:
  150. return None