import cv2
import numpy as np
import math
from numpy.linalg import norm


def getCMarkers(img):
    """Detect square fiducial markers in a BGR image.

    A marker is an L-shaped triple of dark blobs (top-left, bottom-left,
    top-right corners of a square).  The square is rectified with a
    perspective warp, sampled on a 4x4 grid, validated against the
    expected dark-corner / light-border pattern, and its 2x2 inner cells
    are decoded into a number (1..15).

    Parameters
    ----------
    img : BGR image (numpy array).

    Returns
    -------
    list of [number, mid, angle] where ``number`` is the decoded id,
    ``mid`` the marker centre (numpy array, image coordinates) and
    ``angle`` the marker orientation in radians.
    """
    markers = []

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img3 = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                 cv2.THRESH_BINARY, 125, 10)
    kernel = np.ones((3, 3), np.uint8)
    img2 = cv2.morphologyEx(img3, cv2.MORPH_CLOSE, kernel)
    kernel = np.ones((3, 3), np.uint8)
    # BUG FIX: cv2.dilate's third *positional* parameter is `dst`, not
    # `iterations` -- the original `cv2.dilate(img2, kernel, 2)` performed
    # a single dilation.  Pass it by keyword.
    img2 = cv2.dilate(img2, kernel, iterations=2)
    cv2.imshow("m2", img2)  # debug view of the preprocessed image

    # Configure the blob detector for small, roughly circular dark blobs.
    params = cv2.SimpleBlobDetector_Params()
    params.filterByInertia = False
    params.filterByConvexity = False
    # Filter by area.
    params.filterByArea = True
    params.minArea = 20
    params.minDistBetweenBlobs = 1
    # Filter by circularity.
    params.filterByCircularity = True
    params.minCircularity = 0.2
    # Filter by inertia.
    params.filterByInertia = True
    params.minInertiaRatio = 0.2

    detector = cv2.SimpleBlobDetector_create(params)
    keypoints = detector.detect(img2)

    # Group each keypoint with its neighbours of similar size: marker
    # candidates are keypoints having at least two such neighbours.
    candidates = []
    for point in keypoints:
        neighbours = []
        for other in keypoints:
            if other == point:
                continue
            dist2 = ((other.pt[0] - point.pt[0]) ** 2 +
                     (other.pt[1] - point.pt[1]) ** 2)
            close_enough = dist2 <= (point.size * 4.5) ** 2
            similar_size = abs(point.size / other.size - 1) <= 0.3
            if close_enough and similar_size:
                neighbours.append(other.pt)
        if len(neighbours) >= 2:
            candidates.append((point.pt, neighbours))

    for p, near in candidates:
        # Maximum allowed deviation of the angle from 90 degrees.
        midistance = math.pi / 12.5
        bottom = []
        right = []
        # Find the pair of neighbours whose angle at `p` is closest to 90deg.
        for p1 in near:
            for p2 in near:
                if p1 == p2:
                    continue
                u = np.array([p1[0] - p[0], p1[1] - p[1]])
                v = np.array([p2[0] - p[0], p2[1] - p[1]])
                cosine = np.dot(u, v) / norm(u) / norm(v)
                angle = np.arccos(cosine)
                if abs(angle - math.pi / 2.0) < midistance:
                    bottom = p1
                    right = p2
                    midistance = abs(angle - math.pi / 2.0)
        if midistance == math.pi / 12.5:
            continue  # no near-perpendicular pair found

        u = np.array([bottom[0] - p[0], bottom[1] - p[1]])
        v = np.array([right[0] - p[0], right[1] - p[1]])
        corner = right + u  # fourth (blob-less) corner of the square
        # Expand outward by 1/6 of the side so the warp includes the full
        # marker border, not just the blob centres.
        addu = u * 1.0 / 6.0
        addv = v * 1.0 / 6.0
        corners = [p - addu - addv,       # top-left
                   bottom + addu - addv,  # bottom-left
                   right - addu + addv,   # top-right
                   corner + addu + addv]  # bottom-right

        trans = get_transform_matrix_points(corners, [10, 10], 10)
        code = cv2.warpPerspective(gray, trans, dsize=(100, 100))

        # Sample the rectified marker on a 4x4 grid of mean intensities.
        mean = []
        for vs in np.vsplit(code, 4):
            mean.append([np.mean(hs) for hs in np.hsplit(vs, 4)])
        mean = np.array(mean) >= 100.0  # True = light cell, False = dark

        # Validate: the three blob corners must be dark, all other cells
        # bordering a corner must be light.
        valid = mean[0, 0] == False           # top-left blob
        valid = valid and mean[0, 3] == False  # top-right blob
        # BUG FIX: the original repeated the mean[0, 3] test; the third
        # dark blob (bottom-left) maps to cell [3, 0] and was unchecked.
        valid = valid and mean[3, 0] == False
        valid = valid and mean[1, 0] == True
        valid = valid and mean[0, 1] == True
        valid = valid and mean[2, 0] == True
        valid = valid and mean[0, 2] == True
        valid = valid and mean[3, 3] == True
        valid = valid and mean[1, 3] == True
        valid = valid and mean[3, 1] == True
        valid = valid and mean[2, 3] == True
        valid = valid and mean[3, 2] == True
        if valid == False:
            continue

        # Decode the 2x2 inner cells (dark cell => bit set).
        number = 0
        if not mean[1, 1]:
            number += 1
        if not mean[1, 2]:
            number += 2
        if not mean[2, 1]:
            number += 4
        if not mean[2, 2]:
            number += 8

        # Orientation: angle between the image y-axis and the p->bottom edge.
        uu = np.array([0, 1])
        vv = np.array([p[0] - bottom[0], p[1] - bottom[1]])
        cosine = np.dot(uu, vv) / norm(uu) / norm(vv)
        angle = np.arccos(cosine)

        mid = p + u * 0.5 + v * 0.5  # marker centre
        if number != 0:
            markers.append([number, mid, angle])

    return markers


def get_transform_matrix_points(corners, board_size, dpi):
    """Compute the perspective transform mapping 4 image points to a board.

    Parameters
    ----------
    corners : sequence of 4 (x, y) points in the source image, ordered
        [TOP_LEFT, BOTTOM_LEFT, TOP_RIGHT, BOTTOM_RIGHT].
    board_size : (width, height) of the target board in board units.
    dpi : scale factor (pixels per board unit) of the target image.

    Returns
    -------
    3x3 perspective transform matrix, or None when `corners` does not
    contain exactly 4 points.
    """
    if len(corners) == 4:
        # Source points selected on the original image.
        src = np.array(corners, np.float32)
        # Where those 4 points land on the rectified board (same order).
        dest = np.array([
            (0, 0),
            (0, board_size[1] * dpi),
            (board_size[0] * dpi, 0),
            (board_size[0] * dpi, board_size[1] * dpi),
        ], np.float32)
        return cv2.getPerspectiveTransform(src, dest)
    else:
        return None