Browse Source

Arrumando marcador

capellaresumo 5 years ago
parent
commit
1ec4551412
2 changed files with 38 additions and 64 deletions
  1. cmarkers.py (+27, −24)
  2. server.py (+11, −40)

cmarkers.py (+27, −24)

@@ -6,19 +6,21 @@ def getCMarkers (img):
     e1 = cv2.getTickCount()
     markers = []
     gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
-    img3 = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 25, 10)
-    #cv2.imshow("m3", img3)
-    print("threshold", (cv2.getTickCount() - e1)/ cv2.getTickFrequency())
-    kernel = np.ones((6,6),np.uint8)
+    img3 = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 15, 10)
+    
+    ####print("threshold", (cv2.getTickCount() - e1)/ cv2.getTickFrequency())
+    kernel = np.ones((2,2),np.uint8)
     img2 = cv2.morphologyEx(img3, cv2.MORPH_CLOSE, kernel)
-    print("close", (cv2.getTickCount() - e1)/ cv2.getTickFrequency())
-
+    ####print("close", (cv2.getTickCount() - e1)/ cv2.getTickFrequency())
+    # cv2.imshow("m3", img2)
     # kernel = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3))
     # img2 = cv2.dilate(img2,kernel,iterations = 2)
 
-    kernel = np.ones((3,3),np.uint8)
-    img2 = cv2.dilate(img2,kernel, 2)
-    print("dilate", (cv2.getTickCount() - e1)/ cv2.getTickFrequency())
+    kernel = np.ones((2,2),np.uint8)
+    img2 = cv2.dilate(img2,kernel, 1)
+
+    # cv2.imshow("m3", img2)
+    ####print("dilate", (cv2.getTickCount() - e1)/ cv2.getTickFrequency())
     # Setup SimpleBlobDetector parameters.
     params = cv2.SimpleBlobDetector_Params()
     params.filterByInertia = False
@@ -31,16 +33,16 @@ def getCMarkers (img):
 
     # Filter by Area.
     params.filterByArea = True
-    params.minArea = 10
+    params.minArea = 5
     params.minDistBetweenBlobs = 1
 
     # Filter by Circularity
     params.filterByCircularity = True
-    params.minCircularity = 0.2
+    params.minCircularity = 0.5
 
     # # Filter by Convexity
-    # params.filterByConvexity = True
-    # params.minConvexity = 0.90
+    params.filterByConvexity = True
+    params.minConvexity = 0.70
         
     # Filter by Inertia
     params.filterByInertia = True
@@ -55,14 +57,14 @@ def getCMarkers (img):
     keypoints = detector.detect(img2)
 
 
-    print("blob", (cv2.getTickCount() - e1)/ cv2.getTickFrequency())
+    ####print("blob", (cv2.getTickCount() - e1)/ cv2.getTickFrequency())
 
     # Draw detected blobs as red circles.
     # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures
     # the size of the circle corresponds to the size of blob
     k = []
     kk = []
-    #print(len(keypoints))
+    #####print(len(keypoints))
     for point  in keypoints:
         count = []
         for p  in keypoints:
@@ -74,11 +76,11 @@ def getCMarkers (img):
             k.append((point.pt, count))
             kk.append(point)
 
-    print("near", (cv2.getTickCount() - e1)/ cv2.getTickFrequency())
+    ####print("near", (cv2.getTickCount() - e1)/ cv2.getTickFrequency())
     for point in k:
         p, near = point
         # distance open the angre and 90 degree
-        midistance = math.pi/10.0
+        midistance = math.pi/30.0
         bottom = [] 
         rigth = [] 
         for p1 in near:
@@ -88,7 +90,7 @@ def getCMarkers (img):
                 u = np.array([p1[0]-p[0], p1[1]-p[1]])
                 v = np.array([p2[0]-p[0], p2[1]-p[1]])
                 angle = np.math.atan2(np.linalg.det([u,v]),np.dot(u,v))
-                if abs(angle-math.pi/2.0) < math.pi/10.0:
+                if abs(angle-math.pi/2.0) < math.pi/30.0:
                     bottom = p1
                     rigth = p2
 
@@ -99,12 +101,13 @@ def getCMarkers (img):
                     conners = [p-addu-addv, bottom+addu-addv, rigth-addu+addv, conner+addu+addv]
                     trans = get_transform_matrix_points(conners, [10, 10], 10)
                     code = cv2.warpPerspective(gray, trans, dsize=(100, 100))
+                    # code = cv2.adaptiveThreshold(code, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 25, 1)
 
-                    number = getNumber(code, 130)
+                    number = getNumber(code, 160)
                     if number == False:
+                        # cv2.imshow("m2", code)
                         continue
 
-                    #cv2.imshow("m2", code)
 
                     uu = np.array([0, 1])
                     angle = np.math.atan2(np.linalg.det([v,uu]),np.dot(v,uu))
@@ -113,8 +116,8 @@ def getCMarkers (img):
                     if number != 0:
                         markers.append([number, mid, angle])
     
-    #img2 = cv2.drawKeypoints(img2, kk, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
-    #cv2.imshow("m3", img2)
+    img2 = cv2.drawKeypoints(img2, kk, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
+    cv2.imshow("m3", img2)
     return markers
 
 def getNumber(img, threshold):
@@ -126,8 +129,8 @@ def getNumber(img, threshold):
         for hs in hsplit:
             m.append(np.mean(hs))
         mean.append(m)
-    #print(np.array(mean).astype(np.uint8))
-    #print(mean)
+    # print(np.array(mean).astype(np.uint8))
+    #####print(mean)
     mean = np.array(mean) >= threshold
     valid = mean[0, 0] == False
     valid = valid and mean[0, 3] == False

server.py (+11, −40)

@@ -36,14 +36,7 @@ def get_frame(device):
 def cleanup(cam_id = 0): 
     cv2.destroyAllWindows()
     cv2.VideoCapture(cam_id).release()
- 
-##
-# Creates a new RGB image of the specified size, initially
-# filled with black.
-##
-def new_rgb_image(width, height):
-    image = numpy.zeros( (height, width, 3), numpy.uint8)
-    return image
+
 
 # Global variable containing the 4 points selected by the user in the corners of the board
 corner_point_list = []
@@ -123,7 +116,7 @@ def get_transform_matrix(dev, board_size, dpi, calib_file = None):
         # dest is a list of where these 4 points should be located on the
         # rectangular board (in the same order):
         dest = np.array( [ (0, 0), (0, board_size[1]*dpi), (board_size[0]*dpi, 0), (board_size[0]*dpi, board_size[1]*dpi) ], np.float32)
- 
+        print(corner_point_list)
         # Calculate the perspective transform matrix
         trans = cv2.getPerspectiveTransform(src, dest)
  
@@ -134,19 +127,6 @@ def get_transform_matrix(dev, board_size, dpi, calib_file = None):
     else:
         return None
 
-def get_green_dots(img_orig):
-    # # Threshold the HSV image to get only blue colors
-    # mask = cv2.inRange(img_orig, lower_blue, upper_blue)
-    b,g,r = cv2.split(img_orig)
-    gg = g - 6
-    green = ((gg > r) & (gg > b) & (g > 120)).astype(np.uint8)*255
-    # green = cv2.cvtColor(green, cv2.COLOR_GRAY2BGR)
-    kernel = np.ones((4,4),np.uint8)
-    opening = cv2.morphologyEx(green, cv2.MORPH_OPEN, kernel)
-
-    img = cv2.bitwise_and(img_orig,img_orig,mask = opening)
-    contours = find_contours(opening)
-    return find_centers(contours)
 
 ##################################################### 
 ### Calibration Example ###
@@ -154,11 +134,6 @@ if __name__ == "__main__":
     parser = argparse.ArgumentParser()
     cam_id = 1
     dev = None
-    
-    parser.add_argument('-a',
-                        '--automatic',
-                        action='store_false',
-                        help='Enable auto detect green dots (default=False)')
 
     parser.add_argument('-n',
                         '--nocamera',
@@ -166,7 +141,6 @@ if __name__ == "__main__":
                         help='Disable camera, players need to send the position (default=False)')
 
     args = parser.parse_args()
-    manual = args.automatic
     camera = args.nocamera
 
     dev = None
@@ -176,21 +150,21 @@ if __name__ == "__main__":
  
     # The size of the board in inches, measured between the two
     # robot boundaries:
-    board_size = [140, 125]
+    board_size = [215, 185]
     point = []
  
     # Number of pixels to display per inch in the final transformed image. This
     # was selected somewhat arbitrarily (I chose 17 because it fit on my screen):
-    dpi = 10
+    dpi = 5
 
     # Size (in pixels) of the transformed image
     transform_size = (int(board_size[0]*dpi), int(board_size[1]*dpi))
  
     # Calculate the perspective transform matrix
     transform = None
-    if manual and camera:
-        transform = get_transform_matrix(dev, board_size, dpi)
-        #transform = get_transform_matrix_points([(74, 20), (76, 399), (445, 4), (530, 368)], board_size, dpi)
+    if camera:
+        # transform = get_transform_matrix(dev, board_size, dpi)
+        transform = get_transform_matrix_points([(340, 10), (78, 1066), (1502, 6), (1806, 1045)], board_size, dpi)
 
     server = tag.TagServer(10318, board_size)
 
@@ -205,10 +179,6 @@ if __name__ == "__main__":
         if img_orig is not None: # if we did get an image
             img = img_orig
             if camera:
-                # Apply the transformation matrix to skew the image and display it
-                if not manual:
-                    centers = get_green_dots(img_orig)
-                    transform = get_transform_matrix_points(centers, board_size, dpi)
 
                 img = cv2.warpPerspective(img_orig, transform, dsize=transform_size)
                 e1 = cv2.getTickCount()
@@ -216,7 +186,7 @@ if __name__ == "__main__":
                 markers = getCMarkers(img)
                 e2 = cv2.getTickCount()
                 tt = (e2 - e1)/ cv2.getTickFrequency()
-                print("mk", tt)
+                # print("mk", tt)
 
                 for i in markers:
                     idd, p, head = i
@@ -235,12 +205,13 @@ if __name__ == "__main__":
                     cv2.line(img, (p1[0], p1[1]), (p2[0], p2[1]), color)
             if server.paused:
                 cv2.putText(img, "PAUSE", (0,30), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255))
-            res = cv2.resize(img,None,fx=.5, fy=.5)
+            # res = cv2.resize(img,None,fx=.5, fy=.5)
             end = time.time()
             seconds = end - start
             fps  = 1.0 / seconds
             start = time.time()
-            cv2.putText(res, str(fps), (0,60), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255))
+            cv2.putText(img, str(fps), (0,60), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255))
+            res = img
             cv2.imshow("warped", res)
  
         else: # if we failed to capture (camera disconnected?), then quit