
Almost done

capellaresumo 6 years ago
commit
9f4d61965d

BIN
__pycache__/cmarkers.cpython-36.pyc


BIN
__pycache__/tag.cpython-36.pyc


+ 170 - 0
cmarkers.py

@@ -0,0 +1,170 @@
+import cv2
+import numpy as np
+import math
+from numpy.linalg import norm
+
+def getCMarkers (img):
+    markers = []
+    img3 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+    img3 = cv2.adaptiveThreshold(img3, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 125, 10)
+
+    kernel = np.ones((6,6),np.uint8)
+    img2 = cv2.morphologyEx(img3, cv2.MORPH_CLOSE, kernel)
+
+    # kernel = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3))
+    # img2 = cv2.dilate(img2,kernel,iterations = 2)
+
+    kernel = np.ones((3,3),np.uint8)
+    img2 = cv2.dilate(img2, kernel, iterations=3)
+    # cv2.imshow("m2", img2)
+
+    # Setup SimpleBlobDetector parameters.
+    params = cv2.SimpleBlobDetector_Params()
+    params.filterByInertia = False
+    params.filterByConvexity = False
+
+    # # Change thresholds
+    # params.minThreshold = 240
+    # params.maxThreshold = 255
+    # params.thresholdStep = 1
+
+    # Filter by Area.
+    params.filterByArea = True
+    params.minArea = 20
+    params.minDistBetweenBlobs = 1
+
+    # Filter by Circularity
+    params.filterByCircularity = True
+    params.minCircularity = 0.2
+
+    # # Filter by Convexity
+    # params.filterByConvexity = True
+    # params.minConvexity = 0.90
+        
+    # Filter by Inertia
+    params.filterByInertia = True
+    params.minInertiaRatio = 0.2
+
+    # Create a detector with the parameters
+    detector = cv2.SimpleBlobDetector_create(params)
+
+
+    # Detect blobs.
+    keypoints = detector.detect(img2)
+    # img2 = cv2.drawKeypoints(img2, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
+
+    # Draw detected blobs as red circles.
+    # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures
+    # the size of the circle corresponds to the size of blob
+    # Keep only keypoints that have at least two similar-sized neighbours
+    # nearby; each marker is a small cluster of corner dots.
+    k = []
+    for point in keypoints:
+        count = []
+        for p  in keypoints:
+            if p == point:
+                continue
+            elif (p.pt[0]-point.pt[0])**2+(p.pt[1]-point.pt[1])**2 <= (point.size*4.5)**2 and (abs(point.size/p.size-1) <= 0.3):
+                count.append(p.pt)
+        if len(count) >= 2:
+            k.append((point.pt, count))
+
+    for point in k:
+        p, near = point
+        # maximum allowed deviation between the dot-pair angle and 90 degrees
+        midistance = math.pi/15.0
+        bottom = []
+        right = []
+        for p1 in near:
+            for p2 in near:
+                if p1 == p2:
+                    continue
+                u = np.array([p1[0]-p[0], p1[1]-p[1]])
+                v = np.array([p2[0]-p[0], p2[1]-p[1]])
+                c = np.dot(u,v)/norm(u)/norm(v)
+                angle = np.arccos(c)
+                if abs(angle-math.pi/2.0) < midistance:
+                    bottom = p1
+                    right = p2
+                    midistance = abs(angle-math.pi/2.0)
+
+        # no sufficiently perpendicular pair of neighbours found
+        if midistance == math.pi/15.0:
+            continue
+
+        u = np.array([bottom[0]-p[0], bottom[1]-p[1]])
+        v = np.array([right[0]-p[0], right[1]-p[1]])
+        corner = right+u
+        addu = u*1.0/6.0
+        addv = v*1.0/6.0
+        corners = [p-addu-addv, bottom+addu-addv, right-addu+addv, corner+addu+addv]
+        trans = get_transform_matrix_points(corners, [10, 10], 10)
+        code = cv2.warpPerspective(img2, trans, dsize=(100, 100))
+        code = cv2.erode(code, kernel,iterations = 1)
+        # cv2.imshow("m2", code)
+
+        vsplit = np.vsplit(code, 4)
+        mean = []
+        for vs in vsplit:
+            m = []
+            hsplit = np.hsplit(vs, 4)
+            for hs in hsplit:
+                m.append(np.mean(hs))
+            mean.append(m)
+        mean = np.array(mean) > 210.0
+        # A valid marker has dark dots in three corners, with the fourth
+        # corner and the border ring bright (see gem.py for the layout).
+        valid = mean[0, 0] == False
+        valid = valid and mean[0, 3] == False
+        valid = valid and mean[3, 0] == False
+        valid = valid and mean[1, 0] == True
+        valid = valid and mean[0, 1] == True
+        valid = valid and mean[2, 0] == True
+        valid = valid and mean[0, 2] == True
+        valid = valid and mean[3, 3] == True
+        valid = valid and mean[1, 3] == True
+        valid = valid and mean[3, 1] == True
+        valid = valid and mean[2, 3] == True
+        valid = valid and mean[3, 2] == True
+        if not valid:
+            continue
+        # The inner 2x2 cells encode the id bits (1, 2, 4, 8).
+        number = 0
+        if not mean[1, 1]:
+            number += 1
+        if not mean[1, 2]:
+            number += 2
+        if not mean[2, 1]:
+            number += 4
+        if not mean[2, 2]:
+            number += 8
+
+        uu = np.array([0, 1])
+        vv = np.array([p[0]-bottom[0], p[1]-bottom[1]])
+        c = np.dot(uu,vv)/norm(uu)/norm(vv)
+        angle = np.arccos(c)
+        mid = p+u*0.5+v*0.5
+        if number != 0:
+            markers.append([number, mid, angle])
+    return markers
+
+
+def get_transform_matrix_points(corners, board_size, dpi):
+    # Compute the perspective transform directly from four known
+    # corner points (no camera interaction needed).
+ 
+    # If the user selected 4 points
+    if (len(corners) == 4):
+        # Do calibration
+ 
+        # src is a list of 4 points on the original image selected by the user
+        # in the order [TOP_LEFT, BOTTOM_LEFT, TOP_RIGHT, BOTTOM_RIGHT]
+        src = np.array(corners, np.float32)
+ 
+        # dest is a list of where these 4 points should be located on the
+        # rectangular board (in the same order):
+        dest = np.array( [ (0, 0), (0, board_size[1]*dpi), (board_size[0]*dpi, 0), (board_size[0]*dpi, board_size[1]*dpi) ], np.float32)
+ 
+        # Calculate the perspective transform matrix
+        trans = cv2.getPerspectiveTransform(src, dest)
+ 
+        return trans
+    else:
+        return None
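
A minimal sketch of driving the detector from a still image; 'board.jpg' is an assumed filename, and the image is expected to already be a top-down view (server.py warps the camera frame first):

    import cv2
    from cmarkers import getCMarkers

    img = cv2.imread('board.jpg')  # assumed input image
    if img is not None:
        for number, center, angle in getCMarkers(img):
            print("marker", number, "at", center, "angle", angle)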

+ 32 - 0
gem.py

@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+
+import sys
+from cv2 import imwrite
+
+
+import os
+
+from numpy import ones
+from scipy.ndimage import zoom
+
+# Create the output directory before writing the marker images.
+os.makedirs('marker_images', exist_ok=True)
+
+for i in range(0, 16):
+    # 6x6 grid: white background with three dark orientation dots in
+    # the corners at (1,1), (4,1) and (1,4).
+    img = ones((6, 6))*255
+    img[1, 1] = 0
+    img[4, 1] = 0
+    img[1, 4] = 0
+
+    # Bits of i darken the inner 2x2 cells: bit 0 -> (2,2), bit 1 -> (2,3),
+    # bit 2 -> (3,2), bit 3 -> (3,3); cmarkers.py decodes them in this order.
+    if i%2 == 1:
+        img[2, 2] = 0
+
+    if (i>>1)%2 == 1:
+        img[2, 3] = 0
+
+    if (i>>2)%2 == 1:
+        img[3, 2] = 0
+
+    if (i>>3)%2 == 1:
+        img[3, 3] = 0
+
+    print (img)
+    marker = zoom(img, zoom=50, order=0)
+
+    imwrite('marker_images/marker_{}.png'.format(i), marker)
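
As a worked check of the encoding: marker id 5 (binary 0101) darkens cells (2,2) and (3,2), which the decoder in cmarkers.py reads back as 1 + 4 = 5. A minimal sketch that prints each id's inner 2x2 pattern (True = white), assuming only numpy:

    import numpy as np

    for i in range(16):
        inner = np.ones((2, 2), dtype=bool)  # True = white cell
        inner[0, 0] = not (i & 1)            # bit 0 -> cell (2,2) of the 6x6 marker
        inner[0, 1] = not (i & 2)            # bit 1 -> cell (2,3)
        inner[1, 0] = not (i & 4)            # bit 2 -> cell (3,2)
        inner[1, 1] = not (i & 8)            # bit 3 -> cell (3,3)
        print(i, inner.astype(int).ravel())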

BIN
marker_images/marker_0.png


BIN
marker_images/marker_1.png


BIN
marker_images/marker_10.png


BIN
marker_images/marker_11.png


BIN
marker_images/marker_12.png


BIN
marker_images/marker_13.png


BIN
marker_images/marker_14.png


BIN
marker_images/marker_15.png


BIN
marker_images/marker_2.png


BIN
marker_images/marker_3.png


BIN
marker_images/marker_4.png


BIN
marker_images/marker_5.png


BIN
marker_images/marker_6.png


BIN
marker_images/marker_7.png


BIN
marker_images/marker_8.png


BIN
marker_images/marker_9.png


+ 237 - 0
server.py

@@ -0,0 +1,237 @@
+#!/usr/bin/python
+import argparse
+
+import tag
+
+import sys
+import cv2
+import time
+import numpy as np
+import os
+from numpy.linalg import norm
+from cmarkers import getCMarkers, get_transform_matrix_points
+ 
+##
+# Opens a video capture device.
+##
+def open_camera(cam_id = 1):
+    cap = cv2.VideoCapture(cam_id)
+    return cap
+ 
+##
+# Gets a frame from an open video device, or returns None
+# if the capture could not be made.
+##
+def get_frame(device):
+    ret, img = device.read()
+    if not ret: # failed to capture
+        print("Error capturing from video device.", file=sys.stderr)
+        return None
+    return img
+ 
+##
+# Closes all OpenCV windows and releases video capture device
+# before exit.
+##
+def cleanup(cam_id = 0): 
+    cv2.destroyAllWindows()
+    cv2.VideoCapture(cam_id).release()
+ 
+##
+# Creates a new RGB image of the specified size, initially
+# filled with black.
+##
+def new_rgb_image(width, height):
+    image = np.zeros( (height, width, 3), np.uint8)
+    return image
+
+# Global variable containing the 4 points selected by the user in the corners of the board
+corner_point_list = []
+ 
+##
+# This function is called by OpenCV when the user clicks
+# anywhere in a window displaying an image.
+##
+def mouse_click_callback(event, x, y, flags, param):
+    if event == cv2.EVENT_LBUTTONDOWN:
+        # print ("Click at (%d,%d)" % (x,y))
+        corner_point_list.append( (x,y) )
+
+def find_centers(contours):
+    centers = []
+    for contour in contours:
+        moments = cv2.moments(contour, True)
+        if moments['m00'] == 0: # skip degenerate contours
+            continue
+ 
+        center = (moments['m10']/moments['m00'] , moments['m01']/moments['m00'])
+        # Convert floating point contour center into an integer so that
+        # we can display it later.
+        center = (int(round(center[0])),int(round(center[1])))
+        centers.append(center)
+    return centers
+
+def find_contours(image):
+    ret,thresh = cv2.threshold(image,127,255,0)
+    image, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
+    return contours
+
+##
+# Computes a perspective transform matrix by capturing a single
+# frame from a video source and displaying it to the user for
+# corner selection.
+#
+# Parameters:
+# * dev: Video Device (from open_camera())
+# * board_size: A tuple/list with 2 elements containing the width and height (respectively) of the gameboard (in arbitrary units, like inches)
+# * dpi: Scaling factor for elements of board_size
+# * calib_file: Optional. If specified, the perspective transform matrix is saved under this filename.
+#   This file can be loaded later to bypass the calibration step (assuming nothing has moved).
+##
+def get_transform_matrix(dev, board_size, dpi, calib_file = None):
+    # Read a frame from the video device
+    img = get_frame(dev)
+ 
+    # Display image to user
+    cv2.imshow("Calibrate", img)
+ 
+    # Register the mouse callback on this window. When 
+    # the user clicks anywhere in the "Calibrate" window,
+    # the function mouse_click_callback() is called (defined above)
+    cv2.setMouseCallback("Calibrate", mouse_click_callback)
+ 
+    # Wait until the user has selected 4 points
+    while True:
+        # If the user has selected all 4 points, exit loop.
+        if (len(corner_point_list) >= 4):
+            print ("Got 4 points: "+str(corner_point_list))
+            break
+ 
+        # If the user hits a key, exit loop, otherwise remain.
+        if (cv2.waitKey(10) >= 0):
+            break
+ 
+    # Close the calibration window:
+    cv2.destroyWindow("Calibrate")
+ 
+    # If the user selected 4 points
+    if (len(corner_point_list) >= 4):
+        # Do calibration
+ 
+        # src is a list of 4 points on the original image selected by the user
+        # in the order [TOP_LEFT, BOTTOM_LEFT, TOP_RIGHT, BOTTOM_RIGHT]
+        src = np.array(corner_point_list, np.float32)
+ 
+        # dest is a list of where these 4 points should be located on the
+        # rectangular board (in the same order):
+        dest = np.array( [ (0, 0), (0, board_size[1]*dpi), (board_size[0]*dpi, 0), (board_size[0]*dpi, board_size[1]*dpi) ], np.float32)
+ 
+        # Calculate the perspective transform matrix
+        trans = cv2.getPerspectiveTransform(src, dest)
+ 
+        # If we were given a calibration filename, save this matrix to a file
+        if calib_file:
+            np.savetxt(calib_file, trans)
+        return trans
+    else:
+        return None
+
+def get_green_dots(img_orig):
+    # # Threshold the HSV image to get only blue colors
+    # mask = cv2.inRange(img_orig, lower_blue, upper_blue)
+    b,g,r = cv2.split(img_orig)
+    gg = g - 6
+    green = ((gg > r) & (gg > b) & (g > 120)).astype(np.uint8)*255
+    # green = cv2.cvtColor(green, cv2.COLOR_GRAY2BGR)
+    kernel = np.ones((4,4),np.uint8)
+    opening = cv2.morphologyEx(green, cv2.MORPH_OPEN, kernel)
+
+    img = cv2.bitwise_and(img_orig,img_orig,mask = opening)
+    contours = find_contours(opening)
+    return find_centers(contours)
+
+##################################################### 
+### Calibration Example ###
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    cam_id = 0
+    
+    parser.add_argument('-a',
+                        '--automatic',
+                        action='store_false',
+                        help='Calibrate automatically from the green corner dots instead of mouse clicks')
+
+    parser.add_argument('-n',
+                        '--nocamera',
+                        action='store_false',
+                        help='Disable the camera; players must send their positions themselves')
+
+    args = parser.parse_args()
+    manual = args.automatic
+    camera = args.nocamera
+
+    dev = None
+
+    if camera:
+        dev = open_camera(cam_id)
+ 
+    # The size of the board in inches, measured between the two
+    # robot boundaries:
+    board_size = [84, 84]
+    point = []
+ 
+    # Number of pixels to display per inch in the final transformed image.
+    # This was selected somewhat arbitrarily (10 fits on screen here):
+    dpi = 10
+
+    # Size (in pixels) of the transformed image
+    transform_size = (int(board_size[0]*dpi), int(board_size[1]*dpi))
+ 
+    # Calculate the perspective transform matrix
+    transform = None
+    if manual and camera:
+        transform = get_transform_matrix(dev, board_size, dpi)
+        # transform = get_transform_matrix_points([(327, 75), (280, 475), (805, 141), (805, 568)], board_size, dpi)
+
+    server = tag.TagServer(10318, board_size)
+
+    while True:
+        img_orig = np.zeros((transform_size[1], transform_size[0], 3), np.uint8)
+        if camera:
+            img_orig = get_frame(dev)
+        else:
+            time.sleep(0.01)
+
+        if img_orig is not None: # if we did get an image
+            img = img_orig
+            if camera:
+                # Apply the transformation matrix to skew the image and display it
+                if not manual:
+                    centers = get_green_dots(img_orig)
+                    transform = get_transform_matrix_points(centers, board_size, dpi)
+
+                img = cv2.warpPerspective(img_orig, transform, dsize=transform_size)
+                markers = getCMarkers(img)
+
+                for i in markers:
+                    idd, p, head = i
+                    server.updatePosition(idd, p[0]/dpi, p[1]/dpi, head)
+                    p = (int(p[0]), int(p[1]))
+                    cv2.circle(img, p, 30, (0,255,0))
+                    cv2.putText(img, str(idd), p, cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 0, 255))
+
+            for robot in server.robots():
+                for line in robot.lines():
+                    p1 = (line[0]*dpi).astype(int)
+                    p2 = (line[1]*dpi).astype(int)
+                    cv2.line(img, (p1[0], p1[1]), (p2[0], p2[1]), (255, 255, 0))
+            cv2.imshow("warped", img)
+ 
+        else: # if we failed to capture (camera disconnected?), then quit
+            break
+ 
+        if (cv2.waitKey(2) >= 0):
+            server.stop()
+            break
+ 
+    cleanup(cam_id)
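
If the four board corners are already known, the interactive calibration can be skipped entirely (the commented get_transform_matrix_points call above hints at this). A minimal sketch; the pixel coordinates are placeholders, not measured values:

    from cmarkers import get_transform_matrix_points

    board_size = [84, 84]
    dpi = 10
    # Corner pixels in [TOP_LEFT, BOTTOM_LEFT, TOP_RIGHT, BOTTOM_RIGHT] order.
    corners = [(327, 75), (280, 475), (805, 141), (805, 568)]  # placeholder values
    transform = get_transform_matrix_points(corners, board_size, dpi)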

+ 159 - 0
t.py

@@ -0,0 +1,159 @@
+#!/usr/bin/python
+#
+# ENGR421 -- Applied Robotics, Spring 2013
+# OpenCV Python Demo
+# Taj Morton <mortont@onid.orst.edu>
+#
+import sys
+import cv2
+import time
+import numpy
+import os
+ 
+##
+# Opens a video capture device (the 800x600 @ 30 FPS settings are left
+# commented out below).
+##
+def open_camera(cam_id = 1):
+    cap = cv2.VideoCapture(cam_id)
+    # cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 600);
+    # cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 800);
+    # cap.set(cv2.cv.CV_CAP_PROP_FPS, 30);
+    return cap
+ 
+##
+# Gets a frame from an open video device, or returns None
+# if the capture could not be made.
+##
+def get_frame(device):
+    ret, img = device.read()
+    if not ret: # failed to capture
+        print("Error capturing from video device.", file=sys.stderr)
+        return None
+    return img
+ 
+##
+# Closes all OpenCV windows and releases video capture device
+# before exit.
+##
+def cleanup(cam_id = 0): 
+    cv2.destroyAllWindows()
+    cv2.VideoCapture(cam_id).release()
+ 
+##
+# Creates a new RGB image of the specified size, initially
+# filled with black.
+##
+def new_rgb_image(width, height):
+    image = numpy.zeros( (height, width, 3), numpy.uint8)
+    return image
+
+# Global variable containing the 4 points selected by the user in the corners of the board
+corner_point_list = []
+ 
+##
+# This function is called by OpenCV when the user clicks
+# anywhere in a window displaying an image.
+##
+def mouse_click_callback(event, x, y, flags, param):
+    if event == cv2.EVENT_LBUTTONDOWN:
+        print ("Click at (%d,%d)" % (x,y))
+        corner_point_list.append( (x,y) )
+
+##
+# Computes a perspective transform matrix by capturing a single
+# frame from a video source and displaying it to the user for
+# corner selection.
+#
+# Parameters:
+# * dev: Video Device (from open_camera())
+# * board_size: A tuple/list with 2 elements containing the width and height (respectively) of the gameboard (in arbitrary units, like inches)
+# * dpi: Scaling factor for elements of board_size
+# * calib_file: Optional. If specified, the perspective transform matrix is saved under this filename.
+#   This file can be loaded later to bypass the calibration step (assuming nothing has moved).
+##
+def get_transform_matrix(dev, board_size, dpi, calib_file = None):
+    # Read a frame from the video device
+    img = get_frame(dev)
+ 
+    # Display image to user
+    cv2.imshow("Calibrate", img)
+ 
+    # Register the mouse callback on this window. When 
+    # the user clicks anywhere in the "Calibrate" window,
+    # the function mouse_click_callback() is called (defined above)
+    cv2.setMouseCallback("Calibrate", mouse_click_callback)
+ 
+    # Wait until the user has selected 4 points
+    while True:
+        # If the user has selected all 4 points, exit loop.
+        if (len(corner_point_list) >= 4):
+            print ("Got 4 points: "+str(corner_point_list))
+            break
+ 
+        # If the user hits a key, exit loop, otherwise remain.
+        if (cv2.waitKey(10) >= 0):
+            break
+ 
+    # Close the calibration window:
+    cv2.destroyWindow("Calibrate")
+ 
+    # If the user selected 4 points
+    if (len(corner_point_list) >= 4):
+        # Do calibration
+ 
+        # src is a list of 4 points on the original image selected by the user
+        # in the order [TOP_LEFT, BOTTOM_LEFT, TOP_RIGHT, BOTTOM_RIGHT]
+        src = numpy.array(corner_point_list, numpy.float32)
+ 
+        # dest is a list of where these 4 points should be located on the
+        # rectangular board (in the same order):
+        dest = numpy.array( [ (0, 0), (0, board_size[1]*dpi), (board_size[0]*dpi, 0), (board_size[0]*dpi, board_size[1]*dpi) ], numpy.float32)
+ 
+        # Calculate the perspective transform matrix
+        trans = cv2.getPerspectiveTransform(src, dest)
+ 
+        # If we were given a calibration filename, save this matrix to a file
+        if calib_file:
+            numpy.savetxt(calib_file, trans)
+        return trans
+    else:
+        return None
+
+##################################################### 
+### Calibration Example ###
+if __name__ == "__main__":
+    cam_id = 0
+    dev = open_camera(cam_id)
+ 
+    # The size of the board in inches, measured between the two
+    # robot boundaries:
+    board_size = [22.3125, 45]
+ 
+    # Number of pixels to display per inch in the final transformed image. This
+    # was selected somewhat arbitrarily (I chose 17 because it fit on my screen):
+    dpi = 17
+ 
+    # Calculate the perspective transform matrix
+    transform = get_transform_matrix(dev, board_size, dpi)
+ 
+ 
+    # Size (in pixels) of the transformed image
+    transform_size = (int(board_size[0]*dpi), int(board_size[1]*dpi))
+    while True:
+        img_orig = get_frame(dev)
+        if img_orig is not None: # if we did get an image
+            # Show the original (untransformed) image
+            cv2.imshow("video", img_orig)
+            
+            # Apply the transformation matrix to skew the image and display it
+            img = cv2.warpPerspective(img_orig, transform, dsize=transform_size)
+            cv2.imshow("warped", img)
+ 
+        else: # if we failed to capture (camera disconnected?), then quit
+            break
+ 
+        if (cv2.waitKey(2) >= 0):
+            break
+ 
+    cleanup(cam_id)
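
Because get_transform_matrix persists the matrix via numpy.savetxt when calib_file is given, a later run could reload it and skip the click-through calibration. A hedged sketch; 'calib.txt' and 'frame.png' are assumed filenames:

    import cv2
    import numpy

    # Reload a matrix saved earlier with get_transform_matrix(dev, board_size, dpi, 'calib.txt').
    transform = numpy.loadtxt('calib.txt')
    img = cv2.imread('frame.png')  # any captured frame
    if img is not None:
        warped = cv2.warpPerspective(img, transform,
                                     dsize=(int(22.3125*17), int(45*17)))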

+ 255 - 0
tag.py

@@ -0,0 +1,255 @@
+import socket
+import sys
+import threading
+import numpy as np
+from socket import error as SocketError
+
+
+class Robot():
+    #        front
+    #       ---------
+    #       |       |
+    #       |       |
+    #       |  LLL  |
+    #       |  LLL  |
+    #       ---------
+
+    # idd: robot id
+    # xsize: robot width
+    # ysize: robot length
+    # xtag: horizontal distance from the left side to the tag center
+    # ytag: vertical distance from the top to the tag center
+    # posx, posy, course: robot pose
+    def __init__(self, idd, xsize, ysize, xtag, ytag, posx = 0, posy = 0, course = 0):
+        self.idd = idd
+        self.xsize = xsize
+        self.ysize = ysize
+        self.xtag = xtag
+        self.ytag = ytag
+
+        self.posx = posx
+        self.posy = posy
+        self.course = course
+
+    def lines(self):
+        p1 = np.array([-self.xtag, self.ytag])
+        p2 = np.array([-self.xtag, -self.ysize+self.ytag])
+        p3 = np.array([self.xsize-self.xtag, self.ytag])
+        p4 = np.array([self.xsize-self.xtag, -self.ysize+self.ytag])
+
+        pos = np.array([self.posx, self.posy])
+
+        c, s = np.cos(self.course), np.sin(self.course)
+        R = np.array(((c,-s), (s, c)))
+
+        p1 = p1.dot(R)+pos
+        p2 = p2.dot(R)+pos
+        p3 = p3.dot(R)+pos
+        p4 = p4.dot(R)+pos
+        return np.array([[p1, p2], [p2, p4], [p4, p3], [p1, p3]])
+
+    def ccw(self, A,B,C):
+        return (C[1]-A[1]) * (B[0]-A[0]) > (B[1]-A[1]) * (C[0]-A[0])
+
+    # Return true if line segments AB and CD intersect
+    def line_intersect(self, A, B, C, D):
+        return self.ccw(A,C,D) != self.ccw(B,C,D) and self.ccw(A,B,C) != self.ccw(A,B,D)
+
+    # Check whether this robot's outline intersects the other robot's
+    def intersect(self, robot):
+        thislines = self.lines()
+        other = robot.lines()
+        for i in thislines:
+            for j in other:
+                if self.line_intersect(i[0], i[1], j[0], j[1]):
+                    return True
+        return False
+
+
+class TagServer(threading.Thread):
+    def __init__(self, port, board_size):
+        threading.Thread.__init__(self)
+
+        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        server_address = ('0.0.0.0', port)
+        print ('Starting up on %s port %s' % server_address)
+        sock.bind(server_address)
+        sock.listen(1)
+
+        sock.settimeout(1)
+        self.sock = sock
+        self.clients_threads = []
+
+        self.tag = None
+        self.board_size = board_size
+
+        self.runV = True
+        self.start()
+
+    def updatePosition (self, idd, posx, posy, course):
+        for thread in self.clients_threads:
+            if thread.robot is not None and thread.robot.idd == idd:
+                msg = "P "
+                msg = msg + str(posx)+","
+                msg = msg + str(posy)+","
+                msg = msg + str(course)+","
+                msg = msg + str(thread.robot.idd)+","
+                msg = msg + str(thread.robot.xsize)+","
+                msg = msg + str(thread.robot.ysize)+","
+                msg = msg + str(thread.robot.xtag)+","
+                msg = msg + str(thread.robot.ytag)
+                thread.robot.posx = posx
+                thread.robot.posy = posy
+                thread.robot.course = course
+                self.broadcast(msg)
+
+        # check for collision
+        for i in self.robots():
+            if i != self.tag:
+                continue
+            for j in self.robots():
+                if i == j:
+                    continue
+                if i.intersect(j):
+                    self.tag = j
+                    self.brodcast("RUN "+str(self.tag.id)+" 10")
+
+    def stopGame (self):
+        self.broadcast("STOP")
+
+    def startGame (self):
+        self.broadcast("START")
+        if self.tag is None:
+            tag = np.random.choice(self.robots(), 1)
+            if len(tag) == 1:
+                self.tag = tag[0]
+                self.broadcast("RUN "+str(self.tag.idd)+" 10")
+                return True
+        return False
+
+    def broadcast(self, msg):
+        for thread in self.clients_threads:
+            if thread.robot is not None:
+                thread.send(msg+"\n")
+
+    def run(self):
+        while self.runV:
+            try:
+                (clientsock, (ip, port)) = self.sock.accept()
+                newthread = TagThread(ip, port, clientsock, self)
+                self.clients_threads.append(newthread)
+            except socket.timeout as err:
+                pass
+        self.sock.close()
+
+    def robots(self):
+        r = []
+        for thread in self.clients_threads:
+            if thread.robot is not None:
+                r.append(thread.robot)
+        return r
+
+    def stop(self):
+        for thread in self.clients_threads:
+            thread.stop()
+        self.runV = False
+
+
+class TagThread(threading.Thread):
+
+    def __init__(self, ip, port, socket, server):
+        threading.Thread.__init__(self)
+        self.ip = ip
+        self.port = port
+        self.sock = socket
+
+        self.points = 0
+
+        self.robot = None
+        self.bz = server.board_size
+        self.server = server
+
+        socket.settimeout(1)
+        self.runV = True
+        self.start()
+
+
+    def run(self):
+        while self.runV:
+            try:
+                data = self.sock.recv(64).decode('ascii')
+            except socket.timeout as err:
+                continue
+            except SocketError as e:
+                self.robot = None
+                break
+            data = data.split(" ")
+            try:
+                if data[0] == "SUB":
+                    idd, xsize, ysize, xtag, ytag = data[1].split(",")
+                    self.robot = Robot(int(idd), float(xsize), float(ysize), float(xtag), float(ytag))
+                    self.send("OK "+str(self.bz[0])+","+str(self.bz[1])+"\n")
+                if data[0] == "P":
+                    posx, posy, course = data[1].split(",")
+                    posx, posy, course = float(posx), float(posy), float(course)
+                    self.server.updatePosition(self.robot.idd, posx, posy, course)
+            except Exception as e:
+                print(e)
+                self.send("ERR\n")
+        self.sock.close()
+
+    def send(self, msg):
+        self.sock.sendall(msg.encode('ascii'))
+
+    def stop(self):
+        self.runV = False
+
+class TagClient(threading.Thread):
+    def __init__(self, ip, port):
+        threading.Thread.__init__(self)
+        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        server_address = (ip, port)
+        sock.connect(server_address)
+        self.sock = sock
+        # Callbacks live under their own names so they do not shadow
+        # threading.Thread.start() and friends.
+        self.on_stop = None
+        self.on_start = None
+        self.tagger = None
+        self.position = None
+
+    def info (self, idd, xsize, ysize, xtag, ytag):
+        message = "SUB "+str(idd)+","+str(xsize)+","+str(ysize)+","+str(xtag)+","+str(ytag)+"\n"
+        self.sock.sendall(message.encode('ascii'))
+        self.start()
+
+    def run(self):
+        while True:
+            try:
+                data = self.sock.recv(64).decode('ascii')
+            except SocketError as e:
+                self.robot = None
+                break
+            data = data.split("\n")[0].split(" ")
+            if data[0] == "OK":
+                self.board_size = self.position(data[1].split(","))
+                print("SUB OK")
+            elif data[0] == "P" and self.position is not None:
+                self.position(data[1].split(","))
+
+    def stopCallback (self, function):
+        self.on_stop = function
+
+    def startCallback (self, function):
+        self.on_start = function
+
+    def taggerCallback (self, function):
+        self.tagger = function
+
+    def positionCallback (self, function):
+        self.position = function
+
+    def boardinfoCallback (self, function):
+        self.boardinfo = function
+
+    # Simulation only
+    def setPosition(self, px, py):
+        message = "P "+str(px)+","+str(py)+"\n"
+        self.sock.sendall(message.encode('ascii'))
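
For reference, a hedged sketch of the wire protocol as TagThread.run parses it (all values illustrative): a client subscribes with "SUB id,xsize,ysize,xtag,ytag", the server answers "OK width,height", and the client then streams "P posx,posy,course" updates.

    import socket

    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect(('127.0.0.1', 10318))      # port used by server.py
    s.sendall(b"SUB 1,20,30,10,15\n")    # robot id 1, 20x30 units, tag center at (10,15)
    print(s.recv(64).decode('ascii'))    # expect "OK 84,84" for the default board
    s.sendall(b"P 42.0,17.5,1.57\n")     # position update: x, y, course (radians)
    s.close()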

+ 67 - 0
test.py

@@ -0,0 +1,67 @@
+import numpy as np
+import cv2
+import glob
+
+filename = '1.jpg'
+img = cv2.imread(filename)
+gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
+
+# out1 = cv2.filter2D(gray, -1, np.array(([-1, -2, -1],[0, 0, 0],[1,2,1])))
+# opencvOutput = cv2.filter2D(gray, -1, np.array(([-1, 0, 1],[-2, 0, 2],[-1,0,1])))
+# cv2.imshow('dst',opencvOutput+out1)
+cv2.imshow('dst',img[:,:,0])
+cv2.waitKey(0)
+cv2.imshow('dst',img[:,:,1])
+cv2.waitKey(0)
+cv2.imshow('dst',img[:,:,2])
+cv2.waitKey(0)
+
+gray = np.float32(gray)
+dst = cv2.cornerHarris(gray,2,3,0.04)
+
+#result is dilated for marking the corners, not important
+dst = cv2.dilate(dst,None)
+
+# Threshold for an optimal value, it may vary depending on the image.
+img[dst>0.01*dst.max()]=[0,0,255]
+cv2.imshow('dst', img)
+
+if cv2.waitKey(0):
+    cv2.destroyAllWindows()
+
+# # termination criteria
+# criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
+
+# # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
+# objp = np.zeros((6*7,3), np.float32)
+# objp[:,:2] = np.mgrid[0:7,0:6].T.reshape(-1,2)
+
+# # Arrays to store object points and image points from all the images.
+# objpoints = [] # 3d point in real world space
+# imgpoints = [] # 2d points in image plane.
+
+# images = glob.glob('*.jpg')
+
+
+# for fname in images:
+#     img = cv2.imread(fname)
+#     gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
+
+#     cv2.imshow('img',img)
+#     cv2.waitKey(1000)
+#     # # Find the chess board corners
+#     # ret, corners = cv2.findChessboardCorners(gray, (7,6),None)
+
+#     # # If found, add object points, image points (after refining them)
+#     # if ret == True:
+#     #     objpoints.append(objp)
+
+#     #     cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)
+#     #     imgpoints.append(corners)
+
+#     #     # Draw and display the corners
+#     #     cv2.drawChessboardCorners(img, (7,6), corners2,ret)
+#     #     cv2.imshow('img',img)
+#     #     cv2.waitKey(500)
+
+# cv2.destroyAllWindows()