
Almost done

capellaresumo committed 7 years ago
commit 2c150a3489
4 changed files with 1 addition and 320 deletions
  1. gem.py    + 0 - 32
  2. t.py      + 0 - 159
  3. tag.py    + 1 - 62
  4. test.py   + 0 - 67

+ 0 - 32
gem.py

@@ -1,32 +0,0 @@
-#!/usr/bin/env python
-
-from cv2 import imwrite
-from numpy import ones
-from scipy.ndimage import zoom
-
-# Each marker is a 6x6 grid: three fixed dots near the corners plus a
-# 2x2 block in the centre that encodes the 4-bit id i.
-for i in range(16):
-    img = ones((6, 6))*255
-    img[1, 1] = 0
-    img[4, 1] = 0
-    img[1, 4] = 0
-
-    if i%2 == 1:
-        img[2, 2] = 0
-
-    if (i>>1)%2 == 1:
-        img[2, 3] = 0
-
-    if (i>>2)%2 == 1:
-        img[3, 2] = 0
-
-    if (i>>3)%2 == 1:
-        img[3, 3] = 0
-
-    print(img)
-    marker = zoom(img, zoom=50, order=0)
-
-    imwrite('marker_images/marker_{}.png'.format(i), marker)
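For context, the deleted gem.py encodes a 4-bit id into the centre 2x2 cells of each 6x6 marker before scaling it by 50. Below is a minimal sketch of reading that id back, assuming an upright, uncropped marker image and the same 50-pixel cell size (decode_marker is a hypothetical helper, not part of this repository):

from cv2 import imread, IMREAD_GRAYSCALE

def decode_marker(path, cell=50):
    img = imread(path, IMREAD_GRAYSCALE)
    # sample the centre of each id cell: (2,2) is bit 0, (2,3) bit 1, (3,2) bit 2, (3,3) bit 3
    def bit(r, c):
        return 1 if img[r*cell + cell//2, c*cell + cell//2] < 128 else 0
    return bit(2, 2) | (bit(2, 3) << 1) | (bit(3, 2) << 2) | (bit(3, 3) << 3)

print(decode_marker('marker_images/marker_5.png'))  # expected: 5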

+ 0 - 159
t.py

@@ -1,159 +0,0 @@
-#!/usr/bin/python
-#
-# ENGR421 -- Applied Robotics, Spring 2013
-# OpenCV Python Demo
-# Taj Morton <mortont@onid.orst.edu>
-#
-import sys
-import cv2
-import time
-import numpy
-import os
- 
-##
-# Opens a video capture device. The 800x600 @ 30 FPS settings are
-# left commented out below.
-##
-def open_camera(cam_id = 1):
-    cap = cv2.VideoCapture(cam_id)
-    # cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 600);
-    # cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 800);
-    # cap.set(cv2.cv.CV_CAP_PROP_FPS, 30);
-    return cap
- 
-##
-# Gets a frame from an open video device, or returns None
-# if the capture could not be made.
-##
-def get_frame(device):
-    ret, img = device.read()
-    if not ret:  # failed to capture
-        print("Error capturing from video device.", file=sys.stderr)
-        return None
-    return img
- 
-##
-# Closes all OpenCV windows and releases video capture device
-# before exit.
-##
-def cleanup(dev):
-    cv2.destroyAllWindows()
-    dev.release()
- 
-##
-# Creates a new RGB image of the specified size, initially
-# filled with black.
-##
-def new_rgb_image(width, height):
-    image = numpy.zeros( (height, width, 3), numpy.uint8)
-    return image
-
-# Global variable containing the 4 points selected by the user in the corners of the board
-corner_point_list = []
- 
-##
-# This function is called by OpenCV when the user clicks
-# anywhere in a window displaying an image.
-##
-def mouse_click_callback(event, x, y, flags, param):
-    if event == cv2.EVENT_LBUTTONDOWN:
-        print("Click at (%d,%d)" % (x, y))
-        corner_point_list.append( (x,y) )
-
-##
-# Computes a perspective transform matrix by capturing a single
-# frame from a video source and displaying it to the user for
-# corner selection.
-#
-# Parameters:
-# * dev: Video Device (from open_camera())
-# * board_size: A tuple/list with 2 elements containing the width and height (respectively) of the gameboard (in arbitrary units, like inches)
-# * dpi: Scaling factor for elements of board_size
-# * calib_file: Optional. If specified, the perspective transform matrix is saved under this filename.
-#   This file can be loaded later to bypass the calibration step (assuming nothing has moved).
-##
-def get_transform_matrix(dev, board_size, dpi, calib_file = None):
-    # Read a frame from the video device
-    img = get_frame(dev)
- 
-    # Display the image to the user
-    cv2.imshow("Calibrate", img)
- 
-    # Register the mouse callback on this window. When 
-    # the user clicks anywhere in the "Calibrate" window,
-    # the function mouse_click_callback() is called (defined above)
-    cv2.setMouseCallback("Calibrate", mouse_click_callback)
- 
-    # Wait until the user has selected 4 points
-    while True:
-        # If the user has selected all 4 points, exit loop.
-        if (len(corner_point_list) >= 4):
-            print ("Got 4 points: "+str(corner_point_list))
-            break
- 
-        # If the user hits a key, exit the loop; otherwise keep waiting.
-        if cv2.waitKey(10) >= 0:
-            break
- 
-    # Close the calibration window:
-    cv2.destroyWindow("Calibrate")
- 
-    # If the user selected 4 points
-    if (len(corner_point_list) >= 4):
-        # Do calibration
- 
-        # src is a list of 4 points on the original image selected by the user
-        # in the order [TOP_LEFT, BOTTOM_LEFT, TOP_RIGHT, BOTTOM_RIGHT]
-        src = numpy.array(corner_point_list, numpy.float32)
- 
-        # dest is a list of where these 4 points should be located on the
-        # rectangular board (in the same order):
-        dest = numpy.array( [ (0, 0), (0, board_size[1]*dpi), (board_size[0]*dpi, 0), (board_size[0]*dpi, board_size[1]*dpi) ], numpy.float32)
- 
-        # Calculate the perspective transform matrix
-        trans = cv2.getPerspectiveTransform(src, dest)
- 
-        # If we were given a calibration filename, save this matrix to a file
-        if calib_file:
-            numpy.savetxt(calib_file, trans)
-        return trans
-    else:
-        return None
-
-##################################################### 
-### Calibration Example ###
-if __name__ == "__main__":
-    cam_id = 0
-    dev = open_camera(cam_id)
- 
-    # The size of the board in inches, measured between the two
-    # robot boundaries:
-    board_size = [22.3125, 45]
- 
-    # Number of pixels to display per inch in the final transformed image. This
-    # was selected somewhat arbitrarily (I chose 17 because it fit on my screen):
-    dpi = 17
- 
-    # Calculate the perspective transform matrix
-    transform = get_transform_matrix(dev, board_size, dpi)
- 
- 
-    # Size (in pixels) of the transformed image
-    transform_size = (int(board_size[0]*dpi), int(board_size[1]*dpi))
-    while True:
-        img_orig = get_frame(dev)
-        if img_orig is not None: # if we did get an image
-            # Show the original (untransformed) image
-            cv2.imshow("video", img_orig)
-            
-            # Apply the transformation matrix to skew the image and display it
-            img = cv2.warpPerspective(img_orig, transform, dsize=transform_size)
-            cv2.imshow("warped", img)
- 
-        else: # if we failed to capture (camera disconnected?), then quit
-            break
- 
-        if (cv2.waitKey(2) >= 0):
-            break
- 
-    cleanup(dev)
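The calib_file parameter of get_transform_matrix is documented above as a way to skip the corner-clicking step on later runs. A minimal sketch of that reuse, assuming a matrix was previously saved with numpy.savetxt ('calib.txt' is a hypothetical filename, not one the script defines):

import cv2
import numpy

transform = numpy.loadtxt('calib.txt')          # 3x3 matrix written by numpy.savetxt
transform_size = (int(22.3125*17), int(45*17))  # board_size * dpi, as in the example above
dev = open_camera(0)
frame = get_frame(dev)
if frame is not None:
    warped = cv2.warpPerspective(frame, transform, dsize=transform_size)
    cv2.imshow("warped", warped)
    cv2.waitKey(0)
cleanup(dev)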

+ 1 - 62
tag.py

@@ -3,68 +3,7 @@ import sys
 import threading
 import numpy as np
 from socket import error as SocketError
-
-
-class Robot():
-    #        front
-    #       ---------
-    #       |       |
-    #       |       |
-    #       |  LLL  |
-    #       |  LLL  |
-    #       ---------
-
-    # idd: robot id
-    # xsize: robot width
-    # ysize: robot length
-    # xtag: horizontal distance from the left side to the centre of the tag
-    # ytag: vertical distance from the top to the centre of the tag
-    # posx, posy, course: robot pose (position and heading)
-    def __init__(self, idd, xsize, ysize, xtag, ytag, posx = 0, posy = 0, course = 0):
-        self.idd = idd
-        self.xsize = xsize
-        self.ysize = ysize
-        self.xtag = xtag
-        self.ytag = ytag
-
-        self.posx = posx
-        self.posy = posy
-        self.course = course
-
-    def lines(self):
-        p1 = np.array([-self.xtag, self.ytag])
-        p2 = np.array([-self.xtag, -self.ysize+self.ytag])
-        p3 = np.array([self.xsize-self.xtag, self.ytag])
-        p4 = np.array([self.xsize-self.xtag, -self.ysize+self.ytag])
-
-        pos = np.array([self.posx, self.posy])
-
-        c, s = np.cos(self.course), np.sin(self.course)
-        R = np.array(((c,-s), (s, c)))
-
-        p1 = p1.dot(R)+pos
-        p2 = p2.dot(R)+pos
-        p3 = p3.dot(R)+pos
-        p4 = p4.dot(R)+pos
-        return np.array([[p1, p2], [p2, p4], [p4, p3], [p1, p3]])
-
-    def ccw(self, A,B,C):
-        return (C[1]-A[1]) * (B[0]-A[0]) > (B[1]-A[1]) * (C[0]-A[0])
-
-    # Return True if line segments AB and CD intersect
-    def line_intersect(self, A, B, C, D):
-        return self.ccw(A, C, D) != self.ccw(B, C, D) and self.ccw(A, B, C) != self.ccw(A, B, D)
-
-    # Take another robot and check whether any edge of this robot
-    # intersects any edge of the other one
-    def intersect(self, robot):
-        thislines = self.lines()
-        other = robot.lines()
-        for i in thislines:
-            for j in other:
-                if self.line_intersect(i[0], i[1], j[0], j[1]):
-                    return True
-        return False
-
+from robot import Robot
 
 
 class TagServer(threading.Thread):
     def __init__(self, port, board_size):
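This commit moves the Robot class out of tag.py and imports it from a robot module instead. A minimal usage sketch of the collision test it provides (the dimensions and poses below are made up for illustration):

from robot import Robot
import numpy as np

a = Robot(idd=1, xsize=10, ysize=20, xtag=5, ytag=5, posx=0, posy=0, course=0)
b = Robot(idd=2, xsize=10, ysize=20, xtag=5, ytag=5, posx=8, posy=0, course=np.pi/4)

print(a.lines())       # the four edges of robot a in board coordinates
print(a.intersect(b))  # True if any edge of a crosses any edge of b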

+ 0 - 67
test.py

@@ -1,67 +0,0 @@
-import numpy as np
-import cv2
-import glob
-
-filename = '1.jpg'
-img = cv2.imread(filename)
-gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
-
-# out1 = cv2.filter2D(gray, -1, np.array(([-1, -2, -1],[0, 0, 0],[1,2,1])))
-# opencvOutput = cv2.filter2D(gray, -1, np.array(([-1, 0, 1],[-2, 0, 2],[-1,0,1])))
-# cv2.imshow('dst',opencvOutput+out1)
-cv2.imshow('dst',img[:,:,0])
-cv2.waitKey(0)
-cv2.imshow('dst',img[:,:,1])
-cv2.waitKey(0)
-cv2.imshow('dst',img[:,:,2])
-cv2.waitKey(0)
-
-gray = np.float32(gray)
-dst = cv2.cornerHarris(gray,2,3,0.04)
-
-#result is dilated for marking the corners, not important
-dst = cv2.dilate(dst,None)
-
-# Threshold for an optimal value; it may vary depending on the image.
-img[dst > 0.01*dst.max()] = [0, 0, 255]
-
-# show the Harris corners marked in red
-cv2.imshow('dst', img)
-if cv2.waitKey(0):
-    cv2.destroyAllWindows()
-
-# # termination criteria
-# criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
-
-# # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
-# objp = np.zeros((6*7,3), np.float32)
-# objp[:,:2] = np.mgrid[0:7,0:6].T.reshape(-1,2)
-
-# # Arrays to store object points and image points from all the images.
-# objpoints = [] # 3d point in real world space
-# imgpoints = [] # 2d points in image plane.
-
-# images = glob.glob('*.jpg')
-
-
-# for fname in images:
-#     img = cv2.imread(fname)
-#     gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
-
-#     cv2.imshow('img',img)
-#     cv2.waitKey(1000)
-#     # # Find the chess board corners
-#     # ret, corners = cv2.findChessboardCorners(gray, (7,6),None)
-
-#     # # If found, add object points, image points (after refining them)
-#     # if ret == True:
-#     #     objpoints.append(objp)
-
-#     #     cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)
-#     #     imgpoints.append(corners)
-
-#     #     # Draw and display the corners
-#     #     cv2.drawChessboardCorners(img, (7,6), corners2,ret)
-#     #     cv2.imshow('img',img)
-#     #     cv2.waitKey(500)
-
-# cv2.destroyAllWindows()
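The commented-out loop above is the data-collection half of the standard OpenCV chessboard calibration: it would fill objpoints and imgpoints from every *.jpg. As a hedged sketch only (test.py never gets this far), the step that would normally follow is:

# assumes objpoints/imgpoints were filled by the loop above and that
# `gray` still holds the last grayscale image
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
    objpoints, imgpoints, gray.shape[::-1], None, None)
undistorted = cv2.undistort(img, mtx, dist)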