How to use the imutils.face_utils module in imutils

To help you get started, we’ve selected a few imutils.face_utils examples, based on popular ways the module is used in public projects.

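Most of the snippets below follow the same basic pipeline: detect faces with dlib, predict 68 facial landmarks, then use face_utils to convert dlib's types into NumPy-friendly ones. A minimal sketch, assuming you have downloaded dlib's shape_predictor_68_face_landmarks.dat model and have an example.jpg on disk:

import cv2
import dlib
from imutils import face_utils

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

image = cv2.imread("example.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

for rect in detector(gray, 0):
    shape = predictor(gray, rect)                # dlib full_object_detection
    landmarks = face_utils.shape_to_np(shape)    # (68, 2) NumPy array
    (x, y, w, h) = face_utils.rect_to_bb(rect)   # dlib rectangle -> (x, y, w, h)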

github arnavkj1995 / face_inpainting / preprocess_test_images.py
        filename = os.path.join(images_dir_path, imgs)

        img = io.imread(filename)
        arr = np.array(img)
        H, W, C = arr.shape   # we assume the input images are already face-cropped

        # Ask the detector to find the bounding boxes of each face. The 1 in the
        # second argument indicates that we should upsample the image 1 time. This
        # will make everything bigger and allow us to detect more faces.
        dets = detector(img, 1)
        #print("Number of faces detected: {}".format(len(dets)))
        
        for k, d in enumerate(dets):
            # Get the landmarks/parts for the face in box d.
            shape = predictor(img, d)
            shape = face_utils.shape_to_np(shape)
     
            face_part = img[d.top():d.bottom(), d.left():d.right()]
            face_part = imresize(face_part, [128,128])

            key_point_matrix = visualize_facial_landmarks(img, shape)
            key_point_matrix = key_point_matrix[d.top():d.bottom(), d.left():d.right()]
            key_point_matrix = imresize(key_point_matrix, [128,128])

            imsave('test_images/img' + str(counter) + '.png', face_part)
            imsave('test_images/ky' + str(counter) + '.png', key_point_matrix)
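Note that imresize here comes from scipy.misc, which was removed in SciPy 1.3. If you adapt this snippet today, cv2.resize is a reasonable stand-in (a sketch, not the original author's code; cv2.resize takes a (width, height) tuple rather than a [rows, cols] list):

face_part = cv2.resize(face_part, (128, 128))
key_point_matrix = cv2.resize(key_point_matrix, (128, 128))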
github ipsingh06 / ml-desnapify / src / data / make_dataset.py
    def get_landmarks(self, img):
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = self.__detector(gray, 0)
        shape = self.__predictor(gray, faces[0])
        shape = face_utils.shape_to_np(shape)
        return faces[0], shape
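get_landmarks indexes faces[0] without checking that a face was found, so it raises IndexError on face-free frames. A guarded variant, sketched under the assumption that the class keeps the same private detector and predictor attributes:

    def get_landmarks_safe(self, img):
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = self.__detector(gray, 0)
        if len(faces) == 0:
            return None, None    # caller must handle the no-face case
        shape = self.__predictor(gray, faces[0])
        return faces[0], face_utils.shape_to_np(shape)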
github pymit / Memojify / memojifier.py
def live_feed():
    emojis = get_emojis()
    while True:
        img = vcam.read()[1]
        gray_img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        gray_faces = detector(gray_img)
        face = gray_faces[0]

        shape_68 = shape_predictor(img,face)
        shape = face_utils.shape_to_np(shape_68)
        (x,y,w,h) = face_utils.rect_to_bb(face)
        faceAligned = fa.align(img, gray_img, face)
        faceAligned = cv2.resize(faceAligned, (256, 256))

        cv2.imshow('aligned',faceAligned)
        cv2.imshow('face ', img[y:y+h, x:x+w])
        pred_probab , pred_class = keras_predict(model, faceAligned)
        print(pred_probab,pred_class)
        img = blend(img, emojis[pred_class], (x, y, w, h))

        cv2.imshow('img', img)
        keypress = cv2.waitKey(1)

        if keypress % 256 == 27:
            print("Escape pressed, quitting...")
            vcam.release()
            break
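The fa object is not defined in this excerpt; it is presumably imutils' FaceAligner, which rotates and scales a face so the eyes sit on a horizontal line. A construction sketch, assuming the same 68-point predictor:

from imutils.face_utils import FaceAligner

predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
fa = FaceAligner(predictor, desiredFaceWidth=256)
# fa.align(img, gray_img, face) returns an aligned, desiredFaceWidth-wide crop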
github hay / facetool / facetool / landmarks.py
        # With multiple faces we only deal with the first one, for now
        if nr_of_faces > 1:
            logging.warning("Detected multiple faces, using the first one")

        face = faces[0]
        shape = self.predictor(img, face)

        if self.normalize_coords:
            shape = self._normalize(shape, face)

        if outpath:
            logging.debug(f"Saving to {outpath}")
            out = cv2.imread(path, cv2.IMREAD_COLOR)

            # Also create an image with bounding box and landmark dots
            shape_np = face_utils.shape_to_np(shape)

            for (x, y) in shape_np:
                cv2.circle(out, (x, y), 3, (0, 0, 255), -1)

            cv2.imwrite(outpath, out)

        return shape.parts()
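The project's _normalize helper is not shown. A hypothetical implementation, sketched here only to illustrate the idea, maps each landmark into [0, 1] relative to the face box:

def normalize_landmarks(shape, face):
    # hypothetical sketch; facetool's actual _normalize may differ
    pts = face_utils.shape_to_np(shape).astype("float")
    pts[:, 0] = (pts[:, 0] - face.left()) / float(face.right() - face.left())
    pts[:, 1] = (pts[:, 1] - face.top()) / float(face.bottom() - face.top())
    return pts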
github richmondu / libfaceid / libfaceid / liveness.py
    def __init__(self, path):
        import dlib  # lazy loading
        # load dlib's 68-point facial landmark predictor
        # (note: the attribute is named _detector, but it holds a predictor)
        self._detector = dlib.shape_predictor(path + 'shape_predictor_68_face_landmarks.dat')
        (self._leye_start, self._leye_end) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
        (self._reye_start, self._reye_end) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
        try:
            (self._mouth_start, self._mouth_end) = face_utils.FACIAL_LANDMARKS_IDXS["inner_mouth"]
        except KeyError:
            # older imutils versions have no "inner_mouth" entry
            (self._mouth_start, self._mouth_end) = (60, 68)
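Eye indices like these are typically fed into the eye aspect ratio (EAR), the standard blink cue for liveness detection: the ratio collapses toward zero when the eye closes. A sketch of the usual formula (our addition, not libfaceid's code):

from scipy.spatial import distance as dist

def eye_aspect_ratio(eye):
    # eye is the (6, 2) slice of landmarks for one eye
    A = dist.euclidean(eye[1], eye[5])   # vertical distance
    B = dist.euclidean(eye[2], eye[4])   # vertical distance
    C = dist.euclidean(eye[0], eye[3])   # horizontal distance
    return (A + B) / (2.0 * C)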
github raghavpatnecha / Hello-Morse-OpenCV / morse_cv.py
        self.str = ''
        self.finalString = []
        global L
        self.L = []
        self.closed = False
        self.timer = 0
        self.final = ''
        self.pts = deque(maxlen=512)
        self.thresh = 0.25
        self.dot = 10
        self.dash = 40
        self.detect = dlib.get_frontal_face_detector()
        self.predict = dlib.shape_predictor(
            "shape_predictor_68_face_landmarks.dat")  # Dat file is the crux of the code

        (self.lStart, self.lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
        (self.rStart, self.rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
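At runtime those (start, end) pairs slice the 68-point array produced by face_utils.shape_to_np. A sketch, assuming shape, frame, and the index pairs are available as local names and cv2 is imported:

leftEye = shape[lStart:lEnd]      # six (x, y) points per eye
rightEye = shape[rStart:rEnd]
for eye in (leftEye, rightEye):
    hull = cv2.convexHull(eye.astype("int32"))   # OpenCV expects int32 points
    cv2.drawContours(frame, [hull], -1, (0, 255, 0), 1)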
github iotJumpway / Intel-Examples / Intel-Movidius / TASS / Facenet / WClassifier.py
					frame = imutils.resize(frame, width=640)
					rawFrame = frame.copy()

					gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
					rects = Classifier.detector(gray, 0)

					for (i, rect) in enumerate(rects):
						# determine the facial landmarks for the face region, then
						# convert the facial landmark (x, y)-coordinates to a NumPy
						# array
						shape = Classifier.predictor(gray, rect)
						shape = face_utils.shape_to_np(shape)

						# convert dlib's rectangle to an OpenCV-style bounding box
						# [i.e., (x, y, w, h)], then draw the face bounding box
						(x, y, w, h) = face_utils.rect_to_bb(rect)
						cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

						# loop over the (x, y)-coordinates for the facial landmarks
						# and draw them on the image
						for (x, y) in shape:
							cv2.circle(frame, (x, y), 1, (0, 255, 0), -1)

						frameWait = 0
						currentFace = rawFrame[
							max(0, rect.top()-100): min(rect.bottom()+100, 480),
							max(0, rect.left()-100): min(rect.right()+100, 640)]
						cv2.imwrite("test.jpg",currentFace)
					
						validDir = Classifier._configs["ClassifierSettings"]["NetworkPath"] + Classifier._configs["ClassifierSettings"]["ValidPath"]

						for valid in os.listdir(validDir):
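(The excerpt ends mid-loop.) For reference, the face_utils.rect_to_bb call used above is a thin conversion, roughly equivalent to the sketch below; it passes negative coordinates through unchanged for faces at the frame edge, which is why the crop clamps with max and min:

def rect_to_bb_sketch(rect):
    # dlib rectangle -> OpenCV-style (x, y, w, h)
    x = rect.left()
    y = rect.top()
    return (x, y, rect.right() - x, rect.bottom() - y)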
github TASS-AI / TASS-Facenet / WebCam.py

(This excerpt is identical to the Intel-Examples WClassifier.py snippet above.)
github lelechen63 / 3d_gan / lrw_data.py
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(args.shape_predictor)

    try:
        # load the input image, resize it, and convert it to grayscale
        image = cv2.imread(image_path)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # detect faces in the grayscale image
        rects = detector(gray, 1)
        for (i, rect) in enumerate(rects):

            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)

            for (name, (i, j)) in face_utils.FACIAL_LANDMARKS_IDXS.items():
                if name != 'mouth':
                    continue

                (x, y, w, h) = cv2.boundingRect(np.array([shape[i:j]]))
                center_x = x + int(0.5 * w)
                center_y = y + int(0.5 * h)

                if w > h:
                    r = int(0.65 * w)
                else:
                    r = int(0.65 * h)
                new_x = center_x - r
                new_y = center_y - r
                roi = image[new_y:new_y + 2 * r, new_x:new_x + 2 * r]
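new_x and new_y go negative whenever the mouth circle extends past the image border, and negative NumPy indices silently produce an empty or wrapped roi. A defensive variant (our addition, not the project's code):

h_img, w_img = image.shape[:2]
x0, y0 = max(0, new_x), max(0, new_y)
roi = image[y0:min(y0 + 2 * r, h_img), x0:min(x0 + 2 * r, w_img)]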
github pymit / Memojify / get_images_from_webcam.py
vcam = cv2.VideoCapture(0)
# note: width 480 x height 640 is portrait; most webcams default to 640 x 480
vcam.set(cv2.CAP_PROP_FRAME_WIDTH, 480)
vcam.set(cv2.CAP_PROP_FRAME_HEIGHT, 640)

x,y,w,h = 40,50,30,40

while True:
	ret_val , img = vcam.read()
	gray_img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
	gray_faces = detector(gray_img)

	face = gray_faces[0]
	shape_68 = shape_predictor(img,face)
	shape = face_utils.shape_to_np(shape_68)
	(x,y,w,h) = face_utils.rect_to_bb(face)
	clone = img.copy()
	cv2.rectangle(clone, (x-15, y-20), (x+w+20, y+h+10), (255, 0, 0), 1)
	only_face = imutils.resize(img[y-20:y+h+10,x-15:x+w+20],width=150)

	faceAligned = fa.align(img, gray_img, face)

	cv2.imshow('aligned',faceAligned)
	cv2.imshow('img', clone)
	keypress = cv2.waitKey(1)

	if keypress % 256 == 27:
		print("Escape pressed, quitting...")
		break
	elif keypress%256 == 32:
		img_name = "{}.png".format(frame_counter)
		cv2.imwrite(img_path+"/"+ img_name, faceAligned)

imutils

A series of convenience functions that make basic image processing operations (translation, rotation, resizing, skeletonization, displaying Matplotlib images, sorting contours, detecting edges, and much more) easier with OpenCV on both Python 2.7 and Python 3.

License: MIT
Latest version published 4 years ago
Package Health Score: 64 / 100