How to use the imutils.face_utils.rect_to_bb function in imutils

To help you get started, we've selected a few imutils examples based on popular ways the package is used in public projects.
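
Before the project examples, here is a minimal sketch of the typical pattern: detect faces with dlib, then convert each dlib.rectangle into an OpenCV-style (x, y, w, h) tuple with rect_to_bb. The image path below is a placeholder.

import cv2
import dlib
from imutils import face_utils

detector = dlib.get_frontal_face_detector()

image = cv2.imread("face.jpg")  # hypothetical input image
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# detect faces, then convert each dlib rectangle to (x, y, w, h)
for rect in detector(gray, 1):
    (x, y, w, h) = face_utils.rect_to_bb(rect)
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)

cv2.imshow("Output", image)
cv2.waitKey(0)
cv2.destroyAllWindows()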

github devshaaran / Emotion_Recognition / main.py
    video_capture = cv2.VideoCapture(0)

    while True:
        
        ret, image = video_capture.read()
        if not ret:
            break
        #image = cv2.imread(args["image"])
        image = imutils.resize(image, width=500)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # detect faces in the grayscale image
        rects = detector(gray, 1)
        
        for (i, rect) in enumerate(rects):

            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)
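            # convert dlib's rectangle to an OpenCV-style (x, y, w, h) bounding box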
            (x, y, w, h) = face_utils.rect_to_bb(rect)
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
            # NumPy images are indexed rows first: [y:y+h, x:x+w]
            cropped_image = image[y:y+h, x:x+w]
            cv2.putText(image, "Face #{}".format(i + 1), (x - 10, y - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            for (x, y) in shape:
                cv2.circle(image, (x, y), 1, (0, 0, 255), -1)
            cv2.imshow('cropped', cropped_image)
            cv2.imwrite('0.jpg',cropped_image)
                
        cv2.imshow("Output", image) 
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    video_capture.release()
    cv2.destroyAllWindows()

github zaraanry / face-emotion-recognition / emotions.py
else:
    cap = cv2.VideoCapture('./test/testvdo.mp4') # Video file source

while cap.isOpened():
    ret, bgr_image = cap.read()
    if not ret:
        break

    #bgr_image = video_capture.read()[1]

    gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)

    faces = detector(rgb_image)

    for face_coordinates in faces:

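        # rect_to_bb gives (x, y, w, h); apply_offsets (a project helper) widens it into (x1, x2, y1, y2) crop bounds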
        x1, x2, y1, y2 = apply_offsets(face_utils.rect_to_bb(face_coordinates), emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]
        try:
            gray_face = cv2.resize(gray_face, emotion_target_size)
        except cv2.error:
            continue

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_prediction = emotion_classifier.predict(gray_face)
        emotion_probability = np.max(emotion_prediction)
        emotion_label_arg = np.argmax(emotion_prediction)
        emotion_text = emotion_labels[emotion_label_arg]
        emotion_window.append(emotion_text)

        if len(emotion_window) > frame_window:

github mohitwildbeast / Facial-Recognition-Using-FaceNet-Siamese-One-Shot-Learning / Image_Dataset_Generator.py
number_of_images = 0
MAX_NUMBER_OF_IMAGES = 50
count = 0

while number_of_images < MAX_NUMBER_OF_IMAGES:
	ret, frame = video_capture.read()

	frame = cv2.flip(frame, 1)

	frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

	#faces = face_cascade.detectMultiScale(frame, 1.3, 5)
	faces = detector(frame_gray)
	if len(faces) == 1:
		face = faces[0]
		(x, y, w, h) = face_utils.rect_to_bb(face)
		# pad the crop, clamping so the indices cannot go negative near the border
		face_img = frame_gray[max(0, y - 50):y + h + 100, max(0, x - 50):x + w + 100]
		face_aligned = face_aligner.align(frame, frame_gray, face)

		if count == 5:
			cv2.imwrite(os.path.join(directory, str(name+str(number_of_images)+'.jpg')), face_aligned)
			number_of_images += 1
			count = 0
		print(count)
		count+=1
		

	cv2.imshow('Video', frame)

	if(cv2.waitKey(1) & 0xFF == ord('q')):
		break

github habom2310 / People-tracking-with-Age-and-Gender-detection / detect_and_tracker.py
    ret, frame = cap.read()
    img_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    frame = imutils.resize(frame, width=400)
    if W is None or H is None:
        (H, W) = frame.shape[:2]
    
    # note: the detector could be created once, outside the loop, to avoid per-frame overhead
    detector = dlib.get_frontal_face_detector()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # detect faces in the grayscale image
    rects = detector(gray, 0)

    new_rects = []

    for rect in rects:
        (x, y, w, h) = face_utils.rect_to_bb(rect)
        # skip detections whose box starts above the frame
        if y < 0:
            continue
        new_rects.append((x, y, x + w, y + h))

        face_img = frame[y:y+h, x:x+w].copy()

        blob2 = cv2.dnn.blobFromImage(face_img, 1, (227, 227), MODEL_MEAN_VALUES, swapRB=False)
        
        if cnt%10 == 0:
            # Predict gender
            gender_net.setInput(blob2)
            gender_preds = gender_net.forward()
            gender = gender_list[gender_preds[0].argmax()]
            # Predict age
            age_net.setInput(blob2)

github hay / facetool / facetool / classify.py
        fa = FaceAligner(predictor, desiredFaceWidth=160)
        image = cv2.imread(image_path, cv2.IMREAD_COLOR)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        rects = detector(gray, 2)
        rect_nums = len(rects)
        XY, aligned_images = [], []

        if rect_nums == 0:
            aligned_images.append(image)

            return aligned_images, image, rect_nums, XY
        else:
            for i in range(rect_nums):
                aligned_image = fa.align(image, gray, rects[i])
                aligned_images.append(aligned_image)
                (x, y, w, h) = rect_to_bb(rects[i])
                image = cv2.rectangle(image, (x, y), (x + w, y + h), color=(255, 0, 0), thickness=2)
                XY.append((x, y))

            return np.array(aligned_images), image, rect_nums, XY

github rishikksh20 / Liveness-Detection / opencv_video.py
    else:
        event2 = "Mouth Close"

    cv2.line(rame, tuple(shape[62]), tuple(shape[66]), (180, 42, 220), 2)
    cv2.line(rame, tuple(shape[49]), tuple(shape[59]), (180, 42, 220), 2)
    cv2.line(rame, tuple(shape[1]), tuple(shape[28]), (19, 199, 109), 2)
    cv2.line(rame, tuple(shape[28]), tuple(shape[17]), (19, 199, 109), 2)
    if ratio < 0.6:
        event = "Right turn"
    elif ratio > 1.6:
        event = "Left turn"
    else:
        event = "none"
    # convert dlib's rectangle to an OpenCV-style bounding box
    # [i.e., (x, y, w, h)], then draw the face bounding box
    (x, y, w, h) = face_utils.rect_to_bb(rect)
    cv2.rectangle(rame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    # show the face number
    cv2.putText(rame, "Face #{}".format(i + 1), (x - 10, y - 10),
        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

    cv2.putText(rame, "Ratio: {}--{}".format(event, event2), (10, 30),
        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

    # loop over the (x, y)-coordinates for the facial landmarks
    # and draw them on the image
    for (x, y) in shape:
        cv2.circle(rame, (x, y), 1, (0, 0, 255), -1)

github habom2310 / Heart-rate-measurement-using-camera / new_update / face_utilities.py
shape (array): facial landmarks' co-ords in format of tuples (x,y)
            aligned_face (cv2 image): face after alignment
            aligned_shape (array): facial landmarks' co-ords of the aligned face in format of tuples (x,y)
        
        '''
        if type == "5":
            shape, rects = self.get_landmarks(frame, "5")
            
            if shape is None:
                return None
        else:    
            shape, rects = self.get_landmarks(frame, "68")
            if shape is None:
                return None
        
        (x, y, w, h) = face_utils.rect_to_bb(rects[0])
        
        face = frame[y:y+h,x:x+w]
        aligned_face,aligned_shape = self.face_alignment(frame, shape)
        
        # if(type=="5"):
            # aligned_shape, rects_2 = self.get_landmarks(aligned_face, "5")
            # if aligned_shape is None:
                # return None
        # else:    
            # aligned_shape, rects_2 = self.get_landmarks(aligned_face, "68")
            # if aligned_shape is None:
                # return None
                
        return rects, face, shape, aligned_face, aligned_shape

github GeniSysAI / Vision / Local / LocalStreamer.py
        _, frame = TASS.OCVframe.read()
        frame    = cv2.resize(frame, (640, 480)) 
        rawFrame = frame.copy()

        gray     = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rects    = TASS.detector(gray, 0)
            
        for (i, rect) in enumerate(rects):
            
            shape = face_utils.shape_to_np(
                TASS.predictor(
                    gray,
                    rect))
            
            (x, y, w, h) = face_utils.rect_to_bb(rect)
            
            cv2.rectangle(
                frame, 
                (x, y), 
                (x + w, y + h), 
                (0, 255, 0), 
                2)
                
            for (x, y) in shape:
                
                cv2.circle(
                    frame, 
                    (x, y), 
                    1, 
                    (0, 255, 0), 
                    -1)

github pymit / Memojify / memojifier.py
def live_feed():
    emojis = get_emojis()
    while True:
        img = vcam.read()[1]
        gray_img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        gray_faces = detector(gray_img)
        # guard against frames with no detected face (indexing would raise IndexError)
        if len(gray_faces) == 0:
            continue
        face = gray_faces[0]

        shape_68 = shape_predictor(img,face)
        shape = face_utils.shape_to_np(shape_68)
        (x,y,w,h) = face_utils.rect_to_bb(face)
        faceAligned = fa.align(img, gray_img, face)
        faceAligned = cv2.resize(faceAligned, (256, 256))

        cv2.imshow('aligned',faceAligned)
        cv2.imshow('face ', img[y:y+h, x:x+w])
        pred_probab , pred_class = keras_predict(model, faceAligned)
        print(pred_probab,pred_class)
        img = blend(img, emojis[pred_class], (x, y, w, h))

        cv2.imshow('img', img)
        keypress = cv2.waitKey(1)

        if keypress % 256 == 27:
            print("Escape is pressed, quitting...")
            vcam.release()
            cv2.destroyAllWindows()
            break

github mauckc / headpose / python / save-video-pose.py
        # Convert image to grayscale
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Process The Image
        size = gray.shape
        # Detect faces
        rects = detector(gray,0)
        # check to see if a face was detected, and if so, draw the total
        # number of faces on the frame
        if len(rects) > 0:
            text = "{} face(s) found".format(len(rects))
            cv2.putText(frame, text, (10, 20), cv2.FONT_HERSHEY_SIMPLEX,0.5, (0, 0, 255), 2)
        # loop over the face detections
            for rect in rects:
                # compute the bounding box of the face and draw it on the
                # frame
                (bX, bY, bW, bH) = face_utils.rect_to_bb(rect)
                cv2.rectangle(frame, (bX, bY), (bX + bW, bY + bH),(0, 255, 0), 1)
            # determine the facial landmarks for the face region, then
            # convert the facial landmark (x, y)-coordinates to a NumPy
            # array
                shape = predictor(gray, rect)
                shape = face_utils.shape_to_np(shape)
                # loop over the (x, y)-coordinates for the facial landmarks
                # and draw each of them
                for (i, (x, y)) in enumerate(shape):
                    if i == 33:
                        # do something with our key landmarks:
                        # save them to our new key point list,
                        # i.e. keypoints = [(i, (x, y))]
                        image_points[0] = np.array([x, y], dtype='double')
                        # write on frame in Green
                        cv2.circle(frame, (x, y), 1, (0, 255, 0), -1)

imutils

A series of convenience functions that make basic image processing tasks such as translation, rotation, resizing, skeletonization, displaying Matplotlib images, sorting contours, and detecting edges easier with OpenCV, on both Python 2.7 and Python 3.
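
As a quick illustration, a minimal sketch using two of those conveniences (the input path is a placeholder):

import cv2
import imutils

image = cv2.imread("example.jpg")            # hypothetical input path
resized = imutils.resize(image, width=400)   # resize while preserving aspect ratio
rotated = imutils.rotate(resized, angle=45)  # rotate 45 degrees about the image center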

MIT
Latest version published 4 years ago

Package Health Score: 64 / 100