Face Detection: replace OpenCV with dlib #25

Open · wants to merge 3 commits into master
1 change: 1 addition & 0 deletions REQUIREMENTS.txt
@@ -5,3 +5,4 @@ numpy==1.12.1
 h5py==2.7.0
 statistics
 opencv2-python==3.2.0
+dlib>=19.3.0
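
A quick post-install sanity check for the new dependency (a minimal sketch, not part of this PR; assumes the dlib Python bindings built successfully, which typically needs CMake and a C++ toolchain):

# Sanity check for the new dlib dependency (hypothetical snippet, not part
# of this PR). get_frontal_face_detector() is the entry point the PR uses.
import dlib

detector = dlib.get_frontal_face_detector()
print(type(detector))  # dlib's HOG-based frontal face detector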
15 changes: 10 additions & 5 deletions src/image_emotion_gender_demo.py
@@ -6,16 +6,17 @@

 from utils.datasets import get_labels
 from utils.inference import detect_faces
-from utils.inference import load_detection_model
+from utils.inference import make_face_coordinates
 from utils.inference import draw_text
 from utils.inference import draw_bounding_box
 from utils.inference import apply_offsets
+from utils.inference import load_detection_model
+
 from utils.inference import load_image
 from utils.preprocessor import preprocess_input

 # parameters for loading data and images
 image_path = sys.argv[1]
-detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
 emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
 gender_model_path = '../trained_models/gender_models/simple_CNN.81-0.96.hdf5'
 emotion_labels = get_labels('fer2013')
@@ -29,7 +30,7 @@
 emotion_offsets = (0, 0)

 # loading models
-face_detection = load_detection_model(detection_model_path)
+face_detection = load_detection_model()
 emotion_classifier = load_model(emotion_model_path, compile=False)
 gender_classifier = load_model(gender_model_path, compile=False)

@@ -43,8 +44,12 @@
 gray_image = np.squeeze(gray_image)
 gray_image = gray_image.astype('uint8')

-faces = detect_faces(face_detection, gray_image)
-for face_coordinates in faces:
+detected_faces, score, idx = detect_faces(face_detection, gray_image)
+
+for detected_face in detected_faces:
+
+    face_coordinates = make_face_coordinates(detected_face)
+
     x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
     rgb_face = rgb_image[y1:y2, x1:x2]
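
Since detect_faces now returns dlib's per-detection scores alongside the rectangles, callers could filter weak detections before running the classifiers. A hypothetical refinement, not part of this PR (the threshold value is an assumption to tune):

# Hypothetical use of the scores returned by the new detect_faces
# (not in this PR): skip low-confidence faces before classification.
min_score = 0.5  # assumed threshold; tune per application
detected_faces, scores, idx = detect_faces(face_detection, gray_image)
for detected_face, face_score in zip(detected_faces, scores):
    if face_score < min_score:
        continue  # ignore weak detections
    face_coordinates = make_face_coordinates(detected_face)
    # ...gender/emotion classification proceeds as in the demo above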
12 changes: 7 additions & 5 deletions src/image_gradcam_demo.py
@@ -10,9 +10,10 @@
 from utils.grad_cam import modify_backprop
 from utils.grad_cam import calculate_guided_gradient_CAM
 from utils.datasets import get_labels
-from utils.inference import detect_faces
 from utils.inference import apply_offsets
+from utils.inference import detect_faces
 from utils.inference import load_detection_model
+from utils.inference import make_face_coordinates
 from utils.preprocessor import preprocess_input
 from utils.inference import draw_bounding_box
 from utils.inference import load_image
@@ -35,20 +36,21 @@
 color = (0, 255, 0)

 # loading models
-detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
 model = load_model(model_filename, compile=False)
 target_size = model.input_shape[1:3]
-face_detection = load_detection_model(detection_model_path)
+face_detection = load_detection_model()

 # loading images
 rgb_image = load_image(image_path, grayscale=False)
 gray_image = load_image(image_path, grayscale=True)
 gray_image = np.squeeze(gray_image)
 gray_image = gray_image.astype('uint8')
-faces = detect_faces(face_detection, gray_image)
+detected_faces, score, idx = detect_faces(face_detection, gray_image)

 # start prediction for every image
-for face_coordinates in faces:
+for detected_face in detected_faces:
+
+    face_coordinates = make_face_coordinates(detected_face)
+
     x1, x2, y1, y2 = apply_offsets(face_coordinates, offsets)
     rgb_face = rgb_image[y1:y2, x1:x2]
15 changes: 11 additions & 4 deletions src/utils/inference.py
@@ -1,4 +1,5 @@
 import cv2
+import dlib
 import matplotlib.pyplot as plt
 import numpy as np
 from keras.preprocessing import image
@@ -7,12 +8,18 @@ def load_image(image_path, grayscale=False, target_size=None):
     pil_image = image.load_img(image_path, grayscale, target_size)
     return image.img_to_array(pil_image)

-def load_detection_model(model_path):
-    detection_model = cv2.CascadeClassifier(model_path)
-    return detection_model
+def load_detection_model():
+    return dlib.get_frontal_face_detector()

 def detect_faces(detection_model, gray_image_array):
-    return detection_model.detectMultiScale(gray_image_array, 1.3, 5)
+    return detection_model.run(gray_image_array, 0, 0)
+
+def make_face_coordinates(detected_face):
+    x = detected_face.left()
+    y = detected_face.top()
+    width = detected_face.right() - detected_face.left()
+    height = detected_face.bottom() - detected_face.top()
+    return [x, y, width, height]

 def draw_bounding_box(face_coordinates, image_array, color):
     x, y, w, h = face_coordinates
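
Taken together, these helpers replace the Haar-cascade flow (detectMultiScale returning [x, y, w, h] arrays) with dlib rectangles plus a converter. A minimal end-to-end sketch of the new API (the blank frame is a placeholder; real callers pass an 8-bit grayscale image):

# End-to-end sketch of the dlib-based detection API introduced above.
import dlib
import numpy as np

detector = dlib.get_frontal_face_detector()       # load_detection_model()
gray_image = np.zeros((480, 640), dtype='uint8')  # placeholder frame, no faces

# dlib's run() returns three parallel sequences -- rectangles, scores,
# and sub-detector indices -- hence the 3-way unpack in the demos.
rectangles, scores, idx = detector.run(gray_image, 0, 0)

for rect in rectangles:
    # make_face_coordinates: dlib.rectangle -> [x, y, width, height]
    x, y = rect.left(), rect.top()
    width = rect.right() - rect.left()
    height = rect.bottom() - rect.top()
    print([x, y, width, height])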
9 changes: 5 additions & 4 deletions src/video_emotion_color_demo.py
@@ -10,10 +10,10 @@
 from utils.inference import draw_bounding_box
 from utils.inference import apply_offsets
 from utils.inference import load_detection_model
+from utils.inference import make_face_coordinates
 from utils.preprocessor import preprocess_input

 # parameters for loading data and images
-detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
 emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
 emotion_labels = get_labels('fer2013')

@@ -22,7 +22,7 @@
 emotion_offsets = (20, 40)

 # loading models
-face_detection = load_detection_model(detection_model_path)
+face_detection = load_detection_model()
 emotion_classifier = load_model(emotion_model_path, compile=False)

 # getting input model shapes for inference
@@ -38,10 +38,11 @@
 bgr_image = video_capture.read()[1]
 gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
 rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
-faces = detect_faces(face_detection, gray_image)
+detected_faces, score, idx = detect_faces(face_detection, gray_image)

-for face_coordinates in faces:
+for detected_face in detected_faces:
+
+    face_coordinates = make_face_coordinates(detected_face)
     x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
     gray_face = gray_image[y1:y2, x1:x2]
     try:
10 changes: 6 additions & 4 deletions src/video_emotion_gender_demo.py
@@ -10,10 +10,10 @@
 from utils.inference import draw_bounding_box
 from utils.inference import apply_offsets
 from utils.inference import load_detection_model
+from utils.inference import make_face_coordinates
 from utils.preprocessor import preprocess_input

 # parameters for loading data and images
-detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
 emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
 gender_model_path = '../trained_models/gender_models/simple_CNN.81-0.96.hdf5'
 emotion_labels = get_labels('fer2013')
@@ -26,7 +26,7 @@
 emotion_offsets = (20, 40)

 # loading models
-face_detection = load_detection_model(detection_model_path)
+face_detection = load_detection_model()
 emotion_classifier = load_model(emotion_model_path, compile=False)
 gender_classifier = load_model(gender_model_path, compile=False)

@@ -46,9 +46,11 @@
 bgr_image = video_capture.read()[1]
 gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
 rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
-faces = detect_faces(face_detection, gray_image)
+detected_faces, score, idx = detect_faces(face_detection, gray_image)

-for face_coordinates in faces:
+for detected_face in detected_faces:
+
+    face_coordinates = make_face_coordinates(detected_face)

     x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
     rgb_face = rgb_image[y1:y2, x1:x2]
11 changes: 6 additions & 5 deletions src/video_gradcam_demo.py
@@ -11,6 +11,7 @@
 from utils.inference import detect_faces
 from utils.inference import apply_offsets
 from utils.inference import load_detection_model
+from utils.inference import make_face_coordinates
 from utils.preprocessor import preprocess_input
 from utils.inference import draw_bounding_box
 from utils.datasets import get_class_to_arg
@@ -39,9 +40,8 @@
 guided_model = modify_backprop(model, 'GuidedBackProp', task)
 saliency_function = compile_saliency_function(guided_model, 'conv2d_7')

-# parameters for loading data and images
-detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
-face_detection = load_detection_model(detection_model_path)
+# parameters for loading data and images
+face_detection = load_detection_model()
 color = (0, 255, 0)

 # getting input model shapes for inference
@@ -57,10 +57,11 @@
 bgr_image = video_capture.read()[1]
 gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
 rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
-faces = detect_faces(face_detection, gray_image)
+detected_faces, score, idx = detect_faces(face_detection, gray_image)

-for face_coordinates in faces:
+for detected_face in detected_faces:
+
+    face_coordinates = make_face_coordinates(detected_face)
     x1, x2, y1, y2 = apply_offsets(face_coordinates, offsets)
     gray_face = gray_image[y1:y2, x1:x2]
     try:
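
All three video demos now share the same per-frame pattern. A minimal sketch of that loop (assumes a webcam at index 0 and OpenCV's HighGUI; press 'q' to quit):

# Minimal per-frame detection loop shared by the video demos after
# this change (webcam index 0 assumed; press 'q' to quit).
import cv2
import dlib

detector = dlib.get_frontal_face_detector()
video_capture = cv2.VideoCapture(0)

while True:
    bgr_image = video_capture.read()[1]
    gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)

    detected_faces, scores, idx = detector.run(gray_image, 0, 0)
    for face in detected_faces:
        # draw each detection in green, matching the demos' color choice
        cv2.rectangle(bgr_image, (face.left(), face.top()),
                      (face.right(), face.bottom()), (0, 255, 0), 2)

    cv2.imshow('window_frame', bgr_image)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv2.destroyAllWindows()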