first_stage.py
import math

import numpy as np
import torch
from PIL import Image

from .box_utils import nms, _preprocess
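

# The helpers `nms` and `_preprocess` are imported from box_utils and are not
# shown in this file. The sketch below illustrates what `_preprocess` typically
# does in MTCNN ports (HxWxC image -> normalized 1xCxHxW batch). The name
# `_preprocess_reference` and the exact normalization constants are assumptions
# for illustration, not this repository's actual implementation.
def _preprocess_reference(img):
    """Convert an HxWx3 float image to a normalized 1x3xHxW array (sketch)."""
    img = img.transpose((2, 0, 1))       # HWC -> CHW
    img = np.expand_dims(img, 0)         # add a batch dimension
    img = (img - 127.5) * 0.0078125      # map pixel values to roughly [-1, 1]
    return img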


def run_first_stage(image, net, scale, threshold):
    """Run P-Net, generate bounding boxes, and do NMS.

    Arguments:
        image: an instance of PIL.Image.
        net: an instance of pytorch's nn.Module, P-Net.
        scale: a float number,
            scale width and height of the image by this number.
        threshold: a float number,
            threshold on the probability of a face when generating
            bounding boxes from predictions of the net.

    Returns:
        a float numpy array of shape [n_boxes, 9],
        bounding boxes with scores and offsets (4 + 1 + 4).
    """
    # scale the image and convert it to a float array
    width, height = image.size
    sw, sh = math.ceil(width * scale), math.ceil(height * scale)
    img = image.resize((sw, sh), Image.BILINEAR)
    img = np.asarray(img, 'float32')

    with torch.no_grad():
        img = torch.FloatTensor(_preprocess(img))
        output = net(img)
        probs = output[1].data.numpy()[0, 1, :, :]
        offsets = output[0].data.numpy()
        # probs: probability of a face at each sliding window
        # offsets: transformations to true bounding boxes

    boxes = _generate_bboxes(probs, offsets, scale, threshold)
    if len(boxes) == 0:
        return None

    keep = nms(boxes[:, 0:5], overlap_threshold=0.5)
    return boxes[keep]
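

# Illustrative usage: P-Net is normally run over an image pyramid and the boxes
# from every scale are stacked before further processing. This is a sketch only;
# the function name, the min_face_size/threshold defaults, and the pyramid
# factor are assumptions for illustration, not values defined in this repository.
def _example_run_over_pyramid(image, net, min_face_size=20.0, threshold=0.6):
    """Run P-Net at several scales and stack the surviving boxes (sketch)."""
    width, height = image.size
    min_length = min(width, height)

    # Build a scale pyramid: start so that a face of min_face_size pixels maps
    # onto the 12x12 P-Net receptive field, then shrink by a constant factor.
    scale = 12.0 / min_face_size
    factor = 0.707
    scales = []
    while min_length * scale > 12.0:
        scales.append(scale)
        scale *= factor

    boxes_per_scale = []
    for s in scales:
        boxes = run_first_stage(image, net, scale=s, threshold=threshold)
        if boxes is not None:
            boxes_per_scale.append(boxes)

    if not boxes_per_scale:
        return None
    return np.vstack(boxes_per_scale)  # float array of shape [n_boxes, 9]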


def _generate_bboxes(probs, offsets, scale, threshold):
    """Generate bounding boxes at places
    where there is probably a face.

    Arguments:
        probs: a float numpy array of shape [n, m].
        offsets: a float numpy array of shape [1, 4, n, m].
        scale: a float number,
            width and height of the image were scaled by this number.
        threshold: a float number.

    Returns:
        a float numpy array of shape [n_boxes, 9]
    """
    # applying P-Net is equivalent, in some sense, to
    # moving a 12x12 window with stride 2
    stride = 2
    cell_size = 12

    # indices of boxes where there is probably a face
    inds = np.where(probs > threshold)
    if inds[0].size == 0:
        return np.array([])

    # transformations of bounding boxes
    tx1, ty1, tx2, ty2 = [offsets[0, i, inds[0], inds[1]] for i in range(4)]
    # they are defined as:
    # w = x2 - x1 + 1
    # h = y2 - y1 + 1
    # x1_true = x1 + tx1*w
    # x2_true = x2 + tx2*w
    # y1_true = y1 + ty1*h
    # y2_true = y2 + ty2*h

    offsets = np.array([tx1, ty1, tx2, ty2])
    score = probs[inds[0], inds[1]]

    # P-Net is applied to scaled images,
    # so we need to rescale bounding boxes back
    bounding_boxes = np.vstack([
        np.round((stride*inds[1] + 1.0)/scale),
        np.round((stride*inds[0] + 1.0)/scale),
        np.round((stride*inds[1] + 1.0 + cell_size)/scale),
        np.round((stride*inds[0] + 1.0 + cell_size)/scale),
        score, offsets
    ])
    # the extra 1.0 most likely comes from the original (1-indexed) Matlab
    # implementation of MTCNN, whose coordinate convention this code follows
    return bounding_boxes.T
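

# The offsets stored in columns 5:9 of the returned array are applied later in
# the pipeline using the formulas in the comments above. A minimal sketch of
# that calibration step; the helper name `_calibrate_boxes_example` is an
# assumption for illustration and is not part of this file's API.
def _calibrate_boxes_example(bboxes):
    """Shift raw [n_boxes, 9] boxes by their predicted offsets (sketch)."""
    x1, y1, x2, y2 = bboxes[:, 0], bboxes[:, 1], bboxes[:, 2], bboxes[:, 3]
    w = x2 - x1 + 1.0
    h = y2 - y1 + 1.0
    tx1, ty1, tx2, ty2 = bboxes[:, 5], bboxes[:, 6], bboxes[:, 7], bboxes[:, 8]

    calibrated = bboxes.copy()
    calibrated[:, 0] = x1 + tx1 * w
    calibrated[:, 1] = y1 + ty1 * h
    calibrated[:, 2] = x2 + tx2 * w
    calibrated[:, 3] = y2 + ty2 * h
    return calibrated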