forked from I3orn2FLY/ActionRecognition
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathDetection.py
executable file
·60 lines (43 loc) · 1.73 KB
/
Detection.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
import cv2
import imutils
import numpy as np
import sys
sys.path.insert(0, 'pose_estimation/')
from estimator import TfPoseEstimator
from networks import get_graph_path, model_wh
# Keypoint indices kept as features: 0..13 with 10 and 13 dropped.
# (Presumably tf-pose COCO part ids; 1=neck and 8/11=hips are used
# below to compute the body center -- TODO confirm against the model.)
needed_elements = np.array([i for i in range(14) if i not in (10, 13)])
class Detector:
    """Detect human candidates in a frame with a tf-pose-estimation model.

    For every detected person that has all the keypoints listed in
    ``needed_elements``, computes a body center (mean of neck and both
    hips) and a feature vector of keypoint offsets relative to that
    center.
    """

    def __init__(self, show=True):
        # '432x368' is the network input size expected by the
        # mobilenet_thin tf-pose model.
        model = 'mobilenet_thin'
        w, h = model_wh('432x368')
        self.estimator = TfPoseEstimator(get_graph_path(model), target_size=(w, h))
        # When True, draw the detected skeletons onto the returned frame.
        self.show = show

    def detectCandidates(self, frame):
        """Run pose estimation on ``frame`` and collect valid candidates.

        Parameters
        ----------
        frame : image array of shape (H, W, ...) as produced by OpenCV.

        Returns
        -------
        cands : list of np.float32 arrays -- pixel-space body centers.
        feat_list : list of 1-D np.ndarray -- per-person (x, y) keypoint
            offsets relative to the normalized body center.
        frame : the input frame, annotated in place with center dots and,
            when ``self.show`` is set, the drawn skeletons.
        """
        cands = []
        feat_list = []
        humans = self.estimator.inference(frame)
        image_h, image_w = frame.shape[:2]

        # BUG FIX: the original code deleted invalid entries from
        # ``humans`` while iterating it by index (``del humans[i]`` then
        # ``continue``), which silently skipped the element that shifted
        # into the deleted slot.  Filter into a new list instead.
        valid_humans = []
        for human in humans:
            # Skip people that are missing any required keypoint.
            if len(np.setdiff1d(needed_elements, list(human.body_parts.keys()))):
                continue
            valid_humans.append(human)

            # Body center: mean of neck (1), left hip (8) and right hip
            # (11), in the estimator's normalized [0, 1] coordinates.
            neck = human.body_parts[1]
            lhip = human.body_parts[8]
            rhip = human.body_parts[11]
            center = (neck.x + lhip.x + rhip.x) / 3, (neck.y + lhip.y + rhip.y) / 3

            # Feature vector: each kept keypoint's offset from the center.
            feats = []
            for idx in needed_elements:
                part = human.body_parts[idx]
                feats = feats + [part.x - center[0], part.y - center[1]]
            feat_list.append(np.asarray(feats))

            # Convert the center to pixel coordinates and mark it.
            center = image_w * center[0], image_h * center[1]
            cv2.circle(frame, (int(center[0]), int(center[1])), 3, (255, 0, 0), 3)
            cands.append(np.asarray(center, dtype=np.float32))

        if self.show:
            # Draw only the humans that passed the keypoint filter
            # (matches the original intent of pruning invalid humans
            # before drawing).
            frame = TfPoseEstimator.draw_humans(frame, valid_humans, imgcopy=False)
        return cands, feat_list, frame
# show some information on the number of bounding boxes