
Commit a254cdf

Adding source code for detection
1 parent a90a9e3 commit a254cdf

File tree

1 file changed: +115 -0 lines changed


corgi_detection.py

@@ -0,0 +1,115 @@
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
from matplotlib import pyplot as plt
from PIL import Image

import numpy as np
import os
import sys
import tensorflow as tf
import matplotlib
matplotlib.use('MacOSX')  # Change to a matplotlib backend available on your OS (e.g. 'TkAgg')

# This is needed because the script lives inside the object_detection folder.
sys.path.append("..")
from object_detection.utils import ops as utils_ops

# Paths to the exported frozen inference graph and the label map
MODEL_NAME = '/Users/hluong/PycharmProjects/CorgiRecognition/models/research/object_detection/inference_graph_corgi'
PATH_TO_FROZEN_GRAPH = MODEL_NAME + '/frozen_inference_graph.pb'
PATH_TO_LABELS = '/Users/hluong/PycharmProjects/CorgiRecognition/models/research/object_detection/corgi_training/corgi.pbtxt'

# Initialize a TensorFlow graph and load the frozen detection model into it
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')

# Map class IDs in the model output to human-readable label names
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)


# Load an image into a numpy array of shape (height, width, 3)
def load_image_into_numpy_array(image):
    (im_width, im_height) = image.size
    return np.array(image.getdata()).reshape(
        (im_height, im_width, 3)).astype(np.uint8)


# For the sake of simplicity we will use only 2 images:
PATH_TO_TEST_IMAGES_DIR = '/Users/hluong/PycharmProjects/CorgiRecognition/test_corgi_images'
TEST_IMAGE_PATHS = [os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image{}.jpg'.format(i)) for i in range(1, 3)]

# Size, in inches, of the output images.
IMAGE_SIZE = (12, 8)


def run_inference_for_single_image(image, graph):
    with graph.as_default():
        with tf.Session() as sess:
            # Get handles to input and output tensors
            ops = tf.get_default_graph().get_operations()
            all_tensor_names = {output.name for op in ops for output in op.outputs}
            tensor_dict = {}
            for key in [
                'num_detections', 'detection_boxes', 'detection_scores',
                'detection_classes', 'detection_masks'
            ]:
                tensor_name = key + ':0'
                if tensor_name in all_tensor_names:
                    tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
                        tensor_name)
            if 'detection_masks' in tensor_dict:
                # The following processing is only for a single image
                detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
                detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
                # Reframing is required to translate the masks from box coordinates to image coordinates and fit the image size.
                real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
                detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
                detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
                detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
                    detection_masks, detection_boxes, image.shape[1], image.shape[2])
                detection_masks_reframed = tf.cast(
                    tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                # Follow the convention by adding back the batch dimension
                tensor_dict['detection_masks'] = tf.expand_dims(
                    detection_masks_reframed, 0)
            image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
            # Run inference
            output_dict = sess.run(tensor_dict, feed_dict={image_tensor: image})

            # All outputs are float32 numpy arrays, so convert types as appropriate
            output_dict['num_detections'] = int(output_dict['num_detections'][0])
            output_dict['detection_classes'] = output_dict[
                'detection_classes'][0].astype(np.int64)
            output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
            output_dict['detection_scores'] = output_dict['detection_scores'][0]
            if 'detection_masks' in output_dict:
                output_dict['detection_masks'] = output_dict['detection_masks'][0]
    return output_dict


for image_path in TEST_IMAGE_PATHS:
    image = Image.open(image_path)
    # The array-based representation of the image will be used later to prepare the
    # result image with boxes and labels on it.
    image_np = load_image_into_numpy_array(image)
    # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
    image_np_expanded = np.expand_dims(image_np, axis=0)
    # Actual detection.
    output_dict = run_inference_for_single_image(image_np_expanded, detection_graph)
    # Visualization of the results of a detection.
    # Note: the inference helper stores masks under 'detection_masks', so that is the key to read here.
    image_np = vis_util.visualize_boxes_and_labels_on_image_array(
        image_np,
        output_dict['detection_boxes'],
        output_dict['detection_classes'],
        output_dict['detection_scores'],
        category_index,
        instance_masks=output_dict.get('detection_masks', None),
        use_normalized_coordinates=True,
        line_thickness=8)
    plt.figure(figsize=IMAGE_SIZE)
    plt.imshow(image_np)
    plt.show()
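
Note: run_inference_for_single_image opens a new tf.Session (and rebuilds the tensor handles) on every call, which gets slow as the number of test images grows. Below is a minimal sketch, not part of this commit, of a variant that reuses one session for the whole loop; it assumes the standard Object Detection API output tensor names (image_tensor:0, detection_boxes:0, ...) and a box-only model without masks.

# Illustrative sketch only: reuse a single TF1 session across all images.
import numpy as np        # already imported above; repeated so this snippet is self-contained
import tensorflow as tf   # TF 1.x API, matching the script above

def run_inference_on_images(image_batches, graph):
    """image_batches: iterable of arrays shaped [1, H, W, 3]."""
    results = []
    with graph.as_default(), tf.Session() as sess:
        # Look up the input and output tensors once instead of per image.
        image_tensor = graph.get_tensor_by_name('image_tensor:0')
        fetches = {
            'num_detections': graph.get_tensor_by_name('num_detections:0'),
            'detection_boxes': graph.get_tensor_by_name('detection_boxes:0'),
            'detection_scores': graph.get_tensor_by_name('detection_scores:0'),
            'detection_classes': graph.get_tensor_by_name('detection_classes:0'),
        }
        for image in image_batches:
            out = sess.run(fetches, feed_dict={image_tensor: image})
            # Strip the batch dimension and convert types, as in the function above.
            results.append({
                'num_detections': int(out['num_detections'][0]),
                'detection_boxes': out['detection_boxes'][0],
                'detection_scores': out['detection_scores'][0],
                'detection_classes': out['detection_classes'][0].astype(np.int64),
            })
    return results

With such a helper, the visualization loop above could run inference once over all of TEST_IMAGE_PATHS (feeding each image as np.expand_dims(load_image_into_numpy_array(Image.open(path)), axis=0)) and then draw the boxes per image, rather than paying the session start-up cost for every picture.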
