Move evaluation tool to bin.
Hans Gaiser committed Jan 24, 2018
1 parent fcb8469 commit 1b02c68
Showing 8 changed files with 242 additions and 366 deletions.
74 changes: 0 additions & 74 deletions examples/evaluate.py

This file was deleted.

144 changes: 144 additions & 0 deletions keras_retinanet/bin/evaluate.py
@@ -0,0 +1,144 @@
#!/usr/bin/env python

"""
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

import argparse
import os
import sys

import keras
import tensorflow as tf

# Allow relative imports when being executed as script.
if __name__ == "__main__" and __package__ is None:
    sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
    import keras_retinanet.bin
    __package__ = "keras_retinanet.bin"

# Change these to absolute imports if you copy this script outside the keras_retinanet package.
from ..preprocessing.pascal_voc import PascalVocGenerator
from ..preprocessing.csv_generator import CSVGenerator
from ..utils.keras_version import check_keras_version
from ..utils.eval import evaluate
from ..models.resnet import custom_objects


def get_session():
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    return tf.Session(config=config)


def create_generator(args):
    if args.dataset_type == 'coco':
        # import here to prevent unnecessary dependency on cocoapi
        from ..preprocessing.coco import CocoGenerator

        validation_generator = CocoGenerator(
            args.coco_path,
            'val2017'
        )
    elif args.dataset_type == 'pascal':
        validation_generator = PascalVocGenerator(
            args.pascal_path,
            'test',
            batch_size=args.batch_size
        )
    elif args.dataset_type == 'csv':
        validation_generator = CSVGenerator(
            args.annotations,
            args.classes,
            batch_size=args.batch_size
        )
    else:
        raise ValueError('Invalid data type received: {}'.format(args.dataset_type))

    return validation_generator


def parse_args(args):
    parser = argparse.ArgumentParser(description='Evaluation script for a RetinaNet network.')
    subparsers = parser.add_subparsers(help='Arguments for specific dataset types.', dest='dataset_type')
    subparsers.required = True

    coco_parser = subparsers.add_parser('coco')
    coco_parser.add_argument('coco_path', help='Path to dataset directory (ie. /tmp/COCO).')

    pascal_parser = subparsers.add_parser('pascal')
    pascal_parser.add_argument('pascal_path', help='Path to dataset directory (ie. /tmp/VOCdevkit).')

    csv_parser = subparsers.add_parser('csv')
    csv_parser.add_argument('annotations', help='Path to CSV file containing annotations for evaluation.')
    csv_parser.add_argument('classes', help='Path to a CSV file containing class label mapping.')

    parser.add_argument('model', help='Path to RetinaNet model.')
    parser.add_argument('--gpu', help='Id of the GPU to use (as reported by nvidia-smi).')
    # batch size is used by create_generator for the pascal and csv generators
    parser.add_argument('--batch-size', help='Size of the batches (defaults to 1).', default=1, type=int)
    parser.add_argument('--score-threshold', help='Threshold on score to filter detections with (defaults to 0.05).', default=0.05, type=float)
    parser.add_argument('--iou-threshold', help='IoU threshold to count for a positive detection (defaults to 0.5).', default=0.5, type=float)
    parser.add_argument('--max-detections', help='Max detections per image (defaults to 100).', default=100, type=int)
    parser.add_argument('--save-path', help='Path for saving images with detections.')

    parser.set_defaults(snapshots=True)

    return parser.parse_args(args)


def main(args=None):
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # make save path if it doesn't exist
    if args.save_path is not None and not os.path.exists(args.save_path):
        os.makedirs(args.save_path)

    # create the generator
    generator = create_generator(args)

    # load the model
    print('Loading model, this may take a second...')
    model = keras.models.load_model(args.model, custom_objects=custom_objects)

    # print model summary
    print(model.summary())

    # start evaluation
    average_precisions = evaluate(
        generator,
        model,
        iou_threshold=args.iou_threshold,
        score_threshold=args.score_threshold,
        max_detections=args.max_detections,
        save_path=args.save_path
    )

    # print evaluation
    for label, average_precision in average_precisions.items():
        print(generator.label_to_name(label), '{:.4f}'.format(average_precision))
    print('mAP: {:.4f}'.format(sum(average_precisions.values()) / len(average_precisions)))


if __name__ == '__main__':
    main()
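With the script now in keras_retinanet/bin, it can be invoked from the shell or, since main() only falls back to sys.argv when no argument list is given, called from Python. A minimal sketch for the csv dataset type follows; the three paths are hypothetical placeholders for your own annotations CSV, class-mapping CSV and trained snapshot, not files from this commit:

from keras_retinanet.bin.evaluate import main

# Hypothetical paths -- substitute your own annotation CSV, class mapping and model snapshot.
main(['csv', 'val_annotations.csv', 'classes.csv', 'snapshots/resnet50_csv.h5'])

The equivalent shell invocation would be: python keras_retinanet/bin/evaluate.py csv val_annotations.csv classes.csv snapshots/resnet50_csv.h5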
4 changes: 2 additions & 2 deletions keras_retinanet/utils/coco_eval.py
@@ -28,7 +28,7 @@ def evaluate_coco(generator, model, threshold=0.05):
    # start collecting results
    results = []
    image_ids = []
-    for i in range(len(generator.image_ids)):
+    for i in range(generator.size()):
        image = generator.load_image(i)
        image = generator.preprocess_image(image)
        image, scale = generator.resize_image(image)
@@ -69,7 +69,7 @@ def evaluate_coco(generator, model, threshold=0.05):
        image_ids.append(generator.image_ids[i])

        # print progress
-        print('{}/{}'.format(i, len(generator.image_ids)), end='\r')
+        print('{}/{}'.format(i, generator.size()), end='\r')

    if not len(results):
        return
71 changes: 47 additions & 24 deletions keras_retinanet/utils/eval.py
@@ -16,7 +16,8 @@

from __future__ import print_function

-from keras_retinanet.utils.anchors import compute_overlap
+from .anchors import compute_overlap
+from .visualization import draw_detections, draw_ground_truth

import numpy as np
import os
@@ -46,12 +47,12 @@ def _compute_ap(recall, precision):
    return ap


-def _get_detections(generator, model, score_threshold=0.05, max_detections=100):
+def _get_detections(generator, model, score_threshold=0.05, max_detections=100, save_path=None):
    all_detections = [[None for i in range(generator.num_classes())] for j in range(generator.size())]

    for i in range(generator.size()):
-        image = generator.load_image(i)
-        image = generator.preprocess_image(image)
+        raw_image = generator.load_image(i)
+        image = generator.preprocess_image(raw_image.copy())
        image, scale = generator.resize_image(image)

        # run network
@@ -84,11 +85,16 @@ def _get_detections(generator, model, score_threshold=0.05, max_detections=100):
        image_detections = np.append(image_boxes, image_scores, axis=1)
        image_predicted_labels = indices[1][scores_sort]

+        if save_path is not None:
+            draw_ground_truth(raw_image, generator.load_annotations(i), generator=generator)
+            draw_detections(raw_image, detections[0, indices[0][scores_sort], :], generator=generator)
+
+            cv2.imwrite(os.path.join(save_path, '{}.png'.format(i)), raw_image)
+
        # copy detections to all_detections
        for label in range(generator.num_classes()):
            all_detections[i][label] = image_detections[image_predicted_labels == label, :]

-        #print([generator.label_to_name(l) for l in image_predicted_labels])
        print('{}/{}'.format(i, generator.size()), end='\r')

    return all_detections
@@ -105,22 +111,30 @@ def _get_annotations(generator):
        for label in range(generator.num_classes()):
            all_annotations[i][label] = annotations[annotations[:, 4] == label, :4].copy()

-        #print([generator.label_to_name(l) for l in annotations[:, 4]])
        print('{}/{}'.format(i, generator.size()), end='\r')

    return all_annotations


-def evaluate(generator, model, iou_threshold=0.5, score_threshold=0.05, max_detections=100):
-    #all_detections = _get_detections(generator, model, score_threshold=score_threshold, max_detections=max_detections)
-    #all_annotations = _get_annotations(generator)
-    average_precisions = np.zeros((0,))
-
-    all_detections = pickle.load(open('all_detections.pkl', 'rb'))
-    all_annotations = pickle.load(open('all_annotations.pkl', 'rb'))
-    #pickle.dump(all_detections, open('all_detections.pkl', 'wb'))
-    #pickle.dump(all_annotations, open('all_annotations.pkl', 'wb'))
-
+def evaluate(
+    generator,
+    model,
+    iou_threshold=0.5,
+    score_threshold=0.05,
+    max_detections=100,
+    save_path=None
+):
+    # gather all detections and annotations
+    all_detections = _get_detections(generator, model, score_threshold=score_threshold, max_detections=max_detections, save_path=save_path)
+    all_annotations = _get_annotations(generator)
+    average_precisions = {}
+
+    # all_detections = pickle.load(open('all_detections.pkl', 'rb'))
+    # all_annotations = pickle.load(open('all_annotations.pkl', 'rb'))
+    # pickle.dump(all_detections, open('all_detections.pkl', 'wb'))
+    # pickle.dump(all_annotations, open('all_annotations.pkl', 'wb'))
+
+    # process detections and annotations
    for label in range(generator.num_classes()):
        false_positives = np.zeros((0,))
        true_positives = np.zeros((0,))
@@ -153,17 +167,26 @@ def evaluate(generator, model, iou_threshold=0.5, score_threshold=0.05, max_dete
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)

+        # no annotations -> AP for this class is 0 (is this correct?)
+        if num_annotations == 0:
+            average_precisions[label] = 0
+            continue
+
        # sort by score
        indices = np.argsort(-scores)
        false_positives = false_positives[indices]
        true_positives = true_positives[indices]

-        false_positives = np.cumsum(false_positives)
-        true_positives = np.cumsum(true_positives)
-        recall = true_positives / num_annotations
-        precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)
-        average_precision = _compute_ap(recall, precision)
-        average_precisions = np.append(average_precisions, average_precision)
+        # compute false positives and true positives
+        false_positives = np.cumsum(false_positives)
+        true_positives = np.cumsum(true_positives)

-        print(generator.label_to_name(label), average_precision)
+        # compute recall and precision
+        recall = true_positives / num_annotations
+        precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)

+        # compute average precision
+        average_precision = _compute_ap(recall, precision)
+        average_precisions[label] = average_precision

-    print('mAP: {}'.format(average_precisions.mean()))
    return average_precisions
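To make the per-label bookkeeping above concrete, here is a small illustrative walk-through (toy numbers, not part of the commit): one class with 2 ground-truth boxes and 3 detections that, sorted by descending score, are hit, miss, hit.

import numpy as np

# Toy detections sorted by score: 1 = matched an annotation, 0 = unmatched.
true_positives = np.cumsum(np.array([1, 0, 1]))    # -> [1, 1, 2]
false_positives = np.cumsum(np.array([0, 1, 0]))   # -> [0, 1, 1]
num_annotations = 2.0

recall = true_positives / num_annotations           # -> [0.5, 0.5, 1.0]
precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)  # -> [1.0, 0.5, 0.667]

_compute_ap(recall, precision) then condenses this precision-recall curve into the single value stored in average_precisions[label], and the new bin/evaluate.py averages those values over all labels to report mAP.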