Merge pull request OlafenwaMoses#336 from rola93/fix_file_paths
Fix file paths
OlafenwaMoses authored Sep 6, 2019
2 parents 01a2a7f + ce6749d commit 1bb1980
Showing 4 changed files with 24 additions and 36 deletions.
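
The whole pull request follows two patterns: path fragments with hard-coded separators ("train/images/") become per-component os.path.join calls, and exists-then-create blocks collapse into os.makedirs(..., exist_ok=True). A minimal sketch of the path change, reusing the folder names from the diff (the data_directory value is invented for illustration):

import os

data_directory = "my_dataset"  # illustrative root directory, not from the diff

# Before: the separator is baked into the string, so on Windows the result
# mixes "/" with the native "\" separator.
old_style = os.path.join(data_directory, "train/images/")

# After: one argument per component lets os.path.join insert os.sep itself.
new_style = os.path.join(data_directory, "train", "images")

print(old_style)  # my_dataset/train/images/ on POSIX, my_dataset\train/images/ on Windows
print(new_style)  # my_dataset/train/images on POSIX, my_dataset\train\images on Windows

Note that the new form also drops the trailing separator, which matters once voc.py switches from string concatenation to os.path.join (see the sketch after that file's diff).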
31 changes: 11 additions & 20 deletions imageai/Detection/Custom/__init__.py
@@ -110,24 +110,20 @@ def setDataDirectory(self, data_directory):
         :return:
         """
 
-        self.__train_images_folder = os.path.join(data_directory, "train/images/")
-        self.__train_annotations_folder = os.path.join(data_directory, "train/annotations/")
-        self.__validation_images_folder = os.path.join(data_directory, "validation/images/")
-        self.__validation_annotations_folder = os.path.join(data_directory, "validation/annotations/")
+        self.__train_images_folder = os.path.join(data_directory, "train", "images")
+        self.__train_annotations_folder = os.path.join(data_directory, "train", "annotations")
+        self.__validation_images_folder = os.path.join(data_directory, "validation", "images")
+        self.__validation_annotations_folder = os.path.join(data_directory, "validation", "annotations")
 
-        if os.path.exists(os.path.join(data_directory, "cache")) == False:
-            os.makedirs(os.path.join(data_directory, "cache"))
+        os.makedirs(os.path.join(data_directory, "cache"), exist_ok=True)
         self.__train_cache_file = os.path.join(data_directory, "cache", "detection_train_data.pkl")
         self.__validation_cache_file = os.path.join(data_directory, "cache", "detection_test_data.pkl")
 
-        if os.path.exists(os.path.join(data_directory, "models")) == False:
-            os.makedirs(os.path.join(data_directory, "models"))
+        os.makedirs(os.path.join(data_directory, "models"), exist_ok=True)
 
-        if os.path.exists(os.path.join(data_directory, "json")) == False:
-            os.makedirs(os.path.join(data_directory, "json"))
+        os.makedirs(os.path.join(data_directory, "json"), exist_ok=True)
 
-        if os.path.exists(os.path.join(data_directory, "logs")) == False:
-            os.makedirs(os.path.join(data_directory, "logs"))
+        os.makedirs(os.path.join(data_directory, "logs"), exist_ok=True)
 
         self.__model_directory = os.path.join(data_directory, "models")
         self.__train_weights_name = os.path.join(self.__model_directory, "detection_model-")
@@ -152,7 +148,7 @@ def setGpuUsage(self, train_gpus):
         # let it as a string separated by commas
         self.__train_gpus = ','.join([str(gpu) for gpu in train_gpus])
 
-    def setTrainConfig(self, object_names_array, batch_size= 4, num_experiments=100, train_from_pretrained_model=""):
+    def setTrainConfig(self, object_names_array, batch_size=4, num_experiments=100, train_from_pretrained_model=""):
 
         """
@@ -174,21 +170,17 @@ def setTrainConfig(self, object_names_array, batch_size= 4, num_experiments=100, train_from_pretrained_model=""):
                                                                          self.__train_images_folder,
                                                                          self.__train_cache_file, self.__model_labels)
 
-
-
         self.__model_labels = sorted(object_names_array)
         self.__num_objects = len(object_names_array)
 
         self.__train_batch_size = batch_size
         self.__train_epochs = num_experiments
         self.__pre_trained_model = train_from_pretrained_model
 
-
-        json_data = {}
+        json_data = dict()
         json_data["labels"] = self.__model_labels
         json_data["anchors"] = self.__inference_anchors
 
-
         with open(os.path.join(self.__json_directory, "detection_config.json"), "w+") as json_file:
             json.dump(json_data, json_file, indent=4, separators=(",", " : "),
                       ensure_ascii=True)
@@ -219,7 +211,7 @@ def trainModel(self):
             self.__model_labels
 
         )
-        if(self.__training_mode):
+        if self.__training_mode:
             print('Training on: \t' + str(labels) + '')
             print("Training with Batch Size: ", self.__train_batch_size)
             print("Number of Experiments: ", self.__train_epochs)
@@ -287,7 +279,6 @@ def trainModel(self):
         ###############################
         callbacks = self._create_callbacks(self.__train_weights_name, infer_model)
 
-
         train_model.fit_generator(
             generator=train_generator,
             steps_per_epoch=len(train_generator) * self.__train_times,
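
The removed exists-checks above rely on the exist_ok flag that os.makedirs has accepted since Python 3.2. A short sketch of the equivalence (the directory name is illustrative):

import os

cache_dir = os.path.join("my_dataset", "cache")  # illustrative path

# Old pattern (left side of the diff): check, then create. This is racy,
# because another process can create the directory between the two calls,
# making makedirs raise FileExistsError.
if os.path.exists(cache_dir) == False:
    os.makedirs(cache_dir)

# New pattern (right side): a single call that is a no-op when the
# directory already exists.
os.makedirs(cache_dir, exist_ok=True)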
6 changes: 2 additions & 4 deletions imageai/Detection/Custom/evaluate.py
@@ -2,16 +2,13 @@
 
 import argparse
 import os
-import numpy as np
 import json
 from imageai.Detection.Custom.voc import parse_voc_annotation
 from imageai.Detection.Custom.yolo import create_yolov3_model
 from imageai.Detection.Custom.generator import BatchGenerator
 from imageai.Detection.Custom.utils.utils import normalize, evaluate
-from keras.callbacks import EarlyStopping, ModelCheckpoint
-from keras.optimizers import Adam
 from keras.models import load_model
 
-
 def _main_(args):
     config_path = args.conf
+
@@ -60,6 +57,7 @@ def _main_(args):
         print(labels[label] + ': {:.4f}'.format(average_precision))
     print('mAP: {:.4f}'.format(sum(average_precisions.values()) / len(average_precisions)))
 
+
 if __name__ == '__main__':
     argparser = argparse.ArgumentParser(description='Evaluate YOLO_v3 model on any dataset')
     argparser.add_argument('-c', '--conf', help='path to configuration file')
7 changes: 3 additions & 4 deletions imageai/Detection/Custom/gen_anchors.py
@@ -1,9 +1,8 @@
 import random
 import argparse
 import numpy as np
-
 from imageai.Detection.Custom.voc import parse_voc_annotation
 import json
 
+
 def IOU(ann, centroids):
     w, h = ann
@@ -24,6 +23,7 @@ def IOU(ann, centroids):
 
     return np.array(similarities)
 
+
 def avg_IOU(anns, centroids):
     n,d = anns.shape
     sum = 0.
@@ -69,6 +69,7 @@ def run_kmeans(ann_dims, anchor_num):
         prev_assignments = assignments.copy()
         old_distances = distances.copy()
 
+
 def generateAnchors(train_annotation_folder, train_image_folder, train_cache_file, model_labels):
 
     print("Generating anchor boxes for training images and annotation...")
@@ -101,7 +102,6 @@ def generateAnchors(train_annotation_folder, train_image_folder, train_cache_file, model_labels):
     widths = anchors[:, 0]
     sorted_indices = np.argsort(widths)
 
-
     anchor_array = []
     reverse_anchor_array = []
     out_string = ""
@@ -112,7 +112,6 @@ def generateAnchors(train_annotation_folder, train_image_folder, train_cache_file, model_labels):
 
         out_string += str(int(anchors[i, 0] * 416)) + ',' + str(int(anchors[i, 1] * 416)) + ', '
 
-
     reverse_anchor_array.append(anchor_array[12:18])
     reverse_anchor_array.append(anchor_array[6:12])
     reverse_anchor_array.append(anchor_array[0:6])
16 changes: 8 additions & 8 deletions imageai/Detection/Custom/voc.py
@@ -1,30 +1,30 @@
 import numpy as np
 import os
 import xml.etree.ElementTree as ET
 import pickle
 
+
 def parse_voc_annotation(ann_dir, img_dir, cache_name, labels=[]):
     if os.path.exists(cache_name):
         with open(cache_name, 'rb') as handle:
             cache = pickle.load(handle)
         all_insts, seen_labels = cache['all_insts'], cache['seen_labels']
     else:
-        all_insts = []
-        seen_labels = {}
+        all_insts = list()
+        seen_labels = dict()
 
         for ann in sorted(os.listdir(ann_dir)):
-            img = {'object':[]}
+            img = {'object': list()}
 
             try:
-                tree = ET.parse(ann_dir + ann)
+                tree = ET.parse(os.path.join(ann_dir, ann))
             except Exception as e:
                 print(e)
-                print('Ignore this bad annotation: ' + ann_dir + ann)
+                print('Ignore this bad annotation: ' + os.path.join(ann_dir, ann))
                 continue
 
             for elem in tree.iter():
                 if 'filename' in elem.tag:
-                    img['filename'] = img_dir + elem.text
+                    img['filename'] = os.path.join(img_dir, elem.text)
                 if 'width' in elem.tag:
                     img['width'] = int(elem.text)
                 if 'height' in elem.tag:
@@ -64,4 +64,4 @@ def parse_voc_annotation(ann_dir, img_dir, cache_name, labels=[]):
     with open(cache_name, 'wb') as handle:
         pickle.dump(cache, handle, protocol=pickle.HIGHEST_PROTOCOL)
 
-    return all_insts, seen_labels
\ No newline at end of file
+    return all_insts, seen_labels
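
The voc.py hunks fix the bug behind the PR title: plain string concatenation only yields a valid path when ann_dir or img_dir happens to end in a separator, and setDataDirectory no longer produces trailing separators. A small sketch of the failure mode (paths invented for illustration):

import os

ann_dir = os.path.join("my_dataset", "train", "annotations")  # no trailing separator
ann = "image1.xml"

# Concatenation silently glues the directory and file name together:
print(ann_dir + ann)               # my_dataset/train/annotationsimage1.xml

# os.path.join inserts the separator exactly where it is needed:
print(os.path.join(ann_dir, ann))  # my_dataset/train/annotations/image1.xml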
