Upgrade script to tf2
objorkman committed Sep 25, 2021
1 parent e96b394 commit a970ee6
Showing 17 changed files with 266 additions and 266 deletions.
8 changes: 4 additions & 4 deletions data_provider/lanenet_data_feed_pipline.py
@@ -250,7 +250,7 @@ def __len__(self):
assert ops.exists(tfrecords_file_paths), '{:s} not exist'.format(tfrecords_file_paths)

sample_counts = 0
sample_counts += sum(1 for _ in tf.python_io.tf_record_iterator(tfrecords_file_paths))
sample_counts += sum(1 for _ in tf.compat.v1.python_io.tf_record_iterator(tfrecords_file_paths))
if self._dataset_flags == 'train':
num_batchs = int(np.ceil(sample_counts / self._train_batch_size))
elif self._dataset_flags == 'val':
@@ -273,7 +273,7 @@ def next_batch(self, batch_size):
assert ops.exists(tfrecords_file_paths), '{:s} not exist'.format(tfrecords_file_paths)

with tf.device('/cpu:0'):
with tf.name_scope('input_tensor'):
with tf.compat.v1.name_scope('input_tensor'):

# TFRecordDataset opens a binary file and reads one record at a time.
# `tfrecords_file_paths` could also be a list of filenames, which will be read in order.
@@ -307,7 +307,7 @@ def next_batch(self, batch_size):
dataset = dataset.batch(batch_size=batch_size, drop_remainder=True)
dataset = dataset.prefetch(buffer_size=128)

iterator = dataset.make_one_shot_iterator()
iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)

return iterator.get_next(name='{:s}_IteratorGetNext'.format(self._dataset_flags))

@@ -321,7 +321,7 @@ def next_batch(self, batch_size):
src_images, binary_label_images, instance_label_images = train_dataset.next_batch(batch_size=8)

count = 1
with tf.Session() as sess:
with tf.compat.v1.Session() as sess:
while True:
try:
t_start = time.time()
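For reference, a minimal sketch (not from the repository) of the TF2-compatible input-pipeline pattern this file switches to: tf.python_io.tf_record_iterator becomes tf.compat.v1.python_io.tf_record_iterator, dataset.make_one_shot_iterator() becomes tf.compat.v1.data.make_one_shot_iterator(dataset), and Session/name_scope move under tf.compat.v1. The TFRecord path and batch size below are placeholders, and graph-mode execution is assumed:

import tensorflow as tf

tf.compat.v1.disable_eager_execution()  # the pipeline still runs as a TF1-style graph

tfrecords_path = '/tmp/train.tfrecords'  # placeholder path, assumed to exist

# count records the compat way (tf.python_io -> tf.compat.v1.python_io)
sample_count = sum(1 for _ in tf.compat.v1.python_io.tf_record_iterator(tfrecords_path))

# build the dataset and a one-shot iterator the compat way
dataset = tf.data.TFRecordDataset(tfrecords_path).batch(8, drop_remainder=True)
iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)  # dataset.make_one_shot_iterator() was removed
next_batch = iterator.get_next()

with tf.compat.v1.Session() as sess:
    serialized_records = sess.run(next_batch)  # one batch of raw serialized examples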
24 changes: 12 additions & 12 deletions data_provider/tf_io_pipline_tools.py
@@ -57,7 +57,7 @@ def write_example_tfrecords(gt_images_paths, gt_binary_images_paths, gt_instance

log.info('Writing {:s}....'.format(tfrecords_path))

with tf.python_io.TFRecordWriter(tfrecords_path) as _writer:
with tf.io.TFRecordWriter(tfrecords_path) as _writer:
for _index, _gt_image_path in enumerate(gt_images_paths):

# prepare gt image
@@ -111,28 +111,28 @@ def decode(serialized_example):
:param serialized_example:
:return:
"""
features = tf.parse_single_example(
serialized_example,
features = tf.io.parse_single_example(
serialized=serialized_example,
# Defaults are not specified since both keys are required.
features={
'gt_image_raw': tf.FixedLenFeature([], tf.string),
'gt_binary_image_raw': tf.FixedLenFeature([], tf.string),
'gt_instance_image_raw': tf.FixedLenFeature([], tf.string)
'gt_image_raw': tf.io.FixedLenFeature([], tf.string),
'gt_binary_image_raw': tf.io.FixedLenFeature([], tf.string),
'gt_instance_image_raw': tf.io.FixedLenFeature([], tf.string)
})

# decode gt image
gt_image_shape = tf.stack([RESIZE_IMAGE_HEIGHT, RESIZE_IMAGE_WIDTH, 3])
gt_image = tf.decode_raw(features['gt_image_raw'], tf.uint8)
gt_image = tf.io.decode_raw(features['gt_image_raw'], tf.uint8)
gt_image = tf.reshape(gt_image, gt_image_shape)

# decode gt binary image
gt_binary_image_shape = tf.stack([RESIZE_IMAGE_HEIGHT, RESIZE_IMAGE_WIDTH, 1])
gt_binary_image = tf.decode_raw(features['gt_binary_image_raw'], tf.uint8)
gt_binary_image = tf.io.decode_raw(features['gt_binary_image_raw'], tf.uint8)
gt_binary_image = tf.reshape(gt_binary_image, gt_binary_image_shape)

# decode gt instance image
gt_instance_image_shape = tf.stack([RESIZE_IMAGE_HEIGHT, RESIZE_IMAGE_WIDTH, 1])
gt_instance_image = tf.decode_raw(features['gt_instance_image_raw'], tf.uint8)
gt_instance_image = tf.io.decode_raw(features['gt_instance_image_raw'], tf.uint8)
gt_instance_image = tf.reshape(gt_instance_image, gt_instance_image_shape)

return gt_image, gt_binary_image, gt_instance_image
@@ -248,8 +248,8 @@ def random_crop_batch_images(gt_image, gt_binary_image, gt_instance_image, cropp

concat_cropped_images = tf.image.random_crop(
concat_images,
[cropped_size[1], cropped_size[0], tf.shape(concat_images)[-1]],
seed=tf.random.set_random_seed(1234)
[cropped_size[1], cropped_size[0], tf.shape(input=concat_images)[-1]],
seed=tf.compat.v1.random.set_random_seed(1234)
)

cropped_gt_image = tf.slice(
@@ -285,7 +285,7 @@ def random_horizon_flip_batch_images(gt_image, gt_binary_image, gt_instance_imag

concat_flipped_images = tf.image.random_flip_left_right(
image=concat_images,
seed=tf.random.set_random_seed(1)
seed=tf.compat.v1.random.set_random_seed(1)
)

flipped_gt_image = tf.slice(
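For reference, the changes in tf_io_pipline_tools.py are mostly endpoint renames: tf.python_io.TFRecordWriter to tf.io.TFRecordWriter, tf.parse_single_example to tf.io.parse_single_example, tf.FixedLenFeature to tf.io.FixedLenFeature, and tf.decode_raw to tf.io.decode_raw. A minimal decode sketch (not from the repository) under those renamed endpoints; the single feature key and image size are placeholders, not the repository's real schema:

import tensorflow as tf

HEIGHT, WIDTH = 256, 512  # placeholder image size

def decode_example(serialized_example):
    # tf.parse_single_example / tf.FixedLenFeature / tf.decode_raw now live under tf.io
    features = tf.io.parse_single_example(
        serialized=serialized_example,
        features={'image_raw': tf.io.FixedLenFeature([], tf.string)}
    )
    image = tf.io.decode_raw(features['image_raw'], tf.uint8)
    image = tf.reshape(image, tf.stack([HEIGHT, WIDTH, 3]))
    return image

# typical usage (path is a placeholder):
# dataset = tf.data.TFRecordDataset('/tmp/train.tfrecords').map(decode_example)

One side note on the augmentation hunks: tf.compat.v1.random.set_random_seed sets the graph-level seed and returns None, so the seed= arguments passed to random_crop and random_flip_left_right effectively receive None, matching the behavior of the original TF1 code.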
4 changes: 2 additions & 2 deletions lanenet_model/lanenet.py
@@ -44,7 +44,7 @@ def inference(self, input_tensor, name, reuse=False):
:param reuse
:return:
"""
with tf.variable_scope(name_or_scope=name, reuse=reuse):
with tf.compat.v1.variable_scope(name_or_scope=name, reuse=reuse):
# first extract image features
extract_feats_result = self._frontend.build_model(
input_tensor=input_tensor,
@@ -72,7 +72,7 @@ def compute_loss(self, input_tensor, binary_label, instance_label, name, reuse=F
:param reuse:
:return:
"""
with tf.variable_scope(name_or_scope=name, reuse=reuse):
with tf.compat.v1.variable_scope(name_or_scope=name, reuse=reuse):
# first extract image features
extract_feats_result = self._frontend.build_model(
input_tensor=input_tensor,
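The lanenet.py changes only move tf.variable_scope to tf.compat.v1.variable_scope; reuse semantics are unchanged. A tiny sketch (scope and variable names are made up, not the LaneNet graph) of the compat pattern:

import tensorflow as tf

tf.compat.v1.disable_eager_execution()  # variable scopes are a graph-mode concept

with tf.compat.v1.variable_scope('lanenet_model', reuse=tf.compat.v1.AUTO_REUSE):
    conv_w = tf.compat.v1.get_variable('conv_w', shape=[3, 3, 3, 64])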
22 changes: 11 additions & 11 deletions lanenet_model/lanenet_back_end.py
@@ -54,9 +54,9 @@ def _compute_class_weighted_cross_entropy_loss(cls, onehot_labels, logits, class
:param classes_weights:
:return:
"""
loss_weights = tf.reduce_sum(tf.multiply(onehot_labels, classes_weights), axis=3)
loss_weights = tf.reduce_sum(input_tensor=tf.multiply(onehot_labels, classes_weights), axis=3)

loss = tf.losses.softmax_cross_entropy(
loss = tf.compat.v1.losses.softmax_cross_entropy(
onehot_labels=onehot_labels,
logits=logits,
weights=loss_weights
@@ -77,9 +77,9 @@ def compute_loss(self, binary_seg_logits, binary_label,
:param reuse:
:return:
"""
with tf.variable_scope(name_or_scope=name, reuse=reuse):
with tf.compat.v1.variable_scope(name_or_scope=name, reuse=reuse):
# calculate class weighted binary seg loss
with tf.variable_scope(name_or_scope='binary_seg'):
with tf.compat.v1.variable_scope(name_or_scope='binary_seg'):
binary_label_onehot = tf.one_hot(
tf.reshape(
tf.cast(binary_label, tf.int32),
@@ -100,7 +100,7 @@ def compute_loss(self, binary_seg_logits, binary_label,
counts = tf.cast(counts, tf.float32)
inverse_weights = tf.divide(
1.0,
tf.log(tf.add(tf.divide(counts, tf.reduce_sum(counts)), tf.constant(1.02)))
tf.math.log(tf.add(tf.divide(counts, tf.reduce_sum(input_tensor=counts)), tf.constant(1.02)))
)

binary_segmenatation_loss = self._compute_class_weighted_cross_entropy_loss(
@@ -110,7 +110,7 @@ def compute_loss(self, binary_seg_logits, binary_label,
)

# calculate class weighted instance seg loss
with tf.variable_scope(name_or_scope='instance_seg'):
with tf.compat.v1.variable_scope(name_or_scope='instance_seg'):

pix_bn = self.layerbn(
inputdata=instance_seg_logits, is_training=self._is_training, name='pix_bn')
@@ -130,7 +130,7 @@ def compute_loss(self, binary_seg_logits, binary_label,
)

l2_reg_loss = tf.constant(0.0, tf.float32)
for vv in tf.trainable_variables():
for vv in tf.compat.v1.trainable_variables():
if 'bn' in vv.name or 'gn' in vv.name:
continue
else:
@@ -157,13 +157,13 @@ def inference(self, binary_seg_logits, instance_seg_logits, name, reuse):
:param reuse:
:return:
"""
with tf.variable_scope(name_or_scope=name, reuse=reuse):
with tf.compat.v1.variable_scope(name_or_scope=name, reuse=reuse):

with tf.variable_scope(name_or_scope='binary_seg'):
with tf.compat.v1.variable_scope(name_or_scope='binary_seg'):
binary_seg_score = tf.nn.softmax(logits=binary_seg_logits)
binary_seg_prediction = tf.argmax(binary_seg_score, axis=-1)
binary_seg_prediction = tf.argmax(input=binary_seg_score, axis=-1)

with tf.variable_scope(name_or_scope='instance_seg'):
with tf.compat.v1.variable_scope(name_or_scope='instance_seg'):

pix_bn = self.layerbn(
inputdata=instance_seg_logits, is_training=self._is_training, name='pix_bn')
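For reference, the back-end changes keep the TF1 loss API alive through tf.compat.v1 and switch positional reduce arguments to keyword form (input_tensor=). A standalone sketch (not from the repository) of the class-weighted softmax cross-entropy pattern, with made-up shapes and weights:

import tensorflow as tf

# made-up 2-class label map, logits and per-class weights
onehot_labels = tf.one_hot(tf.zeros([2, 8, 8], dtype=tf.int32), depth=2)
logits = tf.random.normal([2, 8, 8, 2])
class_weights = tf.constant([1.0, 10.0])

# tf.reduce_sum now takes input_tensor=; the TF1 losses live under tf.compat.v1.losses
loss_weights = tf.reduce_sum(input_tensor=tf.multiply(onehot_labels, class_weights), axis=3)
loss = tf.compat.v1.losses.softmax_cross_entropy(
    onehot_labels=onehot_labels,
    logits=logits,
    weights=loss_weights
)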
36 changes: 18 additions & 18 deletions lanenet_model/lanenet_discriminative_loss.py
@@ -43,22 +43,22 @@ def discriminative_loss_single(
# calculate instance nums
unique_labels, unique_id, counts = tf.unique_with_counts(correct_label)
counts = tf.cast(counts, tf.float32)
num_instances = tf.size(unique_labels)
num_instances = tf.size(input=unique_labels)

# calculate instance pixel embedding mean vec
segmented_sum = tf.unsorted_segment_sum(
segmented_sum = tf.math.unsorted_segment_sum(
reshaped_pred, unique_id, num_instances)
mu = tf.div(segmented_sum, tf.reshape(counts, (-1, 1)))
mu = tf.compat.v1.div(segmented_sum, tf.reshape(counts, (-1, 1)))
mu_expand = tf.gather(mu, unique_id)

distance = tf.norm(tf.subtract(mu_expand, reshaped_pred), axis=1, ord=1)
distance = tf.norm(tensor=tf.subtract(mu_expand, reshaped_pred), axis=1, ord=1)
distance = tf.subtract(distance, delta_v)
distance = tf.clip_by_value(distance, 0., distance)
distance = tf.square(distance)

l_var = tf.unsorted_segment_sum(distance, unique_id, num_instances)
l_var = tf.div(l_var, counts)
l_var = tf.reduce_sum(l_var)
l_var = tf.math.unsorted_segment_sum(distance, unique_id, num_instances)
l_var = tf.compat.v1.div(l_var, counts)
l_var = tf.reduce_sum(input_tensor=l_var)
l_var = tf.divide(l_var, tf.cast(num_instances, tf.float32))

mu_interleaved_rep = tf.tile(mu, [num_instances, 1])
@@ -71,19 +71,19 @@

mu_diff = tf.subtract(mu_band_rep, mu_interleaved_rep)

intermediate_tensor = tf.reduce_sum(tf.abs(mu_diff), axis=1)
intermediate_tensor = tf.reduce_sum(input_tensor=tf.abs(mu_diff), axis=1)
zero_vector = tf.zeros(1, dtype=tf.float32)
bool_mask = tf.not_equal(intermediate_tensor, zero_vector)
mu_diff_bool = tf.boolean_mask(mu_diff, bool_mask)
mu_diff_bool = tf.boolean_mask(tensor=mu_diff, mask=bool_mask)

mu_norm = tf.norm(mu_diff_bool, axis=1, ord=1)
mu_norm = tf.norm(tensor=mu_diff_bool, axis=1, ord=1)
mu_norm = tf.subtract(2. * delta_d, mu_norm)
mu_norm = tf.clip_by_value(mu_norm, 0., mu_norm)
mu_norm = tf.square(mu_norm)

l_dist = tf.reduce_mean(mu_norm)
l_dist = tf.reduce_mean(input_tensor=mu_norm)

l_reg = tf.reduce_mean(tf.norm(mu, axis=1, ord=1))
l_reg = tf.reduce_mean(input_tensor=tf.norm(tensor=mu, axis=1, ord=1))

param_scale = 1.
l_var = param_var * l_var
@@ -103,7 +103,7 @@ def discriminative_loss(prediction, correct_label, feature_dim, image_shape,
"""

def cond(label, batch, out_loss, out_var, out_dist, out_reg, i):
return tf.less(i, tf.shape(batch)[0])
return tf.less(i, tf.shape(input=batch)[0])

def body(label, batch, out_loss, out_var, out_dist, out_reg, i):
disc_loss, l_var, l_dist, l_reg = discriminative_loss_single(
@@ -127,16 +127,16 @@ def body(label, batch, out_loss, out_var, out_dist, out_reg, i):
dtype=tf.float32, size=0, dynamic_size=True)

_, _, out_loss_op, out_var_op, out_dist_op, out_reg_op, _ = tf.while_loop(
cond, body, [
cond=cond, body=body, loop_vars=[
correct_label, prediction, output_ta_loss, output_ta_var, output_ta_dist, output_ta_reg, 0])
out_loss_op = out_loss_op.stack()
out_var_op = out_var_op.stack()
out_dist_op = out_dist_op.stack()
out_reg_op = out_reg_op.stack()

disc_loss = tf.reduce_mean(out_loss_op)
l_var = tf.reduce_mean(out_var_op)
l_dist = tf.reduce_mean(out_dist_op)
l_reg = tf.reduce_mean(out_reg_op)
disc_loss = tf.reduce_mean(input_tensor=out_loss_op)
l_var = tf.reduce_mean(input_tensor=out_var_op)
l_dist = tf.reduce_mean(input_tensor=out_dist_op)
l_reg = tf.reduce_mean(input_tensor=out_reg_op)

return disc_loss, l_var, l_dist, l_reg
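
For reference, the discriminative-loss changes rename tf.unsorted_segment_sum to tf.math.unsorted_segment_sum, move tf.div to tf.compat.v1.div, and switch the reduce/norm/size/shape calls to keyword arguments. A standalone sketch (not from the repository) of the per-instance mean-embedding step, with made-up pixel embeddings and instance ids:

import tensorflow as tf

embedding = tf.random.normal([6, 4])            # 6 pixels, 4-dimensional embeddings (made up)
instance_ids = tf.constant([0, 0, 1, 1, 1, 2])  # made-up instance labels per pixel

unique_labels, unique_id, counts = tf.unique_with_counts(instance_ids)
num_instances = tf.size(input=unique_labels)

# tf.unsorted_segment_sum -> tf.math.unsorted_segment_sum, tf.div -> tf.compat.v1.div
segmented_sum = tf.math.unsorted_segment_sum(embedding, unique_id, num_instances)
mu = tf.compat.v1.div(segmented_sum, tf.reshape(tf.cast(counts, tf.float32), (-1, 1)))

# reduce_* now take input_tensor=, tf.norm takes tensor=
l_reg = tf.reduce_mean(input_tensor=tf.norm(tensor=mu, axis=1, ord=1))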
Empty file modified local_utils/config_utils/parse_config_utils.py (100755 → 100644)
Empty file modified local_utils/log_util/__init__.py (100755 → 100644)
Empty file modified local_utils/log_util/init_logger.py (100755 → 100644)
16 changes: 8 additions & 8 deletions mnn_project/freeze_lanenet_model.py
@@ -42,31 +42,31 @@ def convert_ckpt_into_pb_file(ckpt_file_path, pb_file_path):
:return:
"""
# construct compute graph
with tf.variable_scope('lanenet'):
input_tensor = tf.placeholder(dtype=tf.float32, shape=[1, 256, 512, 3], name='input_tensor')
with tf.compat.v1.variable_scope('lanenet'):
input_tensor = tf.compat.v1.placeholder(dtype=tf.float32, shape=[1, 256, 512, 3], name='input_tensor')

net = lanenet.LaneNet(phase='test', net_flag='vgg')
binary_seg_ret, instance_seg_ret = net.inference(input_tensor=input_tensor, name='lanenet_model')

with tf.variable_scope('lanenet/'):
with tf.compat.v1.variable_scope('lanenet/'):
binary_seg_ret = tf.cast(binary_seg_ret, dtype=tf.float32)
binary_seg_ret = tf.squeeze(binary_seg_ret, axis=0, name='final_binary_output')
instance_seg_ret = tf.squeeze(instance_seg_ret, axis=0, name='final_pixel_embedding_output')

# create a session
saver = tf.train.Saver()
saver = tf.compat.v1.train.Saver()

sess_config = tf.ConfigProto()
sess_config = tf.compat.v1.ConfigProto()
sess_config.gpu_options.per_process_gpu_memory_fraction = 0.85
sess_config.gpu_options.allow_growth = False
sess_config.gpu_options.allocator_type = 'BFC'

sess = tf.Session(config=sess_config)
sess = tf.compat.v1.Session(config=sess_config)

with sess.as_default():
saver.restore(sess, ckpt_file_path)

converted_graph_def = tf.graph_util.convert_variables_to_constants(
converted_graph_def = tf.compat.v1.graph_util.convert_variables_to_constants(
sess,
input_graph_def=sess.graph.as_graph_def(),
output_node_names=[
@@ -76,7 +76,7 @@ def convert_ckpt_into_pb_file(ckpt_file_path, pb_file_path):
]
)

with tf.gfile.GFile(pb_file_path, "wb") as f:
with tf.io.gfile.GFile(pb_file_path, "wb") as f:
f.write(converted_graph_def.SerializeToString())


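For reference, the freezing script keeps the TF1 export path alive through tf.compat.v1 (placeholder, Saver, ConfigProto, Session, graph_util.convert_variables_to_constants) and writes the frozen graph with tf.io.gfile.GFile. A minimal sketch (not from the repository) with a toy graph; the dense layer, node names and output path are placeholders, not the LaneNet graph:

import tensorflow as tf

tf.compat.v1.disable_eager_execution()

# toy graph standing in for the LaneNet model
input_tensor = tf.compat.v1.placeholder(tf.float32, [1, 4], name='input_tensor')
output_tensor = tf.identity(tf.compat.v1.layers.dense(input_tensor, 2), name='output_tensor')

saver = tf.compat.v1.train.Saver()
sess_config = tf.compat.v1.ConfigProto()
sess_config.gpu_options.allow_growth = True

with tf.compat.v1.Session(config=sess_config) as sess:
    sess.run(tf.compat.v1.global_variables_initializer())  # or saver.restore(sess, ckpt_file_path)
    frozen_graph_def = tf.compat.v1.graph_util.convert_variables_to_constants(
        sess,
        input_graph_def=sess.graph.as_graph_def(),
        output_node_names=['output_tensor']
    )

with tf.io.gfile.GFile('/tmp/model.pb', 'wb') as f:  # placeholder output path
    f.write(frozen_graph_def.SerializeToString())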