Skip to content

Commit

Permalink
Remove barrier, add tf.identity where appropriate, and make sure tests pass
Browse files Browse the repository at this point in the history
  • Loading branch information
nealwu committed May 23, 2017
1 parent 8e54ffc commit 99f9442
Show file tree
Hide file tree
Showing 3 changed files with 6 additions and 12 deletions.
14 changes: 4 additions & 10 deletions inception/inception/slim/ops_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -418,7 +418,7 @@ def testCreateDropout(self):
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.dropout(images)
self.assertEquals(output.op.name, 'Dropout/dropout/mul_1')
self.assertEquals(output.op.name, 'Dropout/dropout/mul')
output.get_shape().assert_is_compatible_with(images.get_shape())

def testCreateDropoutNoTraining(self):
Expand Down Expand Up @@ -599,9 +599,7 @@ def testComputeMovingVars(self):
output = ops.batch_norm(images, decay=0.1)
update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
with tf.control_dependencies(update_ops):
barrier = tf.no_op(name='gradient_barrier')
with tf.control_dependencies([barrier]):
output = tf.identity(output)
output = tf.identity(output)
# Initialize all variables
sess.run(tf.global_variables_initializer())
moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
Expand Down Expand Up @@ -630,9 +628,7 @@ def testEvalMovingVars(self):
output = ops.batch_norm(images, decay=0.1, is_training=False)
update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
with tf.control_dependencies(update_ops):
barrier = tf.no_op(name='gradient_barrier')
with tf.control_dependencies([barrier]):
output = tf.identity(output)
output = tf.identity(output)
# Initialize all variables
sess.run(tf.global_variables_initializer())
moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
Expand Down Expand Up @@ -665,9 +661,7 @@ def testReuseVars(self):
output = ops.batch_norm(images, decay=0.1, is_training=False)
update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
with tf.control_dependencies(update_ops):
barrier = tf.no_op(name='gradient_barrier')
with tf.control_dependencies([barrier]):
output = tf.identity(output)
output = tf.identity(output)
# Initialize all variables
sess.run(tf.global_variables_initializer())
moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
Expand Down
2 changes: 1 addition & 1 deletion slim/deployment/model_deploy.py
Original file line number Diff line number Diff line change
Expand Up @@ -379,7 +379,7 @@ def deploy(config,

update_op = tf.group(*update_ops)
with tf.control_dependencies([update_op]):
train_op = total_loss
train_op = tf.identity(total_loss, name='train_op')
else:
clones_losses = []
regularization_losses = tf.get_collection(
Expand Down
2 changes: 1 addition & 1 deletion slim/train_image_classifier.py
Original file line number Diff line number Diff line change
Expand Up @@ -540,7 +540,7 @@ def clone_fn(batch_queue):

update_op = tf.group(*update_ops)
with tf.control_dependencies([update_op]):
train_tensor = total_loss
train_tensor = tf.identity(total_loss, name='train_op')

# Add the summaries from the first clone. These contain the summaries
# created by model_fn and either optimize_clones() or _gather_clone_loss().
Expand Down

0 comments on commit 99f9442

Please sign in to comment.