Commit: 27 changed files with 6,036 additions and 0 deletions.
`.gitignore` (hunk `@@ -2,3 +2,4 @@`; with zero deletions in the commit, the added line is `*.pyc`):

```
.DS_Store
my_*
images/**/*.png
*.pyc
```
A new five-line note recording where this directory came from (hunk `@@ -0,0 +1,5 @@`):

```
This directory was copied from:
https://github.com/tensorflow/models/blob/master/slim/nets

On Sept. 25th, 2016. Commit:
https://github.com/tensorflow/models/commit/65fad62dc6daca5a72c204013824cc380939d457
```
A new file containing a single blank line (hunk `@@ -0,0 +1 @@`), presumably the `nets/__init__.py` package marker implied by the `from nets import alexnet` import in the tests below.
`alexnet.py` (new file, 125 lines):

```python
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains a model definition for AlexNet.

This work was first described in:
  ImageNet Classification with Deep Convolutional Neural Networks
  Alex Krizhevsky, Ilya Sutskever and Geoffrey E. Hinton

and later refined in:
  One weird trick for parallelizing convolutional neural networks
  Alex Krizhevsky, 2014

Here we provide the implementation proposed in "One weird trick" and not
"ImageNet Classification"; as per that paper, the LRN layers have been removed.

Usage:
  with slim.arg_scope(alexnet.alexnet_v2_arg_scope()):
    outputs, end_points = alexnet.alexnet_v2(inputs)

@@alexnet_v2
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

slim = tf.contrib.slim
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)


def alexnet_v2_arg_scope(weight_decay=0.0005):
  with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      activation_fn=tf.nn.relu,
                      biases_initializer=tf.constant_initializer(0.1),
                      weights_regularizer=slim.l2_regularizer(weight_decay)):
    with slim.arg_scope([slim.conv2d], padding='SAME'):
      with slim.arg_scope([slim.max_pool2d], padding='VALID') as arg_sc:
        return arg_sc


def alexnet_v2(inputs,
               num_classes=1000,
               is_training=True,
               dropout_keep_prob=0.5,
               spatial_squeeze=True,
               scope='alexnet_v2'):
  """AlexNet version 2.

  Described in: http://arxiv.org/pdf/1404.5997v2.pdf
  Parameters from:
    github.com/akrizhevsky/cuda-convnet2/blob/master/layers/
    layers-imagenet-1gpu.cfg

  Note: All the fully_connected layers have been transformed to conv2d layers.
  To use in classification mode, resize input to 224x224. To use in fully
  convolutional mode, set spatial_squeeze to False.
  The LRN layers have been removed, and the initializers changed from
  random_normal_initializer to xavier_initializer.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not to squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    scope: Optional scope for the variables.

  Returns:
    the last op containing the log predictions and end_points dict.
  """
  with tf.variable_scope(scope, 'alexnet_v2', [inputs]) as sc:
    end_points_collection = sc.name + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d.
    with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
                        outputs_collections=[end_points_collection]):
      net = slim.conv2d(inputs, 64, [11, 11], 4, padding='VALID',
                        scope='conv1')
      net = slim.max_pool2d(net, [3, 3], 2, scope='pool1')
      net = slim.conv2d(net, 192, [5, 5], scope='conv2')
      net = slim.max_pool2d(net, [3, 3], 2, scope='pool2')
      net = slim.conv2d(net, 384, [3, 3], scope='conv3')
      net = slim.conv2d(net, 384, [3, 3], scope='conv4')
      net = slim.conv2d(net, 256, [3, 3], scope='conv5')
      net = slim.max_pool2d(net, [3, 3], 2, scope='pool5')

      # Use conv2d instead of fully_connected layers.
      with slim.arg_scope([slim.conv2d],
                          weights_initializer=trunc_normal(0.005),
                          biases_initializer=tf.constant_initializer(0.1)):
        net = slim.conv2d(net, 4096, [5, 5], padding='VALID',
                          scope='fc6')
        net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                           scope='dropout6')
        net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
        net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                           scope='dropout7')
        net = slim.conv2d(net, num_classes, [1, 1],
                          activation_fn=None,
                          normalizer_fn=None,
                          biases_initializer=tf.zeros_initializer,
                          scope='fc8')

      # Convert end_points_collection into an end_point dict.
      end_points = dict(tf.get_collection(end_points_collection))
      if spatial_squeeze:
        net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
      return net, end_points


alexnet_v2.default_image_size = 224
```
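The usage pattern from the module docstring, spelled out end to end: a minimal sketch, assuming a contrib-slim-era TensorFlow and the `nets` package on the path; the `images` batch here is a stand-in for real input.

```python
import tensorflow as tf

from nets import alexnet

slim = tf.contrib.slim

# Stand-in input batch: classification mode expects 224x224 RGB
# (alexnet_v2.default_image_size).
images = tf.random_uniform((8, 224, 224, 3))

# Build under the default arg scope so conv/fc layers pick up the ReLU
# activation, bias initializer and L2 weight regularization defined above.
with slim.arg_scope(alexnet.alexnet_v2_arg_scope()):
  logits, end_points = alexnet.alexnet_v2(images, num_classes=1000,
                                          is_training=False)

probabilities = tf.nn.softmax(logits)  # shape [8, 1000]
predictions = tf.argmax(logits, 1)     # shape [8]
```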
`alexnet_test.py` (new file, 145 lines):

```python
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nets.alexnet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from nets import alexnet

slim = tf.contrib.slim


class AlexnetV2Test(tf.test.TestCase):

  def testBuild(self):
    batch_size = 5
    height, width = 224, 224
    num_classes = 1000
    with self.test_session():
      inputs = tf.random_uniform((batch_size, height, width, 3))
      logits, _ = alexnet.alexnet_v2(inputs, num_classes)
      self.assertEquals(logits.op.name, 'alexnet_v2/fc8/squeezed')
      self.assertListEqual(logits.get_shape().as_list(),
                           [batch_size, num_classes])

  def testFullyConvolutional(self):
    batch_size = 1
    height, width = 300, 400
    num_classes = 1000
    with self.test_session():
      inputs = tf.random_uniform((batch_size, height, width, 3))
      logits, _ = alexnet.alexnet_v2(inputs, num_classes, spatial_squeeze=False)
      self.assertEquals(logits.op.name, 'alexnet_v2/fc8/BiasAdd')
      self.assertListEqual(logits.get_shape().as_list(),
                           [batch_size, 4, 7, num_classes])

  def testEndPoints(self):
    batch_size = 5
    height, width = 224, 224
    num_classes = 1000
    with self.test_session():
      inputs = tf.random_uniform((batch_size, height, width, 3))
      _, end_points = alexnet.alexnet_v2(inputs, num_classes)
      expected_names = ['alexnet_v2/conv1',
                        'alexnet_v2/pool1',
                        'alexnet_v2/conv2',
                        'alexnet_v2/pool2',
                        'alexnet_v2/conv3',
                        'alexnet_v2/conv4',
                        'alexnet_v2/conv5',
                        'alexnet_v2/pool5',
                        'alexnet_v2/fc6',
                        'alexnet_v2/fc7',
                        'alexnet_v2/fc8'
                       ]
      self.assertSetEqual(set(end_points.keys()), set(expected_names))

  def testModelVariables(self):
    batch_size = 5
    height, width = 224, 224
    num_classes = 1000
    with self.test_session():
      inputs = tf.random_uniform((batch_size, height, width, 3))
      alexnet.alexnet_v2(inputs, num_classes)
      expected_names = ['alexnet_v2/conv1/weights',
                        'alexnet_v2/conv1/biases',
                        'alexnet_v2/conv2/weights',
                        'alexnet_v2/conv2/biases',
                        'alexnet_v2/conv3/weights',
                        'alexnet_v2/conv3/biases',
                        'alexnet_v2/conv4/weights',
                        'alexnet_v2/conv4/biases',
                        'alexnet_v2/conv5/weights',
                        'alexnet_v2/conv5/biases',
                        'alexnet_v2/fc6/weights',
                        'alexnet_v2/fc6/biases',
                        'alexnet_v2/fc7/weights',
                        'alexnet_v2/fc7/biases',
                        'alexnet_v2/fc8/weights',
                        'alexnet_v2/fc8/biases',
                       ]
      model_variables = [v.op.name for v in slim.get_model_variables()]
      self.assertSetEqual(set(model_variables), set(expected_names))

  def testEvaluation(self):
    batch_size = 2
    height, width = 224, 224
    num_classes = 1000
    with self.test_session():
      eval_inputs = tf.random_uniform((batch_size, height, width, 3))
      logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [batch_size, num_classes])
      predictions = tf.argmax(logits, 1)
      self.assertListEqual(predictions.get_shape().as_list(), [batch_size])

  def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 224, 224
    eval_height, eval_width = 300, 400
    num_classes = 1000
    with self.test_session():
      train_inputs = tf.random_uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = alexnet.alexnet_v2(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      tf.get_variable_scope().reuse_variables()
      eval_inputs = tf.random_uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False,
                                     spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 4, 7, num_classes])
      logits = tf.reduce_mean(logits, [1, 2])
      predictions = tf.argmax(logits, 1)
      self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])

  def testForward(self):
    batch_size = 1
    height, width = 224, 224
    with self.test_session() as sess:
      inputs = tf.random_uniform((batch_size, height, width, 3))
      logits, _ = alexnet.alexnet_v2(inputs)
      sess.run(tf.initialize_all_variables())
      output = sess.run(logits)
      self.assertTrue(output.any())


if __name__ == '__main__':
  tf.test.main()
```
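`testTrainEvalWithReuse` above encodes the intended deployment pattern: build the network once at training resolution, then reuse the same variables fully convolutionally at a larger evaluation resolution and average the resulting logit map. A sketch of that pattern outside the test harness (the input shapes mirror the test and are otherwise arbitrary):

```python
import tensorflow as tf

from nets import alexnet

# First call creates the model variables at training resolution.
train_inputs = tf.random_uniform((2, 224, 224, 3))
train_logits, _ = alexnet.alexnet_v2(train_inputs)

# Reuse the same weights for fully convolutional evaluation on larger images.
tf.get_variable_scope().reuse_variables()
eval_inputs = tf.random_uniform((1, 300, 400, 3))
eval_logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False,
                                    spatial_squeeze=False)  # [1, 4, 7, 1000]

# Collapse the spatial logit map into one score per class, then predict.
eval_logits = tf.reduce_mean(eval_logits, [1, 2])  # [1, 1000]
predictions = tf.argmax(eval_logits, 1)
```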
`cifarnet.py` (new file, 112 lines):

```python
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains a variant of the CIFAR-10 model definition."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

slim = tf.contrib.slim

trunc_normal = lambda stddev: tf.truncated_normal_initializer(stddev=stddev)


def cifarnet(images, num_classes=10, is_training=False,
             dropout_keep_prob=0.5,
             prediction_fn=slim.softmax,
             scope='CifarNet'):
  """Creates a variant of the CifarNet model.

  Note that since the output is a set of 'logits', the values fall in the
  interval of (-infinity, infinity). Consequently, to convert the outputs to a
  probability distribution over the classes, one will need to convert them
  using the softmax function:

    logits, end_points = cifarnet.cifarnet(images, is_training=False)
    probabilities = tf.nn.softmax(logits)
    predictions = tf.argmax(logits, 1)

  Args:
    images: A batch of `Tensors` of size [batch_size, height, width, channels].
    num_classes: the number of classes in the dataset.
    is_training: specifies whether or not we're currently training the model.
      This variable will determine the behaviour of the dropout layer.
    dropout_keep_prob: the percentage of activation values that are retained.
    prediction_fn: a function to get predictions out of logits.
    scope: Optional variable_scope.

  Returns:
    logits: the pre-softmax activations, a tensor of size
      [batch_size, `num_classes`]
    end_points: a dictionary from components of the network to the
      corresponding activation.
  """
  end_points = {}

  with tf.variable_scope(scope, 'CifarNet', [images, num_classes]):
    net = slim.conv2d(images, 64, [5, 5], scope='conv1')
    end_points['conv1'] = net
    net = slim.max_pool2d(net, [2, 2], 2, scope='pool1')
    end_points['pool1'] = net
    net = tf.nn.lrn(net, 4, bias=1.0, alpha=0.001/9.0, beta=0.75, name='norm1')
    net = slim.conv2d(net, 64, [5, 5], scope='conv2')
    end_points['conv2'] = net
    net = tf.nn.lrn(net, 4, bias=1.0, alpha=0.001/9.0, beta=0.75, name='norm2')
    net = slim.max_pool2d(net, [2, 2], 2, scope='pool2')
    end_points['pool2'] = net
    net = slim.flatten(net)
    end_points['Flatten'] = net
    net = slim.fully_connected(net, 384, scope='fc3')
    end_points['fc3'] = net
    net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                       scope='dropout3')
    net = slim.fully_connected(net, 192, scope='fc4')
    end_points['fc4'] = net
    logits = slim.fully_connected(net, num_classes,
                                  biases_initializer=tf.zeros_initializer,
                                  weights_initializer=trunc_normal(1/192.0),
                                  weights_regularizer=None,
                                  activation_fn=None,
                                  scope='logits')

    end_points['Logits'] = logits
    end_points['Predictions'] = prediction_fn(logits, scope='Predictions')

  return logits, end_points


cifarnet.default_image_size = 32


def cifarnet_arg_scope(weight_decay=0.004):
  """Defines the default cifarnet argument scope.

  Args:
    weight_decay: The weight decay to use for regularizing the model.

  Returns:
    An `arg_scope` to use for the cifarnet model.
  """
  with slim.arg_scope(
      [slim.conv2d],
      weights_initializer=tf.truncated_normal_initializer(stddev=5e-2),
      activation_fn=tf.nn.relu):
    with slim.arg_scope(
        [slim.fully_connected],
        biases_initializer=tf.constant_initializer(0.1),
        weights_initializer=trunc_normal(0.04),
        weights_regularizer=slim.l2_regularizer(weight_decay),
        activation_fn=tf.nn.relu) as sc:
      return sc
```
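As with alexnet, the intended entry point is the model function built under its arg scope. A minimal sketch under the same assumptions (the 16-image batch is a stand-in; CIFAR-10 inputs are 32x32 per `cifarnet.default_image_size`):

```python
import tensorflow as tf

from nets import cifarnet

slim = tf.contrib.slim

# Stand-in batch of CIFAR-10-sized images.
images = tf.random_uniform((16, 32, 32, 3))

with slim.arg_scope(cifarnet.cifarnet_arg_scope(weight_decay=0.004)):
  logits, end_points = cifarnet.cifarnet(images, num_classes=10,
                                         is_training=False)

# The softmax is already wired in as an end point via prediction_fn.
probabilities = end_points['Predictions']  # shape [16, 10]
predictions = tf.argmax(logits, 1)         # shape [16]
```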