From 1ea1ee3b5b79d0ceb3589382f0645c44908b7c0e Mon Sep 17 00:00:00 2001 From: Mingxuan Yi Date: Tue, 14 Feb 2017 18:30:08 +0800 Subject: [PATCH 1/2] Update directory structure --- ...345\244\232GPU\345\271\266\350\241\214.py" | 28 +++++++++---------- ...345\217\226\346\225\260\346\215\256.ipynb" | 2 +- ...345\205\250\346\250\241\345\236\213.ipynb" | 2 +- ...346\255\243\345\210\231\345\214\226.ipynb" | 2 +- ...345\255\246\344\271\240\347\216\207.ipynb" | 2 +- ...346\264\273\345\207\275\346\225\260.ipynb" | 2 +- ...351\232\220\350\227\217\345\261\202.ipynb" | 2 +- ...345\212\250\345\271\263\345\235\207.ipynb" | 2 +- .../mnist_eval.ipynb" | 2 +- .../mnist_train.ipynb" | 2 +- ...347\247\273\345\255\246\344\271\240.ipynb" | 6 ++-- .../Chapter06/LeNet-5/LeNet5_train.ipynb | 2 +- ...344\276\213\347\250\213\345\272\217.ipynb" | 2 +- ...347\220\206\345\207\275\346\225\260.ipynb" | 2 +- ...346\225\264\346\240\267\344\276\213.ipynb" | 2 +- ...351\233\206\344\273\213\347\273\215.ipynb" | 2 +- ...350\250\200\346\250\241\345\236\213.ipynb" | 2 +- ...345\244\232GPU\345\271\266\350\241\214.py" | 28 +++++++++---------- 18 files changed, 46 insertions(+), 46 deletions(-) diff --git "a/Deep_Learning_with_TensorFlow/0.12.0/Chapter10/2. \345\244\232GPU\345\271\266\350\241\214.py" "b/Deep_Learning_with_TensorFlow/0.12.0/Chapter10/2. \345\244\232GPU\345\271\266\350\241\214.py" index 416f1980..b1677c2c 100644 --- "a/Deep_Learning_with_TensorFlow/0.12.0/Chapter10/2. \345\244\232GPU\345\271\266\350\241\214.py" +++ "b/Deep_Learning_with_TensorFlow/0.12.0/Chapter10/2. \345\244\232GPU\345\271\266\350\241\214.py" @@ -16,15 +16,15 @@ N_GPU = 4 # 定义日志和模型输出的路径。 -MODEL_SAVE_PATH = "/path/to/logs_and_models/" +MODEL_SAVE_PATH = "logs_and_models/" MODEL_NAME = "model.ckpt" -DATA_PATH = "/path/to/data.tfrecords" +DATA_PATH = "output.tfrecords" # 定义输入队列得到训练数据,具体细节可以参考第七章。 def get_input(): filename_queue = tf.train.string_input_producer([DATA_PATH]) reader = tf.TFRecordReader() - _, serialized_example = reader.read(filename_queue) + _, serialized_example = reader.read(filename_queue) # 定义数据解析格式。 features = tf.parse_single_example( @@ -44,7 +44,7 @@ def get_input(): # 定义输入队列并返回。 min_after_dequeue = 10000 capacity = min_after_dequeue + 3 * BATCH_SIZE - return tf.train.shuffle_batch( + return tf.train.shuffle_batch( [retyped_image, label], batch_size=BATCH_SIZE, capacity=capacity, @@ -52,9 +52,9 @@ def get_input(): # 定义损失函数。 def get_loss(x, y_, regularizer, scope): - y = mnist_inference.inference(x, regularizer) - cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(y, y_)) - regularization_loss = tf.add_n(tf.get_collection('losses', scope)) + y = mnist_inference.inference(x, regularizer) + cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(y, y_)) + regularization_loss = tf.add_n(tf.get_collection('losses', scope)) loss = cross_entropy + regularization_loss return loss @@ -82,7 +82,7 @@ def average_gradients(tower_grads): # 主训练过程。 def main(argv=None): # 将简单的运算放在CPU上,只有神经网络的训练过程放在GPU上。 - with tf.Graph().as_default(), tf.device('/cpu:0'): + with tf.Graph().as_default(), tf.device('/cpu:0'): # 定义基本的训练过程 x, y_ = get_input() @@ -110,12 +110,12 @@ def main(argv=None): grads = average_gradients(tower_grads) for grad, var in grads: if grad is not None: - tf.histogram_summary('gradients_on_average/%s' % var.op.name, grad) + tf.summary.histogram('gradients_on_average/%s' % var.op.name, grad) # 使用平均梯度更新参数。 apply_gradient_op = opt.apply_gradients(grads, global_step=global_step) for var in 
tf.trainable_variables(): - tf.histogram_summary(var.op.name, var) + tf.summary.histogram(var.op.name, var) # 计算变量的滑动平均值。 variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step) @@ -123,16 +123,16 @@ def main(argv=None): # 每一轮迭代需要更新变量的取值并更新变量的滑动平均值。 train_op = tf.group(apply_gradient_op, variables_averages_op) - saver = tf.train.Saver(tf.all_variables()) - summary_op = tf.merge_all_summaries() - init = tf.initialize_all_variables() + saver = tf.train.Saver(tf.global_variables()) + summary_op = tf.summary.merge_all() + init = tf.global_variables_initializer() with tf.Session(config=tf.ConfigProto( allow_soft_placement=True, log_device_placement=True)) as sess: # 初始化所有变量并启动队列。 init.run() coord = tf.train.Coordinator() threads = tf.train.start_queue_runners(sess=sess, coord=coord) - summary_writer = tf.train.SummaryWriter(MODEL_SAVE_PATH, sess.graph) + summary_writer = tf.summary.FileWriter(MODEL_SAVE_PATH, sess.graph) for step in range(TRAINING_STEPS): # 执行神经网络训练操作,并记录训练操作的运行时间。 diff --git "a/Deep_Learning_with_TensorFlow/0.12.1/Chapter05/1. MNIST\350\257\273\345\217\226\346\225\260\346\215\256.ipynb" "b/Deep_Learning_with_TensorFlow/0.12.1/Chapter05/1. MNIST\350\257\273\345\217\226\346\225\260\346\215\256.ipynb" index e7702a8e..f7d98e78 100644 --- "a/Deep_Learning_with_TensorFlow/0.12.1/Chapter05/1. MNIST\350\257\273\345\217\226\346\225\260\346\215\256.ipynb" +++ "b/Deep_Learning_with_TensorFlow/0.12.1/Chapter05/1. MNIST\350\257\273\345\217\226\346\225\260\346\215\256.ipynb" @@ -34,7 +34,7 @@ ], "source": [ "from tensorflow.examples.tutorials.mnist import input_data\n", - "mnist = input_data.read_data_sets(\"../../Data_sets/MNIST_data/\", one_hot=True)" + "mnist = input_data.read_data_sets(\"../../datasets/MNIST_data/\", one_hot=True)" ] }, { diff --git "a/Deep_Learning_with_TensorFlow/0.12.1/Chapter05/2. TensorFlow\350\256\255\347\273\203\347\245\236\347\273\217\347\275\221\347\273\234/1. \345\205\250\346\250\241\345\236\213.ipynb" "b/Deep_Learning_with_TensorFlow/0.12.1/Chapter05/2. TensorFlow\350\256\255\347\273\203\347\245\236\347\273\217\347\275\221\347\273\234/1. \345\205\250\346\250\241\345\236\213.ipynb" index f6fac1ee..ba8cc2cf 100644 --- "a/Deep_Learning_with_TensorFlow/0.12.1/Chapter05/2. TensorFlow\350\256\255\347\273\203\347\245\236\347\273\217\347\275\221\347\273\234/1. \345\205\250\346\250\241\345\236\213.ipynb" +++ "b/Deep_Learning_with_TensorFlow/0.12.1/Chapter05/2. TensorFlow\350\256\255\347\273\203\347\245\236\347\273\217\347\275\221\347\273\234/1. \345\205\250\346\250\241\345\236\213.ipynb" @@ -183,7 +183,7 @@ ], "source": [ "def main(argv=None):\n", - " mnist = input_data.read_data_sets(\"../../../Data_sets/MNIST_data\", one_hot=True)\n", + " mnist = input_data.read_data_sets(\"../../../datasets/MNIST_data\", one_hot=True)\n", " train(mnist)\n", "\n", "if __name__=='__main__':\n", diff --git "a/Deep_Learning_with_TensorFlow/0.12.1/Chapter05/2. TensorFlow\350\256\255\347\273\203\347\245\236\347\273\217\347\275\221\347\273\234/2. \344\270\215\344\275\277\347\224\250\346\255\243\345\210\231\345\214\226.ipynb" "b/Deep_Learning_with_TensorFlow/0.12.1/Chapter05/2. TensorFlow\350\256\255\347\273\203\347\245\236\347\273\217\347\275\221\347\273\234/2. \344\270\215\344\275\277\347\224\250\346\255\243\345\210\231\345\214\226.ipynb" index 29c06598..af5ae86c 100644 --- "a/Deep_Learning_with_TensorFlow/0.12.1/Chapter05/2. TensorFlow\350\256\255\347\273\203\347\245\236\347\273\217\347\275\221\347\273\234/2. 
\344\270\215\344\275\277\347\224\250\346\255\243\345\210\231\345\214\226.ipynb" +++ "b/Deep_Learning_with_TensorFlow/0.12.1/Chapter05/2. TensorFlow\350\256\255\347\273\203\347\245\236\347\273\217\347\275\221\347\273\234/2. \344\270\215\344\275\277\347\224\250\346\255\243\345\210\231\345\214\226.ipynb" @@ -181,7 +181,7 @@ ], "source": [ "def main(argv=None):\n", - " mnist = input_data.read_data_sets(\"../../../Data_sets/MNIST_data\", one_hot=True)\n", + " mnist = input_data.read_data_sets(\"../../../datasets/MNIST_data\", one_hot=True)\n", " train(mnist)\n", "\n", "if __name__=='__main__':\n", diff --git "a/Deep_Learning_with_TensorFlow/0.12.1/Chapter05/2. TensorFlow\350\256\255\347\273\203\347\245\236\347\273\217\347\275\221\347\273\234/3. \344\270\215\344\275\277\347\224\250\346\214\207\346\225\260\350\241\260\345\207\217\347\232\204\345\255\246\344\271\240\347\216\207.ipynb" "b/Deep_Learning_with_TensorFlow/0.12.1/Chapter05/2. TensorFlow\350\256\255\347\273\203\347\245\236\347\273\217\347\275\221\347\273\234/3. \344\270\215\344\275\277\347\224\250\346\214\207\346\225\260\350\241\260\345\207\217\347\232\204\345\255\246\344\271\240\347\216\207.ipynb" index 256bdbb2..24d988e1 100644 --- "a/Deep_Learning_with_TensorFlow/0.12.1/Chapter05/2. TensorFlow\350\256\255\347\273\203\347\245\236\347\273\217\347\275\221\347\273\234/3. \344\270\215\344\275\277\347\224\250\346\214\207\346\225\260\350\241\260\345\207\217\347\232\204\345\255\246\344\271\240\347\216\207.ipynb" +++ "b/Deep_Learning_with_TensorFlow/0.12.1/Chapter05/2. TensorFlow\350\256\255\347\273\203\347\245\236\347\273\217\347\275\221\347\273\234/3. \344\270\215\344\275\277\347\224\250\346\214\207\346\225\260\350\241\260\345\207\217\347\232\204\345\255\246\344\271\240\347\216\207.ipynb" @@ -174,7 +174,7 @@ ], "source": [ "def main(argv=None):\n", - " mnist = input_data.read_data_sets(\"../../../Data_sets/MNIST_data\", one_hot=True)\n", + " mnist = input_data.read_data_sets(\"../../../datasets/MNIST_data\", one_hot=True)\n", " train(mnist)\n", "\n", "if __name__=='__main__':\n", diff --git "a/Deep_Learning_with_TensorFlow/0.12.1/Chapter05/2. TensorFlow\350\256\255\347\273\203\347\245\236\347\273\217\347\275\221\347\273\234/4. \344\270\215\344\275\277\347\224\250\346\277\200\346\264\273\345\207\275\346\225\260.ipynb" "b/Deep_Learning_with_TensorFlow/0.12.1/Chapter05/2. TensorFlow\350\256\255\347\273\203\347\245\236\347\273\217\347\275\221\347\273\234/4. \344\270\215\344\275\277\347\224\250\346\277\200\346\264\273\345\207\275\346\225\260.ipynb" index 9688eaab..26398765 100644 --- "a/Deep_Learning_with_TensorFlow/0.12.1/Chapter05/2. TensorFlow\350\256\255\347\273\203\347\245\236\347\273\217\347\275\221\347\273\234/4. \344\270\215\344\275\277\347\224\250\346\277\200\346\264\273\345\207\275\346\225\260.ipynb" +++ "b/Deep_Learning_with_TensorFlow/0.12.1/Chapter05/2. TensorFlow\350\256\255\347\273\203\347\245\236\347\273\217\347\275\221\347\273\234/4. \344\270\215\344\275\277\347\224\250\346\277\200\346\264\273\345\207\275\346\225\260.ipynb" @@ -183,7 +183,7 @@ ], "source": [ "def main(argv=None):\n", - " mnist = input_data.read_data_sets(\"../../../Data_sets/MNIST_data\", one_hot=True)\n", + " mnist = input_data.read_data_sets(\"../../../datasets/MNIST_data\", one_hot=True)\n", " train(mnist)\n", "\n", "if __name__=='__main__':\n", diff --git "a/Deep_Learning_with_TensorFlow/0.12.1/Chapter05/2. TensorFlow\350\256\255\347\273\203\347\245\236\347\273\217\347\275\221\347\273\234/5. 
\344\270\215\344\275\277\347\224\250\351\232\220\350\227\217\345\261\202.ipynb" "b/Deep_Learning_with_TensorFlow/0.12.1/Chapter05/2. TensorFlow\350\256\255\347\273\203\347\245\236\347\273\217\347\275\221\347\273\234/5. \344\270\215\344\275\277\347\224\250\351\232\220\350\227\217\345\261\202.ipynb" index fa0f00c1..80054e87 100644 --- "a/Deep_Learning_with_TensorFlow/0.12.1/Chapter05/2. TensorFlow\350\256\255\347\273\203\347\245\236\347\273\217\347\275\221\347\273\234/5. \344\270\215\344\275\277\347\224\250\351\232\220\350\227\217\345\261\202.ipynb" +++ "b/Deep_Learning_with_TensorFlow/0.12.1/Chapter05/2. TensorFlow\350\256\255\347\273\203\347\245\236\347\273\217\347\275\221\347\273\234/5. \344\270\215\344\275\277\347\224\250\351\232\220\350\227\217\345\261\202.ipynb" @@ -180,7 +180,7 @@ ], "source": [ "def main(argv=None):\n", - " mnist = input_data.read_data_sets(\"../../../Data_sets/MNIST_data\", one_hot=True)\n", + " mnist = input_data.read_data_sets(\"../../../datasets/MNIST_data\", one_hot=True)\n", " train(mnist)\n", "\n", "if __name__=='__main__':\n", diff --git "a/Deep_Learning_with_TensorFlow/0.12.1/Chapter05/2. TensorFlow\350\256\255\347\273\203\347\245\236\347\273\217\347\275\221\347\273\234/6. \344\270\215\344\275\277\347\224\250\346\273\221\345\212\250\345\271\263\345\235\207.ipynb" "b/Deep_Learning_with_TensorFlow/0.12.1/Chapter05/2. TensorFlow\350\256\255\347\273\203\347\245\236\347\273\217\347\275\221\347\273\234/6. \344\270\215\344\275\277\347\224\250\346\273\221\345\212\250\345\271\263\345\235\207.ipynb" index 3b944636..826750fc 100644 --- "a/Deep_Learning_with_TensorFlow/0.12.1/Chapter05/2. TensorFlow\350\256\255\347\273\203\347\245\236\347\273\217\347\275\221\347\273\234/6. \344\270\215\344\275\277\347\224\250\346\273\221\345\212\250\345\271\263\345\235\207.ipynb" +++ "b/Deep_Learning_with_TensorFlow/0.12.1/Chapter05/2. TensorFlow\350\256\255\347\273\203\347\245\236\347\273\217\347\275\221\347\273\234/6. \344\270\215\344\275\277\347\224\250\346\273\221\345\212\250\345\271\263\345\235\207.ipynb" @@ -178,7 +178,7 @@ ], "source": [ "def main(argv=None):\n", - " mnist = input_data.read_data_sets(\"../../../Data_sets/MNIST_data\", one_hot=True)\n", + " mnist = input_data.read_data_sets(\"../../../datasets/MNIST_data\", one_hot=True)\n", " train(mnist)\n", "\n", "if __name__=='__main__':\n", diff --git "a/Deep_Learning_with_TensorFlow/0.12.1/Chapter05/5. MNIST\346\234\200\344\275\263\345\256\236\350\267\265/mnist_eval.ipynb" "b/Deep_Learning_with_TensorFlow/0.12.1/Chapter05/5. MNIST\346\234\200\344\275\263\345\256\236\350\267\265/mnist_eval.ipynb" index 7e301832..ef18cd24 100644 --- "a/Deep_Learning_with_TensorFlow/0.12.1/Chapter05/5. MNIST\346\234\200\344\275\263\345\256\236\350\267\265/mnist_eval.ipynb" +++ "b/Deep_Learning_with_TensorFlow/0.12.1/Chapter05/5. MNIST\346\234\200\344\275\263\345\256\236\350\267\265/mnist_eval.ipynb" @@ -91,7 +91,7 @@ ], "source": [ "def main(argv=None):\n", - " mnist = input_data.read_data_sets(\"../../../Data_sets/MNIST_data\", one_hot=True)\n", + " mnist = input_data.read_data_sets(\"../../../datasets/MNIST_data\", one_hot=True)\n", " evaluate(mnist)\n", "\n", "if __name__ == '__main__':\n", diff --git "a/Deep_Learning_with_TensorFlow/0.12.1/Chapter05/5. MNIST\346\234\200\344\275\263\345\256\236\350\267\265/mnist_train.ipynb" "b/Deep_Learning_with_TensorFlow/0.12.1/Chapter05/5. MNIST\346\234\200\344\275\263\345\256\236\350\267\265/mnist_train.ipynb" index eaa017f4..fba89fc6 100644 --- "a/Deep_Learning_with_TensorFlow/0.12.1/Chapter05/5. 
MNIST\346\234\200\344\275\263\345\256\236\350\267\265/mnist_train.ipynb" +++ "b/Deep_Learning_with_TensorFlow/0.12.1/Chapter05/5. MNIST\346\234\200\344\275\263\345\256\236\350\267\265/mnist_train.ipynb" @@ -148,7 +148,7 @@ ], "source": [ "def main(argv=None):\n", - " mnist = input_data.read_data_sets(\"../../../Data_sets/MNIST_data\", one_hot=True)\n", + " mnist = input_data.read_data_sets(\"../../../datasets/MNIST_data\", one_hot=True)\n", " train(mnist)\n", "\n", "if __name__ == '__main__':\n", diff --git "a/Deep_Learning_with_TensorFlow/0.12.1/Chapter06/2. \350\277\201\347\247\273\345\255\246\344\271\240.ipynb" "b/Deep_Learning_with_TensorFlow/0.12.1/Chapter06/2. \350\277\201\347\247\273\345\255\246\344\271\240.ipynb" index b087c48e..6f9ccc02 100644 --- "a/Deep_Learning_with_TensorFlow/0.12.1/Chapter06/2. \350\277\201\347\247\273\345\255\246\344\271\240.ipynb" +++ "b/Deep_Learning_with_TensorFlow/0.12.1/Chapter06/2. \350\277\201\347\247\273\345\255\246\344\271\240.ipynb" @@ -36,11 +36,11 @@ "JPEG_DATA_TENSOR_NAME = 'DecodeJpeg/contents:0'\n", "\n", "\n", - "MODEL_DIR = '../../Data_sets/inception_dec_2015'\n", + "MODEL_DIR = '../../datasets/inception_dec_2015'\n", "MODEL_FILE= 'tensorflow_inception_graph.pb'\n", "\n", - "CACHE_DIR = '../../Data_sets/bottleneck'\n", - "INPUT_DATA = '../../Data_sets/flower_photos'\n", + "CACHE_DIR = '../../datasets/bottleneck'\n", + "INPUT_DATA = '../../datasets/flower_photos'\n", "\n", "VALIDATION_PERCENTAGE = 10\n", "TEST_PERCENTAGE = 10" diff --git a/Deep_Learning_with_TensorFlow/0.12.1/Chapter06/LeNet-5/LeNet5_train.ipynb b/Deep_Learning_with_TensorFlow/0.12.1/Chapter06/LeNet-5/LeNet5_train.ipynb index 790d9ea4..aa9db5fc 100644 --- a/Deep_Learning_with_TensorFlow/0.12.1/Chapter06/LeNet-5/LeNet5_train.ipynb +++ b/Deep_Learning_with_TensorFlow/0.12.1/Chapter06/LeNet-5/LeNet5_train.ipynb @@ -140,7 +140,7 @@ ], "source": [ "def main(argv=None):\n", - " mnist = input_data.read_data_sets(\"../../../Data_sets/MNIST_data\", one_hot=True)\n", + " mnist = input_data.read_data_sets(\"../../../datasets/MNIST_data\", one_hot=True)\n", " train(mnist)\n", "\n", "if __name__ == '__main__':\n", diff --git "a/Deep_Learning_with_TensorFlow/0.12.1/Chapter07/1. TFRecord\346\240\267\344\276\213\347\250\213\345\272\217.ipynb" "b/Deep_Learning_with_TensorFlow/0.12.1/Chapter07/1. TFRecord\346\240\267\344\276\213\347\250\213\345\272\217.ipynb" index f21ee388..294f9bb8 100644 --- "a/Deep_Learning_with_TensorFlow/0.12.1/Chapter07/1. TFRecord\346\240\267\344\276\213\347\250\213\345\272\217.ipynb" +++ "b/Deep_Learning_with_TensorFlow/0.12.1/Chapter07/1. TFRecord\346\240\267\344\276\213\347\250\213\345\272\217.ipynb" @@ -48,7 +48,7 @@ " return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n", "\n", "# 读取mnist数据。\n", - "mnist = input_data.read_data_sets(\"../../Data_sets/MNIST_data\",dtype=tf.uint8, one_hot=True)\n", + "mnist = input_data.read_data_sets(\"../../datasets/MNIST_data\",dtype=tf.uint8, one_hot=True)\n", "images = mnist.train.images\n", "labels = mnist.train.labels\n", "pixels = images.shape[1]\n", diff --git "a/Deep_Learning_with_TensorFlow/0.12.1/Chapter07/2.1. TensorFlow\345\233\276\345\203\217\345\244\204\347\220\206\345\207\275\346\225\260.ipynb" "b/Deep_Learning_with_TensorFlow/0.12.1/Chapter07/2.1. TensorFlow\345\233\276\345\203\217\345\244\204\347\220\206\345\207\275\346\225\260.ipynb" index e88fbc01..40e85092 100644 --- "a/Deep_Learning_with_TensorFlow/0.12.1/Chapter07/2.1. 
TensorFlow\345\233\276\345\203\217\345\244\204\347\220\206\345\207\275\346\225\260.ipynb" +++ "b/Deep_Learning_with_TensorFlow/0.12.1/Chapter07/2.1. TensorFlow\345\233\276\345\203\217\345\244\204\347\220\206\345\207\275\346\225\260.ipynb" @@ -84,7 +84,7 @@ } ], "source": [ - "image_raw_data = tf.gfile.FastGFile(\"../../Data_sets/cat.jpg\",'r').read()\n", + "image_raw_data = tf.gfile.FastGFile(\"../../datasets/cat.jpg\",'r').read()\n", "\n", "with tf.Session() as sess:\n", " img_data = tf.image.decode_jpeg(image_raw_data)\n", diff --git "a/Deep_Learning_with_TensorFlow/0.12.1/Chapter07/2.2. \345\233\276\345\203\217\351\242\204\345\244\204\347\220\206\345\256\214\346\225\264\346\240\267\344\276\213.ipynb" "b/Deep_Learning_with_TensorFlow/0.12.1/Chapter07/2.2. \345\233\276\345\203\217\351\242\204\345\244\204\347\220\206\345\256\214\346\225\264\346\240\267\344\276\213.ipynb" index e5ab2340..c6e89155 100644 --- "a/Deep_Learning_with_TensorFlow/0.12.1/Chapter07/2.2. \345\233\276\345\203\217\351\242\204\345\244\204\347\220\206\345\256\214\346\225\264\346\240\267\344\276\213.ipynb" +++ "b/Deep_Learning_with_TensorFlow/0.12.1/Chapter07/2.2. \345\233\276\345\203\217\351\242\204\345\244\204\347\220\206\345\256\214\346\225\264\346\240\267\344\276\213.ipynb" @@ -185,7 +185,7 @@ } ], "source": [ - "image_raw_data = tf.gfile.FastGFile(\"../../Data_sets/cat.jpg\", \"r\").read()\n", + "image_raw_data = tf.gfile.FastGFile(\"../../datasets/cat.jpg\", \"r\").read()\n", "with tf.Session() as sess:\n", " img_data = tf.image.decode_jpeg(image_raw_data)\n", " boxes = tf.constant([[[0.05, 0.05, 0.9, 0.7], [0.35, 0.47, 0.5, 0.56]]])\n", diff --git "a/Deep_Learning_with_TensorFlow/0.12.1/Chapter08/2. PTB\346\225\260\346\215\256\351\233\206\344\273\213\347\273\215.ipynb" "b/Deep_Learning_with_TensorFlow/0.12.1/Chapter08/2. PTB\346\225\260\346\215\256\351\233\206\344\273\213\347\273\215.ipynb" index 7b7650dd..ed5268db 100644 --- "a/Deep_Learning_with_TensorFlow/0.12.1/Chapter08/2. PTB\346\225\260\346\215\256\351\233\206\344\273\213\347\273\215.ipynb" +++ "b/Deep_Learning_with_TensorFlow/0.12.1/Chapter08/2. PTB\346\225\260\346\215\256\351\233\206\344\273\213\347\273\215.ipynb" @@ -36,7 +36,7 @@ } ], "source": [ - "DATA_PATH = \"../../data_sets/PTB_data\"\n", + "DATA_PATH = \"../../datasets/PTB_data\"\n", "train_data, valid_data, test_data, _ = reader.ptb_raw_data(DATA_PATH)\n", "print len(train_data)\n", "print train_data[:100]" diff --git "a/Deep_Learning_with_TensorFlow/0.12.1/Chapter08/3. \344\275\277\347\224\250\345\276\252\347\216\257\347\245\236\347\273\217\347\275\221\347\273\234\345\256\236\347\216\260\350\257\255\350\250\200\346\250\241\345\236\213.ipynb" "b/Deep_Learning_with_TensorFlow/0.12.1/Chapter08/3. \344\275\277\347\224\250\345\276\252\347\216\257\347\245\236\347\273\217\347\275\221\347\273\234\345\256\236\347\216\260\350\257\255\350\250\200\346\250\241\345\236\213.ipynb" index b35d8ac9..49a7241a 100644 --- "a/Deep_Learning_with_TensorFlow/0.12.1/Chapter08/3. \344\275\277\347\224\250\345\276\252\347\216\257\347\245\236\347\273\217\347\275\221\347\273\234\345\256\236\347\216\260\350\257\255\350\250\200\346\250\241\345\236\213.ipynb" +++ "b/Deep_Learning_with_TensorFlow/0.12.1/Chapter08/3. 
\344\275\277\347\224\250\345\276\252\347\216\257\347\245\236\347\273\217\347\275\221\347\273\234\345\256\236\347\216\260\350\257\255\350\250\200\346\250\241\345\236\213.ipynb" @@ -28,7 +28,7 @@ }, "outputs": [], "source": [ - "DATA_PATH = \"../../data_sets/PTB_data\"\n", + "DATA_PATH = \"../../datasets/PTB_data\"\n", "HIDDEN_SIZE = 200\n", "NUM_LAYERS = 2\n", "VOCAB_SIZE = 10000\n", diff --git "a/Deep_Learning_with_TensorFlow/0.12.1/Chapter10/2. \345\244\232GPU\345\271\266\350\241\214.py" "b/Deep_Learning_with_TensorFlow/0.12.1/Chapter10/2. \345\244\232GPU\345\271\266\350\241\214.py" index 416f1980..b1677c2c 100644 --- "a/Deep_Learning_with_TensorFlow/0.12.1/Chapter10/2. \345\244\232GPU\345\271\266\350\241\214.py" +++ "b/Deep_Learning_with_TensorFlow/0.12.1/Chapter10/2. \345\244\232GPU\345\271\266\350\241\214.py" @@ -16,15 +16,15 @@ N_GPU = 4 # 定义日志和模型输出的路径。 -MODEL_SAVE_PATH = "/path/to/logs_and_models/" +MODEL_SAVE_PATH = "logs_and_models/" MODEL_NAME = "model.ckpt" -DATA_PATH = "/path/to/data.tfrecords" +DATA_PATH = "output.tfrecords" # 定义输入队列得到训练数据,具体细节可以参考第七章。 def get_input(): filename_queue = tf.train.string_input_producer([DATA_PATH]) reader = tf.TFRecordReader() - _, serialized_example = reader.read(filename_queue) + _, serialized_example = reader.read(filename_queue) # 定义数据解析格式。 features = tf.parse_single_example( @@ -44,7 +44,7 @@ def get_input(): # 定义输入队列并返回。 min_after_dequeue = 10000 capacity = min_after_dequeue + 3 * BATCH_SIZE - return tf.train.shuffle_batch( + return tf.train.shuffle_batch( [retyped_image, label], batch_size=BATCH_SIZE, capacity=capacity, @@ -52,9 +52,9 @@ def get_input(): # 定义损失函数。 def get_loss(x, y_, regularizer, scope): - y = mnist_inference.inference(x, regularizer) - cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(y, y_)) - regularization_loss = tf.add_n(tf.get_collection('losses', scope)) + y = mnist_inference.inference(x, regularizer) + cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(y, y_)) + regularization_loss = tf.add_n(tf.get_collection('losses', scope)) loss = cross_entropy + regularization_loss return loss @@ -82,7 +82,7 @@ def average_gradients(tower_grads): # 主训练过程。 def main(argv=None): # 将简单的运算放在CPU上,只有神经网络的训练过程放在GPU上。 - with tf.Graph().as_default(), tf.device('/cpu:0'): + with tf.Graph().as_default(), tf.device('/cpu:0'): # 定义基本的训练过程 x, y_ = get_input() @@ -110,12 +110,12 @@ def main(argv=None): grads = average_gradients(tower_grads) for grad, var in grads: if grad is not None: - tf.histogram_summary('gradients_on_average/%s' % var.op.name, grad) + tf.summary.histogram('gradients_on_average/%s' % var.op.name, grad) # 使用平均梯度更新参数。 apply_gradient_op = opt.apply_gradients(grads, global_step=global_step) for var in tf.trainable_variables(): - tf.histogram_summary(var.op.name, var) + tf.summary.histogram(var.op.name, var) # 计算变量的滑动平均值。 variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step) @@ -123,16 +123,16 @@ def main(argv=None): # 每一轮迭代需要更新变量的取值并更新变量的滑动平均值。 train_op = tf.group(apply_gradient_op, variables_averages_op) - saver = tf.train.Saver(tf.all_variables()) - summary_op = tf.merge_all_summaries() - init = tf.initialize_all_variables() + saver = tf.train.Saver(tf.global_variables()) + summary_op = tf.summary.merge_all() + init = tf.global_variables_initializer() with tf.Session(config=tf.ConfigProto( allow_soft_placement=True, log_device_placement=True)) as sess: # 初始化所有变量并启动队列。 init.run() coord = tf.train.Coordinator() threads = 
tf.train.start_queue_runners(sess=sess, coord=coord) - summary_writer = tf.train.SummaryWriter(MODEL_SAVE_PATH, sess.graph) + summary_writer = tf.summary.FileWriter(MODEL_SAVE_PATH, sess.graph) for step in range(TRAINING_STEPS): # 执行神经网络训练操作,并记录训练操作的运行时间。 From f3632d403ba55d6a69179e5969e37426565815e9 Mon Sep 17 00:00:00 2001 From: Mingxuan Yi Date: Tue, 14 Feb 2017 18:51:54 +0800 Subject: [PATCH 2/2] Add MSE and update the structure of directories --- ...345\244\261\345\207\275\346\225\260.ipynb" | 115 ++++++++++++++---- ...345\244\261\345\207\275\346\225\260.ipynb" | 112 +++++++++++++---- ...346\255\243\345\210\231\345\214\226.ipynb" | 2 +- 3 files changed, 176 insertions(+), 53 deletions(-) diff --git "a/Deep_Learning_with_TensorFlow/0.12.0/Chapter04/1. \350\207\252\345\256\232\344\271\211\346\215\237\345\244\261\345\207\275\346\225\260.ipynb" "b/Deep_Learning_with_TensorFlow/0.12.0/Chapter04/1. \350\207\252\345\256\232\344\271\211\346\215\237\345\244\261\345\207\275\346\225\260.ipynb" index c0f896d5..6b3d7fb7 100644 --- "a/Deep_Learning_with_TensorFlow/0.12.0/Chapter04/1. \350\207\252\345\256\232\344\271\211\346\215\237\345\244\261\345\207\275\346\225\260.ipynb" +++ "b/Deep_Learning_with_TensorFlow/0.12.0/Chapter04/1. \350\207\252\345\256\232\344\271\211\346\215\237\345\244\261\345\207\275\346\225\260.ipynb" @@ -45,14 +45,14 @@ "cell_type": "code", "execution_count": 3, "metadata": { - "collapsed": true + "collapsed": false }, "outputs": [], "source": [ "# 定义损失函数使得预测少了的损失大,于是模型应该偏向多的方向预测。\n", "loss_less = 10\n", "loss_more = 1\n", - "loss = tf.reduce_sum(tf.select(tf.greater(y, y_), (y - y_) * loss_more, (y_ - y) * loss_less))\n", + "loss = tf.reduce_sum(tf.where(tf.greater(y, y_), (y - y_) * loss_more, (y_ - y) * loss_less))\n", "train_step = tf.train.AdamOptimizer(0.001).minimize(loss)" ] }, @@ -73,7 +73,7 @@ "source": [ "rdm = RandomState(1)\n", "X = rdm.rand(128,2)\n", - "Y = [[x1+x2+rdm.rand()] for (x1, x2) in X]" + "Y = [[x1+x2+rdm.rand()/10.0-0.05] for (x1, x2) in X]" ] }, { @@ -99,24 +99,24 @@ " [ 1.4855988 ]] \n", "\n", "After 1000 training step(s), w1 is: \n", - "[[ 0.10801763]\n", - " [ 2.33051562]] \n", + "[[ 0.01247112]\n", + " [ 2.1385448 ]] \n", "\n", "After 2000 training step(s), w1 is: \n", - "[[ 0.69744456]\n", - " [ 2.66114545]] \n", + "[[ 0.45567414]\n", + " [ 2.17060661]] \n", "\n", "After 3000 training step(s), w1 is: \n", - "[[ 1.01532638]\n", - " [ 2.6881988 ]] \n", + "[[ 0.69968724]\n", + " [ 1.8465308 ]] \n", "\n", "After 4000 training step(s), w1 is: \n", - "[[ 1.29930842]\n", - " [ 2.56087017]] \n", + "[[ 0.89886665]\n", + " [ 1.29736018]] \n", "\n", "Final w1 is: \n", - "[[ 1.50898325]\n", - " [ 2.31391478]]\n" + "[[ 1.01934695]\n", + " [ 1.04280889]]\n" ] } ], @@ -154,35 +154,97 @@ "output_type": "stream", "text": [ "After 0 training step(s), w1 is: \n", - "[[-0.81031823]\n", - " [ 1.4855988 ]] \n", + "[[-0.81231821]\n", + " [ 1.48359871]] \n", "\n", "After 1000 training step(s), w1 is: \n", - "[[ 0.06754676]\n", - " [ 1.68602526]] \n", + "[[ 0.18643527]\n", + " [ 1.07393336]] \n", "\n", "After 2000 training step(s), w1 is: \n", - "[[ 0.62151301]\n", - " [ 1.49415982]] \n", + "[[ 0.95444274]\n", + " [ 0.98088616]] \n", "\n", "After 3000 training step(s), w1 is: \n", - "[[ 0.85797542]\n", - " [ 1.32460475]] \n", + "[[ 0.95574027]\n", + " [ 0.9806633 ]] \n", "\n", "After 4000 training step(s), w1 is: \n", - "[[ 1.01017809]\n", - " [ 1.2008121 ]] \n", + "[[ 0.95466018]\n", + " [ 0.98135227]] \n", "\n", "Final w1 is: \n", - "[[ 1.08652937]\n", - " [ 
1.13757098]]\n" + "[[ 0.95525807]\n", + " [ 0.9813394 ]]\n" ] } ], "source": [ "loss_less = 1\n", "loss_more = 10\n", - "loss = tf.reduce_sum(tf.select(tf.greater(y, y_), (y - y_) * loss_more, (y_ - y) * loss_less))\n", + "loss = tf.reduce_sum(tf.where(tf.greater(y, y_), (y - y_) * loss_more, (y_ - y) * loss_less))\n", + "train_step = tf.train.AdamOptimizer(0.001).minimize(loss)\n", + "\n", + "with tf.Session() as sess:\n", + " init_op = tf.global_variables_initializer()\n", + " sess.run(init_op)\n", + " STEPS = 5000\n", + " for i in range(STEPS):\n", + " start = (i*batch_size) % 128\n", + " end = (i*batch_size) % 128 + batch_size\n", + " sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]})\n", + " if i % 1000 == 0:\n", + " print(\"After %d training step(s), w1 is: \" % (i))\n", + " print sess.run(w1), \"\\n\"\n", + " print \"Final w1 is: \\n\", sess.run(w1)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### 6. 定义损失函数为MSE。" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "After 0 training step(s), w1 is: \n", + "[[-0.81031823]\n", + " [ 1.4855988 ]] \n", + "\n", + "After 1000 training step(s), w1 is: \n", + "[[-0.13337609]\n", + " [ 1.81309223]] \n", + "\n", + "After 2000 training step(s), w1 is: \n", + "[[ 0.32190299]\n", + " [ 1.52463484]] \n", + "\n", + "After 3000 training step(s), w1 is: \n", + "[[ 0.67850214]\n", + " [ 1.25297272]] \n", + "\n", + "After 4000 training step(s), w1 is: \n", + "[[ 0.89473999]\n", + " [ 1.08598232]] \n", + "\n", + "Final w1 is: \n", + "[[ 0.97437561]\n", + " [ 1.0243336 ]]\n" + ] + } + ], + "source": [ + "loss = tf.losses.mean_squared_error(y, y_)\n", "train_step = tf.train.AdamOptimizer(0.001).minimize(loss)\n", "\n", "with tf.Session() as sess:\n", @@ -222,4 +284,3 @@ "nbformat": 4, "nbformat_minor": 1 } - diff --git "a/Deep_Learning_with_TensorFlow/0.12.1/Chapter04/1. \350\207\252\345\256\232\344\271\211\346\215\237\345\244\261\345\207\275\346\225\260.ipynb" "b/Deep_Learning_with_TensorFlow/0.12.1/Chapter04/1. \350\207\252\345\256\232\344\271\211\346\215\237\345\244\261\345\207\275\346\225\260.ipynb" index 255392ca..5673bd5b 100644 --- "a/Deep_Learning_with_TensorFlow/0.12.1/Chapter04/1. \350\207\252\345\256\232\344\271\211\346\215\237\345\244\261\345\207\275\346\225\260.ipynb" +++ "b/Deep_Learning_with_TensorFlow/0.12.1/Chapter04/1. 
\350\207\252\345\256\232\344\271\211\346\215\237\345\244\261\345\207\275\346\225\260.ipynb" @@ -52,7 +52,7 @@ "# 定义损失函数使得预测少了的损失大,于是模型应该偏向多的方向预测。\n", "loss_less = 10\n", "loss_more = 1\n", - "loss = tf.reduce_sum(tf.select(tf.greater(y, y_), (y - y_) * loss_more, (y_ - y) * loss_less))\n", + "loss = tf.reduce_sum(tf.where(tf.greater(y, y_), (y - y_) * loss_more, (y_ - y) * loss_less))\n", "train_step = tf.train.AdamOptimizer(0.001).minimize(loss)" ] }, @@ -73,7 +73,7 @@ "source": [ "rdm = RandomState(1)\n", "X = rdm.rand(128,2)\n", - "Y = [[x1+x2+rdm.rand()] for (x1, x2) in X]" + "Y = [[x1+x2+rdm.rand()/10.0-0.05] for (x1, x2) in X]" ] }, { @@ -99,24 +99,24 @@ " [ 1.4855988 ]] \n", "\n", "After 1000 training step(s), w1 is: \n", - "[[ 0.10801763]\n", - " [ 2.33051562]] \n", + "[[ 0.01247112]\n", + " [ 2.1385448 ]] \n", "\n", "After 2000 training step(s), w1 is: \n", - "[[ 0.69744456]\n", - " [ 2.66114545]] \n", + "[[ 0.45567414]\n", + " [ 2.17060661]] \n", "\n", "After 3000 training step(s), w1 is: \n", - "[[ 1.01532638]\n", - " [ 2.6881988 ]] \n", + "[[ 0.69968724]\n", + " [ 1.8465308 ]] \n", "\n", "After 4000 training step(s), w1 is: \n", - "[[ 1.29930842]\n", - " [ 2.56087017]] \n", + "[[ 0.89886665]\n", + " [ 1.29736018]] \n", "\n", "Final w1 is: \n", - "[[ 1.50898325]\n", - " [ 2.31391478]]\n" + "[[ 1.01934695]\n", + " [ 1.04280889]]\n" ] } ], @@ -154,35 +154,97 @@ "output_type": "stream", "text": [ "After 0 training step(s), w1 is: \n", - "[[-0.81031823]\n", - " [ 1.4855988 ]] \n", + "[[-0.81231821]\n", + " [ 1.48359871]] \n", "\n", "After 1000 training step(s), w1 is: \n", - "[[ 0.06754676]\n", - " [ 1.68602526]] \n", + "[[ 0.18643527]\n", + " [ 1.07393336]] \n", "\n", "After 2000 training step(s), w1 is: \n", - "[[ 0.62151301]\n", - " [ 1.49415982]] \n", + "[[ 0.95444274]\n", + " [ 0.98088616]] \n", "\n", "After 3000 training step(s), w1 is: \n", - "[[ 0.85797542]\n", - " [ 1.32460475]] \n", + "[[ 0.95574027]\n", + " [ 0.9806633 ]] \n", "\n", "After 4000 training step(s), w1 is: \n", - "[[ 1.01017809]\n", - " [ 1.2008121 ]] \n", + "[[ 0.95466018]\n", + " [ 0.98135227]] \n", "\n", "Final w1 is: \n", - "[[ 1.08652937]\n", - " [ 1.13757098]]\n" + "[[ 0.95525807]\n", + " [ 0.9813394 ]]\n" ] } ], "source": [ "loss_less = 1\n", "loss_more = 10\n", - "loss = tf.reduce_sum(tf.select(tf.greater(y, y_), (y - y_) * loss_more, (y_ - y) * loss_less))\n", + "loss = tf.reduce_sum(tf.where(tf.greater(y, y_), (y - y_) * loss_more, (y_ - y) * loss_less))\n", + "train_step = tf.train.AdamOptimizer(0.001).minimize(loss)\n", + "\n", + "with tf.Session() as sess:\n", + " init_op = tf.global_variables_initializer()\n", + " sess.run(init_op)\n", + " STEPS = 5000\n", + " for i in range(STEPS):\n", + " start = (i*batch_size) % 128\n", + " end = (i*batch_size) % 128 + batch_size\n", + " sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]})\n", + " if i % 1000 == 0:\n", + " print(\"After %d training step(s), w1 is: \" % (i))\n", + " print sess.run(w1), \"\\n\"\n", + " print \"Final w1 is: \\n\", sess.run(w1)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### 6. 
定义损失函数为MSE。" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "After 0 training step(s), w1 is: \n", + "[[-0.81031823]\n", + " [ 1.4855988 ]] \n", + "\n", + "After 1000 training step(s), w1 is: \n", + "[[-0.13337609]\n", + " [ 1.81309223]] \n", + "\n", + "After 2000 training step(s), w1 is: \n", + "[[ 0.32190299]\n", + " [ 1.52463484]] \n", + "\n", + "After 3000 training step(s), w1 is: \n", + "[[ 0.67850214]\n", + " [ 1.25297272]] \n", + "\n", + "After 4000 training step(s), w1 is: \n", + "[[ 0.89473999]\n", + " [ 1.08598232]] \n", + "\n", + "Final w1 is: \n", + "[[ 0.97437561]\n", + " [ 1.0243336 ]]\n" + ] + } + ], + "source": [ + "loss = tf.losses.mean_squared_error(y, y_)\n", "train_step = tf.train.AdamOptimizer(0.001).minimize(loss)\n", "\n", "with tf.Session() as sess:\n", diff --git "a/Deep_Learning_with_TensorFlow/0.12.1/Chapter04/3. \346\255\243\345\210\231\345\214\226.ipynb" "b/Deep_Learning_with_TensorFlow/0.12.1/Chapter04/3. \346\255\243\345\210\231\345\214\226.ipynb" index 271aad9c..7ba4cfdc 100644 --- "a/Deep_Learning_with_TensorFlow/0.12.1/Chapter04/3. \346\255\243\345\210\231\345\214\226.ipynb" +++ "b/Deep_Learning_with_TensorFlow/0.12.1/Chapter04/3. \346\255\243\345\210\231\345\214\226.ipynb" @@ -107,7 +107,7 @@ " out_dimension = layer_dimension[i]\n", " weight = get_weight([in_dimension, out_dimension], 0.003)\n", " bias = tf.Variable(tf.constant(0.1, shape=[out_dimension]))\n", - " cur_layer = tf.nn.relu(tf.matmul(cur_layer, weight) + bias)\n", + " cur_layer = tf.nn.elu(tf.matmul(cur_layer, weight) + bias)\n", " in_dimension = layer_dimension[i]\n", "\n", "y= cur_layer\n",
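
The second commit swaps the deprecated tf.select for tf.where in the custom-loss notebook and adds an MSE variant of the loss. The standalone sketch below distills that comparison outside the notebook so it can be run directly; it is illustrative only and not part of the patch, it assumes a TensorFlow 1.x environment with NumPy installed, and the synthetic data, network, and hyperparameters mirror the notebook cells shown in the hunks above.

    # -*- coding: utf-8 -*-
    import numpy as np
    import tensorflow as tf

    # Synthetic regression data: y ~ x1 + x2 + small noise, as in the notebook.
    rdm = np.random.RandomState(1)
    X = rdm.rand(128, 2)
    Y = [[x1 + x2 + rdm.rand() / 10.0 - 0.05] for (x1, x2) in X]

    batch_size = 8
    x = tf.placeholder(tf.float32, shape=(None, 2), name="x-input")
    y_ = tf.placeholder(tf.float32, shape=(None, 1), name="y-input")
    w1 = tf.Variable(tf.random_normal([2, 1], stddev=1.0, seed=1))
    y = tf.matmul(x, w1)

    # Asymmetric loss: under-prediction costs loss_less per unit,
    # over-prediction costs loss_more, selected element-wise with tf.where.
    loss_less = 10
    loss_more = 1
    asymmetric_loss = tf.reduce_sum(
        tf.where(tf.greater(y, y_), (y - y_) * loss_more, (y_ - y) * loss_less))

    # Plain mean squared error, the alternative the second commit adds.
    mse_loss = tf.losses.mean_squared_error(y_, y)

    train_step = tf.train.AdamOptimizer(0.001).minimize(asymmetric_loss)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(5000):
            start = (i * batch_size) % 128
            end = start + batch_size
            sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]})
        # With loss_less > loss_more the learned weights settle slightly above 1.
        print(sess.run(w1))

Swapping loss_less and loss_more, or minimizing mse_loss instead of asymmetric_loss, reproduces the other two runs recorded in the notebook output above, where w1 converges slightly below 1 or near [0.97, 1.02] respectively.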