
Commit 38fba9b: fix bug
perhapszzy authored and committed on Jan 5, 2018
1 parent: ac44fb4
Showing 4 changed files with 138 additions and 94 deletions.
[File 1 of 4; filename not shown in this capture]
@@ -23,7 +23,7 @@
"cell_type": "code",
"execution_count": 2,
"metadata": {
"collapsed": false
"collapsed": true
},
"outputs": [],
"source": [
@@ -51,7 +51,9 @@
"source": [
"a = tf.matmul(x, w1)\n",
"y = tf.matmul(a, w2)\n",
"cross_entropy = -tf.reduce_mean(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0))) \n",
"y = tf.sigmoid(y)\n",
"cross_entropy = -tf.reduce_mean(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0))\n",
" + (1 - y_) * tf.log(tf.clip_by_value(1 - y, 1e-10, 1.0)))\n",
"train_step = tf.train.AdamOptimizer(0.001).minimize(cross_entropy)"
]
},
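The hunk above fixes the actual bug: the old loss applied the cross-entropy formula directly to the unactivated output y = tf.matmul(a, w2), which is not a probability, and it dropped the (1 - y_) term entirely. The fix passes the output through tf.sigmoid and uses the full two-sided binary cross entropy, clipping both y and 1 - y so tf.log never sees zero. A minimal sketch of the corrected loss under TensorFlow 1.x, assuming a, w2, and the label placeholder y_ are already defined as in the notebook:

    import tensorflow as tf

    logits = tf.matmul(a, w2)  # raw score, any real number
    y = tf.sigmoid(logits)     # squashed into (0, 1), readable as P(label = 1)

    # Two-sided binary cross entropy; clip_by_value keeps log() away from 0.
    cross_entropy = -tf.reduce_mean(
        y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0))
        + (1 - y_) * tf.log(tf.clip_by_value(1 - y, 1e-10, 1.0)))

    # Equivalent but numerically stabler built-in; note it takes the raw
    # logits, not the sigmoid output.
    stable_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=y_, logits=logits))

This also explains why the printed loss at step 0 jumps from 0.0674925 to 1.89805 in the outputs below: the old and new losses measure different quantities, so their values are not comparable across the fix.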
@@ -85,33 +87,31 @@
 {
 "cell_type": "code",
 "execution_count": 5,
-"metadata": {
-"collapsed": false
-},
+"metadata": {},
 "outputs": [
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"w1: [[-0.81131822 1.48459876 0.06532937]\n",
-" [-2.44270396 0.0992484 0.59122431]]\n",
-"w2: [[-0.81131822]\n",
+"[[-0.81131822 1.48459876 0.06532937]\n",
+" [-2.4427042 0.0992484 0.59122431]]\n",
+"[[-0.81131822]\n",
 " [ 1.48459876]\n",
 " [ 0.06532937]]\n",
 "\n",
 "\n",
-"After 0 training step(s), cross entropy on all data is 0.0674925\n",
-"After 1000 training step(s), cross entropy on all data is 0.0163385\n",
-"After 2000 training step(s), cross entropy on all data is 0.00907547\n",
-"After 3000 training step(s), cross entropy on all data is 0.00714436\n",
-"After 4000 training step(s), cross entropy on all data is 0.00578471\n",
+"After 0 training step(s), cross entropy on all data is 1.89805\n",
+"After 1000 training step(s), cross entropy on all data is 0.655075\n",
+"After 2000 training step(s), cross entropy on all data is 0.626172\n",
+"After 3000 training step(s), cross entropy on all data is 0.615096\n",
+"After 4000 training step(s), cross entropy on all data is 0.610309\n",
 "\n",
 "\n",
-"w1: [[-1.9618274 2.58235407 1.68203783]\n",
-" [-3.4681716 1.06982327 2.11788988]]\n",
-"w2: [[-1.8247149 ]\n",
-" [ 2.68546653]\n",
-" [ 1.41819501]]\n"
+"[[ 0.02476984 0.5694868 1.69219422]\n",
+" [-2.19773483 -0.23668921 1.11438966]]\n",
+"[[-0.45544702]\n",
+" [ 0.49110931]\n",
+" [-0.9811033 ]]\n"
 ]
 }
 ],
@@ -121,25 +121,34 @@
" sess.run(init_op)\n",
" \n",
" # 输出目前(未经训练)的参数取值。\n",
" print \"w1:\", sess.run(w1)\n",
" print \"w2:\", sess.run(w2)\n",
" print \"\\n\"\n",
" print(sess.run(w1))\n",
" print(sess.run(w2))\n",
" print(\"\\n\")\n",
" \n",
" # 训练模型。\n",
" STEPS = 5000\n",
" for i in range(STEPS):\n",
" start = (i*batch_size) % 128\n",
" end = (i*batch_size) % 128 + batch_size\n",
" sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]})\n",
" sess.run([train_step, y, y_], feed_dict={x: X[start:end], y_: Y[start:end]})\n",
" if i % 1000 == 0:\n",
" total_cross_entropy = sess.run(cross_entropy, feed_dict={x: X, y_: Y})\n",
" print(\"After %d training step(s), cross entropy on all data is %g\" % (i, total_cross_entropy))\n",
" \n",
" # 输出训练后的参数取值。\n",
" print \"\\n\"\n",
" print \"w1:\", sess.run(w1)\n",
" print \"w2:\", sess.run(w2)"
" print(\"\\n\")\n",
" print(sess.run(w1))\n",
" print(sess.run(w2))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
}
],
"metadata": {
@@ -158,7 +167,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.10"
"version": "2.7.13"
}
},
"nbformat": 4,
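Putting the hunks together, the fixed notebook's model and training cells amount to the following program. This is a sketch, not the file itself: the cells outside the diff are assumed to define the placeholders, weights, and 128-sample toy dataset that the visible code refers to (x, y_, w1, w2, batch_size, X, Y), and the labeling rule used here (positive when x1 + x2 < 1) is an assumption based on the usual form of this sample:

    import tensorflow as tf
    from numpy.random import RandomState

    batch_size = 8

    # Assumed definitions for the cells not shown in the diff.
    w1 = tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1))
    w2 = tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1))
    x = tf.placeholder(tf.float32, shape=(None, 2), name="x-input")
    y_ = tf.placeholder(tf.float32, shape=(None, 1), name="y-input")

    # Forward pass and the loss as fixed by this commit.
    a = tf.matmul(x, w1)
    y = tf.matmul(a, w2)
    y = tf.sigmoid(y)
    cross_entropy = -tf.reduce_mean(
        y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0))
        + (1 - y_) * tf.log(tf.clip_by_value(1 - y, 1e-10, 1.0)))
    train_step = tf.train.AdamOptimizer(0.001).minimize(cross_entropy)

    # Toy dataset: 128 random points in the unit square (labeling rule assumed).
    rdm = RandomState(1)
    X = rdm.rand(128, 2)
    Y = [[int(x1 + x2 < 1)] for (x1, x2) in X]

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(sess.run(w1))  # parameters before training
        print(sess.run(w2))

        STEPS = 5000
        for i in range(STEPS):
            start = (i * batch_size) % 128
            end = start + batch_size
            sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]})
            if i % 1000 == 0:
                total = sess.run(cross_entropy, feed_dict={x: X, y_: Y})
                print("After %d training step(s), cross entropy on all data is %g"
                      % (i, total))

        print(sess.run(w1))  # parameters after training
        print(sess.run(w2))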
[File 2 of 4; filename not shown in this capture. Same fix applied to another copy of the notebook.]
@@ -23,7 +23,7 @@
"cell_type": "code",
"execution_count": 2,
"metadata": {
"collapsed": false
"collapsed": true
},
"outputs": [],
"source": [
@@ -51,7 +51,9 @@
"source": [
"a = tf.matmul(x, w1)\n",
"y = tf.matmul(a, w2)\n",
"cross_entropy = -tf.reduce_mean(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0))) \n",
"y = tf.sigmoid(y)\n",
"cross_entropy = -tf.reduce_mean(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0))\n",
" + (1 - y_) * tf.log(tf.clip_by_value(1 - y, 1e-10, 1.0)))\n",
"train_step = tf.train.AdamOptimizer(0.001).minimize(cross_entropy)"
]
},
@@ -85,33 +87,31 @@
 {
 "cell_type": "code",
 "execution_count": 5,
-"metadata": {
-"collapsed": false
-},
+"metadata": {},
 "outputs": [
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"w1: [[-0.81131822 1.48459876 0.06532937]\n",
-" [-2.44270396 0.0992484 0.59122431]]\n",
-"w2: [[-0.81131822]\n",
+"[[-0.81131822 1.48459876 0.06532937]\n",
+" [-2.4427042 0.0992484 0.59122431]]\n",
+"[[-0.81131822]\n",
 " [ 1.48459876]\n",
 " [ 0.06532937]]\n",
 "\n",
 "\n",
-"After 0 training step(s), cross entropy on all data is 0.0674925\n",
-"After 1000 training step(s), cross entropy on all data is 0.0163385\n",
-"After 2000 training step(s), cross entropy on all data is 0.00907547\n",
-"After 3000 training step(s), cross entropy on all data is 0.00714436\n",
-"After 4000 training step(s), cross entropy on all data is 0.00578471\n",
+"After 0 training step(s), cross entropy on all data is 1.89805\n",
+"After 1000 training step(s), cross entropy on all data is 0.655075\n",
+"After 2000 training step(s), cross entropy on all data is 0.626172\n",
+"After 3000 training step(s), cross entropy on all data is 0.615096\n",
+"After 4000 training step(s), cross entropy on all data is 0.610309\n",
 "\n",
 "\n",
-"w1: [[-1.9618274 2.58235407 1.68203783]\n",
-" [-3.4681716 1.06982327 2.11788988]]\n",
-"w2: [[-1.8247149 ]\n",
-" [ 2.68546653]\n",
-" [ 1.41819501]]\n"
+"[[ 0.02476984 0.5694868 1.69219422]\n",
+" [-2.19773483 -0.23668921 1.11438966]]\n",
+"[[-0.45544702]\n",
+" [ 0.49110931]\n",
+" [-0.9811033 ]]\n"
 ]
 }
 ],
@@ -121,25 +121,34 @@
" sess.run(init_op)\n",
" \n",
" # 输出目前(未经训练)的参数取值。\n",
" print \"w1:\", sess.run(w1)\n",
" print \"w2:\", sess.run(w2)\n",
" print \"\\n\"\n",
" print(sess.run(w1))\n",
" print(sess.run(w2))\n",
" print(\"\\n\")\n",
" \n",
" # 训练模型。\n",
" STEPS = 5000\n",
" for i in range(STEPS):\n",
" start = (i*batch_size) % 128\n",
" end = (i*batch_size) % 128 + batch_size\n",
" sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]})\n",
" sess.run([train_step, y, y_], feed_dict={x: X[start:end], y_: Y[start:end]})\n",
" if i % 1000 == 0:\n",
" total_cross_entropy = sess.run(cross_entropy, feed_dict={x: X, y_: Y})\n",
" print(\"After %d training step(s), cross entropy on all data is %g\" % (i, total_cross_entropy))\n",
" \n",
" # 输出训练后的参数取值。\n",
" print \"\\n\"\n",
" print \"w1:\", sess.run(w1)\n",
" print \"w2:\", sess.run(w2)"
" print(\"\\n\")\n",
" print(sess.run(w1))\n",
" print(sess.run(w2))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
}
],
"metadata": {
@@ -158,7 +167,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.10"
"version": "2.7.13"
}
},
"nbformat": 4,
[File 3 of 4; filename not shown in this capture. Same fix applied to a third copy of the notebook.]
@@ -51,7 +51,9 @@
"source": [
"a = tf.matmul(x, w1)\n",
"y = tf.matmul(a, w2)\n",
"cross_entropy = -tf.reduce_mean(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0))) \n",
"y = tf.sigmoid(y)\n",
"cross_entropy = -tf.reduce_mean(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0))\n",
" + (1 - y_) * tf.log(tf.clip_by_value(1 - y, 1e-10, 1.0)))\n",
"train_step = tf.train.AdamOptimizer(0.001).minimize(cross_entropy)"
]
},
@@ -85,31 +87,33 @@
 {
 "cell_type": "code",
 "execution_count": 5,
-"metadata": {},
+"metadata": {
+"scrolled": true
+},
 "outputs": [
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"w1: [[-0.81131822 1.48459876 0.06532937]\n",
+"[[-0.81131822 1.48459876 0.06532937]\n",
 " [-2.4427042 0.0992484 0.59122431]]\n",
-"w2: [[-0.81131822]\n",
+"[[-0.81131822]\n",
 " [ 1.48459876]\n",
 " [ 0.06532937]]\n",
 "\n",
 "\n",
-"After 0 training step(s), cross entropy on all data is 0.0674925\n",
-"After 1000 training step(s), cross entropy on all data is 0.0163385\n",
-"After 2000 training step(s), cross entropy on all data is 0.00907547\n",
-"After 3000 training step(s), cross entropy on all data is 0.00714436\n",
-"After 4000 training step(s), cross entropy on all data is 0.00578471\n",
+"After 0 training step(s), cross entropy on all data is 1.89805\n",
+"After 1000 training step(s), cross entropy on all data is 0.655075\n",
+"After 2000 training step(s), cross entropy on all data is 0.626172\n",
+"After 3000 training step(s), cross entropy on all data is 0.615096\n",
+"After 4000 training step(s), cross entropy on all data is 0.610309\n",
 "\n",
 "\n",
-"w1: [[-1.96182752 2.58235407 1.68203771]\n",
-" [-3.46817183 1.06982315 2.11788988]]\n",
-"w2: [[-1.82471502]\n",
-" [ 2.68546653]\n",
-" [ 1.41819501]]\n"
+"[[ 0.02476984 0.5694868 1.69219422]\n",
+" [-2.19773483 -0.23668921 1.11438966]]\n",
+"[[-0.45544702]\n",
+" [ 0.49110931]\n",
+" [-0.9811033 ]]\n"
 ]
 }
 ],
@@ -119,25 +123,34 @@
" sess.run(init_op)\n",
" \n",
" # 输出目前(未经训练)的参数取值。\n",
" print \"w1:\", sess.run(w1)\n",
" print \"w2:\", sess.run(w2)\n",
" print \"\\n\"\n",
" print(sess.run(w1))\n",
" print(sess.run(w2))\n",
" print(\"\\n\")\n",
" \n",
" # 训练模型。\n",
" STEPS = 5000\n",
" for i in range(STEPS):\n",
" start = (i*batch_size) % 128\n",
" end = (i*batch_size) % 128 + batch_size\n",
" sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]})\n",
" sess.run([train_step, y, y_], feed_dict={x: X[start:end], y_: Y[start:end]})\n",
" if i % 1000 == 0:\n",
" total_cross_entropy = sess.run(cross_entropy, feed_dict={x: X, y_: Y})\n",
" print(\"After %d training step(s), cross entropy on all data is %g\" % (i, total_cross_entropy))\n",
" \n",
" # 输出训练后的参数取值。\n",
" print \"\\n\"\n",
" print \"w1:\", sess.run(w1)\n",
" print \"w2:\", sess.run(w2)"
" print(\"\\n\")\n",
" print(sess.run(w1))\n",
" print(sess.run(w2))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
}
],
"metadata": {
[File 4 of 4; diff not loaded in this capture.]
