From b9238e35dcf942204ae7e3a22b0e43cb2bd76fe2 Mon Sep 17 00:00:00 2001
From: udlbook <110402648+udlbook@users.noreply.github.com>
Date: Mon, 13 Nov 2023 21:27:38 +0000
Subject: [PATCH] Created using Colaboratory

---
 Notebooks/Chap07/7_2_Backpropagation.ipynb | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/Notebooks/Chap07/7_2_Backpropagation.ipynb b/Notebooks/Chap07/7_2_Backpropagation.ipynb
index c52cf465..93c928c4 100644
--- a/Notebooks/Chap07/7_2_Backpropagation.ipynb
+++ b/Notebooks/Chap07/7_2_Backpropagation.ipynb
@@ -4,7 +4,7 @@
   "metadata": {
     "colab": {
       "provenance": [],
-      "authorship_tag": "ABX9TyN2nPVR0imZntgj4Oasyvmo",
+      "authorship_tag": "ABX9TyM8DZv6WppyaQxi8igoKV+X",
       "include_colab_link": true
     },
     "kernelspec": {
@@ -143,7 +143,7 @@
         "  # Run through the layers, calculating all_f[0...K-1] and all_h[1...K]\n",
         "  for layer in range(K):\n",
         "    # Update preactivations and activations at this layer according to eqn 7.16\n",
-        "    # Remmember to use np.matmul for matrrix multiplications\n",
+        "    # Remember to use np.matmul for matrix multiplications\n",
         "    # TODO -- Replace the lines below\n",
         "    all_f[layer] = all_h[layer]\n",
         "    all_h[layer+1] = all_f[layer]\n",
@@ -244,28 +244,28 @@
         "  all_dl_dh = [None] * (K+1)\n",
         "  # Again for convenience we'll stick with the convention that all_h[0] is the net input and all_f[k] in the net output\n",
         "\n",
-        "  # Compute derivatives of net output with respect to loss\n",
+        "  # Compute derivatives of the loss with respect to the network output\n",
         "  all_dl_df[K] = np.array(d_loss_d_output(all_f[K],y))\n",
         "\n",
         "  # Now work backwards through the network\n",
         "  for layer in range(K,-1,-1):\n",
-        "    # TODO Calculate the derivatives of biases at layer this from all_dl_df[layer]. (eq 7.21)\n",
+        "    # TODO Calculate the derivatives of the loss with respect to the biases at this layer from all_dl_df[layer] (eq 7.21)\n",
         "    # NOTE! To take a copy of matrix X, use Z=np.array(X)\n",
         "    # REPLACE THIS LINE\n",
         "    all_dl_dbiases[layer] = np.zeros_like(all_biases[layer])\n",
         "\n",
-        "    # TODO Calculate the derivatives of weight at layer from all_dl_df[K] and all_h[K] (eq 7.22)\n",
+        "    # TODO Calculate the derivatives of the loss with respect to the weights at this layer from all_dl_df[layer] and all_h[layer] (eq 7.22)\n",
         "    # Don't forget to use np.matmul\n",
         "    # REPLACE THIS LINE\n",
         "    all_dl_dweights[layer] = np.zeros_like(all_weights[layer])\n",
         "\n",
-        "    # TODO: calculate the derivatives of activations from weight and derivatives of next preactivations (eq 7.20)\n",
+        "    # TODO: calculate the derivatives of the loss with respect to the activations from the weights and the derivatives of the next preactivations (eq 7.20)\n",
         "    # REPLACE THIS LINE\n",
         "    all_dl_dh[layer] = np.zeros_like(all_h[layer])\n",
         "\n",
         "\n",
         "    if layer > 0:\n",
-        "      # TODO Calculate the derivatives of the pre-activation f with respect to activation h (deriv of ReLu function)\n",
+        "      # TODO Calculate the derivatives of the loss with respect to the pre-activations f from the derivatives with respect to the activations h (use the derivative of the ReLU function)\n",
         "      # REPLACE THIS LINE\n",
         "      all_dl_df[layer-1] = np.zeros_like(all_f[layer-1])\n",
         "\n",
@@ -299,7 +299,7 @@
         "# Let's test if we have the derivatives right using finite differences\n",
         "delta_fd = 0.000001\n",
         "\n",
-        "# Test the dervatives of the bias vectors\n",
+        "# Test the derivatives of the bias vectors\n",
         "for layer in range(K):\n",
         "  dl_dbias = np.zeros_like(all_dl_dbiases[layer])\n",
         "  # For every element in the bias\n",
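
The forward-pass TODO in the second hunk refers to eqn 7.16 of the book (the preactivation/activation update f_k = beta_k + Omega_k h_k, h_{k+1} = a[f_k]). The following is a minimal sketch of what a completed forward pass could look like, reusing the notebook's variable names (all_f, all_h, all_weights, all_biases) but written as a standalone function; it is an illustration under those assumptions, not the notebook's intended solution.

import numpy as np

def ReLU(preactivation):
  # Elementwise rectified linear unit
  return np.clip(preactivation, 0, None)

def forward_pass_sketch(net_input, all_weights, all_biases):
  # Assumes K hidden layers, so there are K+1 weight matrices and bias vectors
  K = len(all_weights) - 1
  all_f = [None] * (K + 1)      # pre-activations f_0 ... f_K
  all_h = [None] * (K + 1)      # activations; all_h[0] is the network input
  all_h[0] = net_input
  for layer in range(K):
    # Eqn 7.16: f_k = beta_k + Omega_k h_k, then h_{k+1} = ReLU(f_k)
    all_f[layer] = all_biases[layer] + np.matmul(all_weights[layer], all_h[layer])
    all_h[layer + 1] = ReLU(all_f[layer])
  # Final linear layer gives the network output all_f[K]
  all_f[K] = all_biases[K] + np.matmul(all_weights[K], all_h[K])
  return all_f[K], all_f, all_h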
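
The backward-pass TODOs in the third hunk reference eqs 7.20-7.22 (activations, biases, and weights respectively). Under the same conventions as above (column-vector activations, ReLU hidden units, all_dl_df[K] holding the derivative of the loss with respect to the network output), one possible sketch of that loop is shown below; again, this is illustrative rather than the notebook's own answer.

def backward_pass_sketch(all_weights, all_f, all_h, dl_df_output):
  # dl_df_output: derivative of the loss with respect to the network output all_f[K]
  K = len(all_weights) - 1
  all_dl_dweights = [None] * (K + 1)
  all_dl_dbiases = [None] * (K + 1)
  all_dl_df = [None] * (K + 1)
  all_dl_dh = [None] * (K + 1)
  all_dl_df[K] = np.array(dl_df_output)
  for layer in range(K, -1, -1):
    # Eq 7.21: derivative of the loss w.r.t. the biases equals the derivative w.r.t. the pre-activations
    all_dl_dbiases[layer] = np.array(all_dl_df[layer])
    # Eq 7.22: derivative of the loss w.r.t. the weights is (dl/df_k) h_k^T
    all_dl_dweights[layer] = np.matmul(all_dl_df[layer], all_h[layer].T)
    # Eq 7.20: derivative of the loss w.r.t. the activations is Omega_k^T (dl/df_k)
    all_dl_dh[layer] = np.matmul(all_weights[layer].T, all_dl_df[layer])
    if layer > 0:
      # Chain through the ReLU: dl/df_{k-1} = I[f_{k-1} > 0] * dl/dh_k
      all_dl_df[layer - 1] = (all_f[layer - 1] > 0) * all_dl_dh[layer]
  return all_dl_dweights, all_dl_dbiases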
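
The final hunk checks the analytic derivatives against finite differences with delta_fd = 0.000001. The sketch below shows that idea for a single bias element; compute_loss(net_input, y, all_weights, all_biases) is a hypothetical helper standing in for whatever loss function the notebook defines.

def fd_bias_derivative_sketch(compute_loss, net_input, y, all_weights, all_biases,
                              layer, row, delta_fd=0.000001):
  # Perturb one bias element, re-evaluate the loss, then restore the original value
  loss_0 = compute_loss(net_input, y, all_weights, all_biases)
  all_biases[layer][row] += delta_fd
  loss_1 = compute_loss(net_input, y, all_weights, all_biases)
  all_biases[layer][row] -= delta_fd
  # Forward-difference estimate of dl/dbias[layer][row]
  return (loss_1 - loss_0) / delta_fd

If the backward pass is correct, this estimate should agree with all_dl_dbiases[layer][row] to several decimal places.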