Commit

Created using Colaboratory
udlbook committed Nov 13, 2023
1 parent 34235a3 commit b9238e3
Showing 1 changed file with 8 additions and 8 deletions.
16 changes: 8 additions & 8 deletions Notebooks/Chap07/7_2_Backpropagation.ipynb
@@ -4,7 +4,7 @@
"metadata": {
"colab": {
"provenance": [],
"authorship_tag": "ABX9TyN2nPVR0imZntgj4Oasyvmo",
"authorship_tag": "ABX9TyM8DZv6WppyaQxi8igoKV+X",
"include_colab_link": true
},
"kernelspec": {
@@ -143,7 +143,7 @@
" # Run through the layers, calculating all_f[0...K-1] and all_h[1...K]\n",
" for layer in range(K):\n",
" # Update preactivations and activations at this layer according to eqn 7.16\n",
" # Remember to use np.matmul for matrix multiplications\n",
" # Remmember to use np.matmul for matrrix multiplications\n",
" # TODO -- Replace the lines below\n",
" all_f[layer] = all_h[layer]\n",
" all_h[layer+1] = all_f[layer]\n",
@@ -244,28 +244,28 @@
" all_dl_dh = [None] * (K+1)\n",
" # Again for convenience we'll stick with the convention that all_h[0] is the net input and all_f[k] in the net output\n",
"\n",
" # Compute derivatives of net output with respect to loss\n",
" # Compute derivatives of the loss with respect to the network output\n",
" all_dl_df[K] = np.array(d_loss_d_output(all_f[K],y))\n",
"\n",
" # Now work backwards through the network\n",
" for layer in range(K,-1,-1):\n",
" # TODO Calculate the derivatives of biases at layer this from all_dl_df[layer]. (eq 7.21)\n",
" # TODO Calculate the derivatives of the loss with respect to the biases at layer this from all_dl_df[layer]. (eq 7.21)\n",
" # NOTE! To take a copy of matrix X, use Z=np.array(X)\n",
" # REPLACE THIS LINE\n",
" all_dl_dbiases[layer] = np.zeros_like(all_biases[layer])\n",
"\n",
" # TODO Calculate the derivatives of weight at layer from all_dl_df[K] and all_h[K] (eq 7.22)\n",
" # TODO Calculate the derivatives of the loss with respect to the weights at layer from all_dl_df[K] and all_h[K] (eq 7.22)\n",
" # Don't forget to use np.matmul\n",
" # REPLACE THIS LINE\n",
" all_dl_dweights[layer] = np.zeros_like(all_weights[layer])\n",
"\n",
" # TODO: calculate the derivatives of activations from weight and derivatives of next preactivations (eq 7.20)\n",
" # TODO: calculate the derivatives of the loss with respect to th eactivations from weight and derivatives of next preactivations (eq 7.20)\n",
" # REPLACE THIS LINE\n",
" all_dl_dh[layer] = np.zeros_like(all_h[layer])\n",
"\n",
"\n",
" if layer > 0:\n",
" # TODO Calculate the derivatives of the pre-activation f with respect to activation h (deriv of ReLu function)\n",
" # TODO Calculate the derivatives of the loss with respect to the pre-activation f with respect to activation h (deriv of ReLu function)\n",
" # REPLACE THIS LINE\n",
" all_dl_df[layer-1] = np.zeros_like(all_f[layer-1])\n",
"\n",
@@ -299,7 +299,7 @@
"# Let's test if we have the derivatives right using finite differences\n",
"delta_fd = 0.000001\n",
"\n",
"# Test the derivatives of the bias vectors\n",
"# Test the dervatives of the bias vectors\n",
"for layer in range(K):\n",
" dl_dbias = np.zeros_like(all_dl_dbiases[layer])\n",
" # For every element in the bias\n",