diff --git a/Notebooks/Chap02/2_1_Supervised_Learning.ipynb b/Notebooks/Chap02/2_1_Supervised_Learning.ipynb
index c7be987f..42f33447 100644
--- a/Notebooks/Chap02/2_1_Supervised_Learning.ipynb
+++ b/Notebooks/Chap02/2_1_Supervised_Learning.ipynb
@@ -174,7 +174,7 @@
 {
  "cell_type": "code",
  "source": [
-  "# TO DO -- Change the parameters manually to fit the model\n",
+  "# TODO -- Change the parameters manually to fit the model\n",
   "# First fix phi1 and try changing phi0 until you can't make the loss go down any more\n",
   "# Then fix phi0 and try changing phi1 until you can't make the loss go down any more\n",
   "# Repeat this process until you find a set of parameters that fit the model as in figure 2.2d\n",
diff --git a/Notebooks/Chap06/6_5_Adam.ipynb b/Notebooks/Chap06/6_5_Adam.ipynb
index 211fc580..bb975d44 100644
--- a/Notebooks/Chap06/6_5_Adam.ipynb
+++ b/Notebooks/Chap06/6_5_Adam.ipynb
@@ -185,11 +185,11 @@
   "  for c_step in range(n_steps):\n",
   "    # Measure the gradient as in equation 6.13 (first line)\n",
   "    m = get_loss_gradient(grad_path[0,c_step], grad_path[1,c_step]);\n",
-  "    # TO DO -- compute the squared gradient as in equation 6.13 (second line)\n",
+  "    # TODO -- compute the squared gradient as in equation 6.13 (second line)\n",
   "    # Replace this line:\n",
   "    v = np.ones_like(grad_path[:,0])\n",
   "\n",
-  "    # TO DO -- apply the update rule (equation 6.14)\n",
+  "    # TODO -- apply the update rule (equation 6.14)\n",
   "    # Replace this line:\n",
   "    grad_path[:,c_step+1] = grad_path[:,c_step]\n",
   "\n",
@@ -254,7 +254,7 @@
   "    v_tilde = v\n",
   "\n",
   "\n",
-  "    # TO DO -- apply the update rule (equation 6.17)\n",
+  "    # TODO -- apply the update rule (equation 6.17)\n",
   "    # Replace this line:\n",
   "    grad_path[:,c_step+1] = grad_path[:,c_step]\n",
   "\n",
diff --git a/Notebooks/Chap07/7_2_Backpropagation.ipynb b/Notebooks/Chap07/7_2_Backpropagation.ipynb
index dac7e89c..b3f3cd8a 100644
--- a/Notebooks/Chap07/7_2_Backpropagation.ipynb
+++ b/Notebooks/Chap07/7_2_Backpropagation.ipynb
@@ -148,7 +148,7 @@
   "    all_h[layer+1] = all_f[layer]\n",
   "\n",
   "  # Compute the output from the last hidden layer\n",
-  "  # TO DO -- Replace the line below\n",
+  "  # TODO -- Replace the line below\n",
   "  all_f[K] = np.zeros_like(all_biases[-1])\n",
   "\n",
   "  # Retrieve the output\n",
diff --git a/Notebooks/Chap10/10_4_Downsampling_and_Upsampling.ipynb b/Notebooks/Chap10/10_4_Downsampling_and_Upsampling.ipynb
index 4010a6c4..d0585a5a 100644
--- a/Notebooks/Chap10/10_4_Downsampling_and_Upsampling.ipynb
+++ b/Notebooks/Chap10/10_4_Downsampling_and_Upsampling.ipynb
@@ -73,7 +73,7 @@
  "source": [
   "def subsample(x_in):\n",
   "  x_out = np.zeros(( int(np.ceil(x_in.shape[0]/2)), int(np.ceil(x_in.shape[1]/2)) ))\n",
-  "  # TO DO -- write the subsampling routine\n",
+  "  # TODO -- write the subsampling routine\n",
   "  # Replace this line\n",
   "  x_out = x_out\n",
   "\n",
@@ -159,7 +159,7 @@
   "# Now let's try max-pooling\n",
   "def maxpool(x_in):\n",
   "  x_out = np.zeros(( int(np.floor(x_in.shape[0]/2)), int(np.floor(x_in.shape[1]/2)) ))\n",
-  "  # TO DO -- write the maxpool routine\n",
+  "  # TODO -- write the maxpool routine\n",
   "  # Replace this line\n",
   "  x_out = x_out\n",
   "\n",
@@ -230,7 +230,7 @@
   "# Finally, let's try mean pooling\n",
   "def meanpool(x_in):\n",
   "  x_out = np.zeros(( int(np.floor(x_in.shape[0]/2)), int(np.floor(x_in.shape[1]/2)) ))\n",
-  "  # TO DO -- write the meanpool routine\n",
+  "  # TODO -- write the meanpool routine\n",
   "  # Replace this line\n",
   "  x_out = x_out\n",
   "\n",
@@ -316,7 +316,7 @@
   "# Let's first use the duplication method\n",
   "def duplicate(x_in):\n",
   "  x_out = np.zeros(( x_in.shape[0]*2, x_in.shape[1]*2 ))\n",
-  "  # TO DO -- write the duplication routine\n",
+  "  # TODO -- write the duplication routine\n",
   "  # Replace this line\n",
   "  x_out = x_out\n",
   "\n",
@@ -388,7 +388,7 @@
   "# The input x_high_res is the original high res image, from which you can deduce the position of the maximum index\n",
   "def max_unpool(x_in, x_high_res):\n",
   "  x_out = np.zeros(( x_in.shape[0]*2, x_in.shape[1]*2 ))\n",
-  "  # TO DO -- write the subsampling routine\n",
+  "  # TODO -- write the max unpooling routine\n",
   "  # Replace this line\n",
   "  x_out = x_out\n",
   "\n",
@@ -460,7 +460,7 @@
   "  x_out = np.zeros(( x_in.shape[0]*2, x_in.shape[1]*2 ))\n",
   "  x_in_pad = np.zeros((x_in.shape[0]+1, x_in.shape[1]+1))\n",
   "  x_in_pad[0:x_in.shape[0],0:x_in.shape[1]] = x_in\n",
-  "  # TO DO -- write the duplication routine\n",
+  "  # TODO -- write the duplication routine\n",
   "  # Replace this line\n",
   "  x_out = x_out\n",
   "\n",
diff --git a/Notebooks/Chap21/21_1_Bias_Mitigation.ipynb b/Notebooks/Chap21/21_1_Bias_Mitigation.ipynb
index 2235dd0b..8dbcc8bd 100644
--- a/Notebooks/Chap21/21_1_Bias_Mitigation.ipynb
+++ b/Notebooks/Chap21/21_1_Bias_Mitigation.ipynb
@@ -328,7 +328,7 @@
 },
 "outputs": [],
 "source": [
-  "# TO DO -- try to change the two thresholds so the overall probability of getting the loan is 0.6 for each group\n",
+  "# TODO -- try to change the two thresholds so the overall probability of getting the loan is 0.6 for each group\n",
   "# Change the values in these lines\n",
   "tau0 = 0.3\n",
   "tau1 = -0.1\n",
@@ -393,7 +393,7 @@
 },
 "outputs": [],
 "source": [
-  "# TO DO -- try to change the two thresholds so the true positive are 0.8 for each group\n",
+  "# TODO -- try to change the two thresholds so the true positive rate is 0.8 for each group\n",
   "# Change the values in these lines so that both points on the curves have a height of 0.8\n",
   "tau0 = -0.1\n",
   "tau1 = -0.7\n",
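
Reviewer notes on the TODO cells touched above -- hedged reference sketches, not part of the patch. First, 2_1_Supervised_Learning.ipynb: the exercise minimizes a least-squares loss by adjusting phi0 and phi1 one coordinate at a time. A minimal sketch of that loss; the `x`, `y` arrays and starting values here are invented for illustration, since the notebook defines its own data.

```python
import numpy as np

# Illustrative data only -- the notebook supplies its own x and y arrays
x = np.array([0.0, 0.25, 0.5, 0.75, 1.0])
y = np.array([0.25, 0.35, 0.6, 0.7, 0.95])

def least_squares_loss(phi0, phi1):
  # Sum of squared residuals of the line y = phi0 + phi1 * x
  pred = phi0 + phi1 * x
  return np.sum((pred - y) ** 2)

# Coordinate descent by hand: hold phi1 fixed and nudge phi0 until the loss
# stops falling, then hold phi0 fixed and nudge phi1, and repeat
print(least_squares_loss(0.2, 0.8))
```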
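For the 6_5_Adam.ipynb TODOs, a sketch of the normalized-gradient step the comments describe (squared gradient from equation 6.13, update from equation 6.14), plus the bias-corrected variant, assuming equation 6.17 is the standard Adam update with corrected moments m_tilde, v_tilde. The `alpha` and `epsilon` values are illustrative, not the notebook's.

```python
import numpy as np

def normalized_step(phi, grad, alpha=0.05, epsilon=1e-4):
    # Equation 6.13: the gradient and its elementwise square
    m = grad
    v = grad ** 2
    # Equation 6.14: a fixed-size move whose direction comes from the gradient sign
    return phi - alpha * m / (np.sqrt(v) + epsilon)

def adam_step(phi, m_tilde, v_tilde, alpha=0.05, epsilon=1e-4):
    # Equation 6.17 (assumed to be the standard Adam update): the same normalized
    # move, computed from the bias-corrected moment estimates m_tilde, v_tilde
    return phi - alpha * m_tilde / (np.sqrt(v_tilde) + epsilon)
```

In the notebook's loop the first pair of TODOs would then read roughly `v = m * m` followed by `grad_path[:,c_step+1] = grad_path[:,c_step] - alpha * m / (np.sqrt(v) + epsilon)`.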
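For the 7_2_Backpropagation.ipynb TODO, the output of the network is a plain affine map of the last hidden activations, with no ReLU on the final layer. A toy-shaped sketch; `all_weights` is an assumption for the weight list paired with `all_biases` in the notebook, and the shapes are invented.

```python
import numpy as np

K = 2  # number of hidden layers in this toy setup
all_weights = [np.ones((4, 3)), np.ones((4, 4)), np.ones((1, 4))]  # assumed name
all_biases  = [np.zeros((4, 1)), np.zeros((4, 1)), np.zeros((1, 1))]
all_h = {K: np.ones((4, 1))}  # activations of the last hidden layer

# The TODO line: output pre-activation f_K = beta_K + Omega_K h_K
all_f_K = all_biases[-1] + np.matmul(all_weights[-1], all_h[K])
print(all_f_K.shape)  # (1, 1)
```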
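For the three downsampling TODOs in 10_4_Downsampling_and_Upsampling.ipynb, minimal sketches that match the output sizes the notebook pre-allocates: ceil() for subsampling (odd edges kept), floor() for the two pooling routines (odd edges dropped).

```python
import numpy as np

def subsample(x_in):
    # Keep every second row and column
    return x_in[::2, ::2]

def maxpool(x_in):
    # Non-overlapping 2x2 windows with stride 2
    h, w = x_in.shape[0] // 2, x_in.shape[1] // 2
    x_out = np.zeros((h, w))
    for i in range(h):
        for j in range(w):
            x_out[i, j] = np.max(x_in[2*i:2*i+2, 2*j:2*j+2])
    return x_out

def meanpool(x_in):
    # Same windows as maxpool, but averaging
    h, w = x_in.shape[0] // 2, x_in.shape[1] // 2
    x_out = np.zeros((h, w))
    for i in range(h):
        for j in range(w):
            x_out[i, j] = np.mean(x_in[2*i:2*i+2, 2*j:2*j+2])
    return x_out
```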
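For the upsampling TODOs, sketches of duplication and max unpooling. Max unpooling routes each low-res value back to the position that won the original 2x2 max-pool, recovered here by re-inspecting `x_high_res` as the notebook's comment suggests; ties go to the first maximum, a choice this sketch makes for simplicity.

```python
import numpy as np

def duplicate(x_in):
    # Copy each input pixel into the corresponding 2x2 block of the output
    return np.repeat(np.repeat(x_in, 2, axis=0), 2, axis=1)

def max_unpool(x_in, x_high_res):
    # Place each low-res value at its window's argmax position; zeros elsewhere
    x_out = np.zeros((x_in.shape[0] * 2, x_in.shape[1] * 2))
    for i in range(x_in.shape[0]):
        for j in range(x_in.shape[1]):
            block = x_high_res[2*i:2*i+2, 2*j:2*j+2]
            di, dj = np.unravel_index(np.argmax(block), block.shape)
            x_out[2*i + di, 2*j + dj] = x_in[i, j]
    return x_out
```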