From 6c77976a97b582d7a72c0eca4185e48631068908 Mon Sep 17 00:00:00 2001
From: Sung Kim
Date: Fri, 3 Nov 2017 22:35:57 +0800
Subject: [PATCH] Fixed typos

---
 .gitignore                          |  1 +
 tutorials/PyTorchDataParallel.ipynb | 23 ++++++++++++-----------
 2 files changed, 13 insertions(+), 11 deletions(-)

diff --git a/.gitignore b/.gitignore
index b919e1b..1d0811c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,6 +5,7 @@
 *.pt
 X*
 ppts
+.ipynb_checkpoints
 client_secret.json
 __pycache__/
 tmp
diff --git a/tutorials/PyTorchDataParallel.ipynb b/tutorials/PyTorchDataParallel.ipynb
index a5fb50a..cc557b5 100644
--- a/tutorials/PyTorchDataParallel.ipynb
+++ b/tutorials/PyTorchDataParallel.ipynb
@@ -37,16 +37,17 @@
     "```python\n",
     "mytensor = my_tensor.gpu()\n",
     "```\n",
-    "Please note that just calling `mytensor.gpu()` won't copy the tesor to GPU. You need to assign it to a new tensor and use the tensor on GPU.\n",
+    "Please note that just calling `mytensor.gpu()` won't copy the tensor to the GPU. You need to assign it to a new tensor and use that tensor on the GPU.\n",
     "\n",
-    "Furthermore, it's natural to execute your long-waiting forward, backward propagations on multiple GPUs. Unfortunatly, PyTorch won't do that automatically for you. Not yet. (It will just use one GPU for you.) \n",
+    "Furthermore, it's natural to want to run your long forward and backward propagations on multiple GPUs. Unfortunately, PyTorch won't do that automatically for you. Not yet. (It will just use one GPU for you.)\n",
     "\n",
-    "However, running your operations on multiple GPUs is very easy. Jusy you need to make your model dataparallelable using this. \n",
+    "However, running your operations on multiple GPUs is very easy. You just need to make your model data-parallel like this:\n",
     "```python\n",
     "model = nn.DataParallel(model)\n",
     "```\n",
     "\n",
-    "That's it. If you want to know more, here we are!\n"
+    "That's it. If you want to know more, here we are!\n",
+    "\n"
    ]
   },
   {
@@ -57,7 +58,7 @@
    },
    "source": [
     "## Imports and parameters\n",
-    "Let's import our favorate core PyTorch things and define some paramters."
+    "Let's import our favorite core PyTorch things and define some parameters."
    ]
   },
   {
@@ -140,9 +141,9 @@
    },
    "source": [
     "## Simple Model\n",
-    "Then, we need a model to run. For DataParallel demo, let's make a simple one. Just get an input and do a linear operation, and output . However, you can make any model including CNN, RNN or even Capsule Net for `DataParallel`.\n",
+    "Then, we need a model to run. For the DataParallel demo, let's make a simple one: it just takes an input, applies a linear operation, and gives an output. However, you can use any model, including a CNN, an RNN, or even a Capsule Net, with `DataParallel`.\n",
     "\n",
-    "Inside of the model, we just put a print statement to monitor the szie of input and outout tensors. Please pay attention on the batch part, rank 0 when they print out something."
+    "Inside the model, we just put a print statement to monitor the sizes of the input and output tensors. Please pay attention to the batch dimension (dimension 0) when they print something out."
    ]
   },
   {
@@ -184,7 +185,7 @@
    },
    "source": [
     "## Create Model and DataParallel\n",
-    "Here is the core part. First make a model instance, and check if you have multiple GPUs. (If you don't, I feel sorry for you.) If you have, just wrap our model using `nn.DataParallel`. That's it. I know, it's hard to believe, but that's really it!\n",
+    "Here is the core part. First, make a model instance and check whether you have multiple GPUs. (If you don't, I feel sorry for you.) If you do, just wrap the model in `nn.DataParallel`. That's it. I know, it's hard to believe, but that's really it!\n",
     "\n",
     "Then, finally put your model on GPU by `model.gpu()`. It's simple and beautiful.\n"
    ]
@@ -223,7 +224,7 @@
    },
    "source": [
     "## Fun part\n",
-    "Now it's the fun part. Just get data from the dataloader and see what are the size of input and out tensors!"
+    "Now it's the fun part. Just get data from the dataloader and look at the sizes of the input and output tensors!"
    ]
   },
   {
@@ -295,9 +296,9 @@
    },
    "source": [
     "## Didn't you see?\n",
-    "Hmm, did you see something working here? It seems just batch 30 input and outout 30. The model gets 30 and spit out 30. Nothing special.\n",
+    "Hmm, did you see anything special here? It looks like an ordinary run: we put in a batch of 30 and get 30 back. The model gets 30 and spits out 30. Nothing special.\n",
     "\n",
-    "BUT, Wait! This notebook (or yours) does not have GPUs. If you have GPUs, the execution looks like this, caled DataParallel!\n",
+    "But wait! This notebook (or yours) does not have GPUs. If you have GPUs, the execution looks like the following. That is DataParallel!\n",
     "\n",
     "### 2 GPUs\n",
     "```bash\n",
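One aside on the patched tutorial text: `my_tensor.gpu()` and `model.gpu()` are not actual PyTorch methods; the real call is `.cuda()` (or `.to(device)` in later releases). Below is a minimal, self-contained sketch of the data-parallel pattern the notebook walks through, written against current PyTorch. The toy sizes (`input_size`, `output_size`, `batch_size`, `data_size`) are illustrative assumptions, not values taken from this patch.

```python
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset

# Toy sizes for illustration (assumed, not from the patch).
input_size, output_size = 5, 2
batch_size, data_size = 30, 100

class Model(nn.Module):
    """A bare linear layer that reports the tensor sizes it sees."""
    def __init__(self, input_size, output_size):
        super().__init__()
        self.fc = nn.Linear(input_size, output_size)

    def forward(self, x):
        out = self.fc(x)
        # Under DataParallel, each GPU replica prints only its slice of the batch.
        print("  Inside: input size", x.size(), "output size", out.size())
        return out

model = Model(input_size, output_size)
if torch.cuda.device_count() > 1:
    # Replicate the model across all visible GPUs; inputs are scattered
    # along dimension 0 and the outputs gathered back automatically.
    model = nn.DataParallel(model)
if torch.cuda.is_available():
    model = model.cuda()  # the real method is .cuda(), not the .gpu() in the prose

loader = DataLoader(TensorDataset(torch.randn(data_size, input_size)),
                    batch_size=batch_size, shuffle=True)
for (data,) in loader:
    if torch.cuda.is_available():
        data = data.cuda()
    output = model(data)
    print("Outside: input size", data.size(), "output size", output.size())
```

On a machine with two GPUs, `DataParallel` scatters each batch of 30 into two slices of 15, so the inner print fires once per replica with a size-15 slice while the outer print still sees the full batch of 30. That is the effect the notebook's `### 2 GPUs` output goes on to show.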