# Distributed Autograd Design

This note presents the detailed design for distributed autograd and walks
through its internals. Make sure you're familiar with
[Autograd mechanics](../notes/autograd.html#autograd-mechanics) and the
[Distributed RPC Framework](../rpc.html) before proceeding.
## Background

Let's say you have two nodes and a very simple model partitioned across those
two nodes. This can be implemented using `torch.distributed.rpc` as follows:
```python
import torch
import torch.distributed.rpc as rpc

def my_add(t1, t2):
    return torch.add(t1, t2)

# On worker 0:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)

# Perform some computation remotely.
t3 = rpc.rpc_sync("worker1", my_add, args=(t1, t2))

# Perform some computation locally based on remote result.
t4 = torch.rand((3, 3), requires_grad=True)
t5 = torch.mul(t3, t4)

# Compute some loss.
loss = t5.sum()
```
The main motivation behind distributed autograd is to enable running a backward
pass on such distributed models with the `loss` that we've computed, recording
appropriate gradients for all tensors that require gradients.
## Autograd recording during the forward pass

PyTorch builds the autograd graph during the forward pass, and this graph is
used to execute the backward pass. For more details see
[How autograd encodes the history](../notes/autograd.html#how-autograd-encodes-history).

For distributed autograd, we need to keep track of all RPCs during the forward
pass to ensure the backward pass is executed appropriately. For this purpose,
we attach `send` and `recv` functions to the autograd graph when we perform an
RPC:
- The `send` function is attached to the source of the RPC, and its output
  edges point to the autograd function for the input tensors of the RPC.
  The input for this function during the backward pass is received from the
  destination as the output of the appropriate `recv` function.
- The `recv` function is attached to the destination of the RPC, and its
  inputs are retrieved from operators executed on the destination using the
  input tensors. The output gradients of this function are sent to the
  appropriate `send` function on the source node during the backward pass.
- Each `send`-`recv` pair is assigned a globally unique `autograd_message_id`
  to uniquely identify the pair. This is useful for looking up the
  corresponding function on a remote node during the backward pass.
- For [RRef](../rpc.html#rref), whenever we call
  `torch.distributed.rpc.RRef.to_here()` we attach an appropriate
  `send`-`recv` pair for the tensors involved (see the sketch after the figure
  below).
As an example, this is what the autograd graph for our example above would
look like (`t5.sum()` excluded for simplicity):

![send and recv functions in the autograd graph](../_images/send_recv_functions.png)
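As a minimal sketch of the `RRef` case above (assuming RPC has already been
initialized on workers named `"worker0"` and `"worker1"`), fetching a remote
tensor with `to_here()` inside a distributed autograd context records the
`send`-`recv` pair so the remote tensor participates in the distributed
backward pass:

```python
import torch
import torch.distributed.rpc as rpc
import torch.distributed.autograd as dist_autograd

with dist_autograd.context() as context_id:
    # Create a tensor remotely on worker1; rref references it there.
    rref = rpc.remote("worker1", torch.rand,
                      args=((3, 3),), kwargs={"requires_grad": True})
    # to_here() fetches the value and attaches a send (on worker1) /
    # recv (here) pair for the tensor.
    t = rref.to_here()
    loss = t.sum()
    # The backward pass flows through recv back to worker1.
    dist_autograd.backward(context_id, [loss])
```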
## Distributed Autograd Context

Each forward and backward pass that uses distributed autograd is assigned a
unique `torch.distributed.autograd.context`, and this context has a globally
unique `autograd_context_id`. This context is created on each node as needed.

This context serves the following purposes:
1. Multiple nodes running distributed backward passes might accumulate
   gradients on the same tensor, and as a result the `.grad` field of the
   tensor would hold gradients from a variety of distributed backward passes
   before we have the opportunity to run the optimizer. This is similar to
   calling `torch.autograd.backward()` multiple times locally. In order to
   provide a way of separating out the gradients for each backward pass, the
   gradients are accumulated in the `torch.distributed.autograd.context` for
   each backward pass (see the sketch after this list).
2. During the forward pass we store the `send` and `recv` functions for each
   autograd pass in this context. This ensures we hold references to the
   appropriate nodes in the autograd graph to keep it alive. In addition, it
   makes it easy to look up the appropriate `send` and `recv` functions during
   the backward pass.
3. In general we also use this context to store some metadata for each
   distributed autograd pass.
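The following sketch illustrates point 1 (assuming RPC has been initialized;
the graph here is purely local for brevity): each backward pass accumulates
into its own context's gradient map, retrievable via `get_gradients()`,
rather than into the tensor's `.grad` field.

```python
import torch
import torch.distributed.autograd as dist_autograd

t = torch.rand((3, 3), requires_grad=True)

with dist_autograd.context() as ctx1:
    dist_autograd.backward(ctx1, [(t * 2).sum()])
    grads1 = dist_autograd.get_gradients(ctx1)  # {t: gradient of first loss}

with dist_autograd.context() as ctx2:
    dist_autograd.backward(ctx2, [(t * 3).sum()])
    grads2 = dist_autograd.get_gradients(ctx2)  # {t: gradient of second loss}

# t.grad is untouched; the two passes' gradients never mix.
```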
From the user's perspective, the autograd context is set up as follows:
```python
import torch.distributed.autograd as dist_autograd

with dist_autograd.context() as context_id:
    loss = model.forward()
    dist_autograd.backward(context_id, [loss])
```
It is important to note that your model's forward pass must be invoked within
the distributed autograd context manager, as a valid context is needed to
ensure that all `send` and `recv` functions are stored properly to run the
backward pass across all participating nodes.
## Distributed Backward Pass

In this section we outline the challenge of computing dependencies accurately
during a distributed backward pass and describe a couple of algorithms (with
tradeoffs) for executing a distributed backward pass.

### Computing dependencies

Consider the following piece of code being run on a single machine:
```python
import torch

a = torch.rand((3, 3), requires_grad=True)
b = torch.rand((3, 3), requires_grad=True)
c = torch.rand((3, 3), requires_grad=True)
d = a + b
e = b * c
d.sum().backward()
```
This is what the autograd graph for the code above would look like:

![local autograd graph with dependencies](../_images/local_dependencies.png)
The first step the autograd engine performs as part of the backward pass is
computing the number of dependencies for each node in the autograd graph. This
helps the autograd engine know when a node in the graph is ready for execution.
The numbers in brackets for `add(1)` and `mul(0)` denote the number of
dependencies. As you can see, this means that during the backward pass the
`add` node needs 1 input and the `mul` node doesn't need any inputs (in other
words, it doesn't need to be executed). The local autograd engine computes
these dependencies by traversing the graph from the root nodes (`d` in this
case).
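A quick local sketch verifies the point above: since the backward pass starts
from `d`, the `mul` branch is never executed and `c` receives no gradient.

```python
import torch

a = torch.rand((3, 3), requires_grad=True)
b = torch.rand((3, 3), requires_grad=True)
c = torch.rand((3, 3), requires_grad=True)
d = a + b
e = b * c
d.sum().backward()

print(a.grad is not None)  # True: the add branch was executed
print(c.grad is None)      # True: the mul branch was never executed
```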
The fact that certain nodes in the autograd graph might not be executed in the
backward pass poses a challenge for distributed autograd. Consider this piece
of code which uses RPC:
```python
import torch
import torch.distributed.rpc as rpc

a = torch.rand((3, 3), requires_grad=True)
b = torch.rand((3, 3), requires_grad=True)
c = torch.rand((3, 3), requires_grad=True)

d = rpc.rpc_sync("worker1", torch.add, args=(a, b))
e = rpc.rpc_sync("worker1", torch.mul, args=(b, c))
loss = d.sum()
```
The associated autograd graph for the code above would be:

![distributed autograd graph](../_images/distributed_dependencies.png)
Computing dependencies of this distributed autograd graph is much more
challenging and requires some overhead (either in terms of computation or
network communication).

For performance-sensitive applications we can avoid a lot of overhead by
assuming every `send` and `recv` function is a valid part of the backward pass
(most applications don't perform RPCs that aren't used). This simplifies the
distributed autograd algorithm and is much more efficient, at the cost that
the application needs to be aware of the limitations. This algorithm is called
the [FAST mode algorithm](#fast-mode-algorithm) and is described in detail
below.

In the general case it might not be necessary that every `send` and `recv`
function is a valid part of the backward pass. To address this, we have
proposed a [SMART mode algorithm](#smart-mode-algorithm), which is described
in a later section. Please note that currently only the FAST mode algorithm
is implemented.
<div class="section" id="fast-mode-algorithm">
<span id="id2"></span><h3>FAST mode algorithm<a class="headerlink" href="#fast-mode-algorithm" title="Permalink to this headline">¶</a></h3>
<p>The key assumption of this algorithm is that each <code class="docutils literal notranslate"><span class="pre">send</span></code> function has a
dependency of 1 when we run a backward pass. In other words, we assume we’ll
receive a gradient over RPC from another node.</p>
<p>The algorithm is as follows:</p>
1. We start from the worker which has the roots for the backward pass (all
   roots must be local).
2. Look up all the `send` functions for the current
   [Distributed Autograd Context](#distributed-autograd-context).
3. Compute dependencies locally starting from the provided roots and all the
   `send` functions we retrieved.
4. After computing dependencies, kick off the local autograd engine with the
   provided roots.
5. When the autograd engine executes the `recv` function, the `recv` function
   sends the input gradients via RPC to the appropriate worker. Each `recv`
   function knows the destination worker id since it is recorded as part of
   the forward pass. The `recv` function also sends over the
   `autograd_context_id` and `autograd_message_id` to the remote host.
6. When this request is received on the remote host, we use the
   `autograd_context_id` and `autograd_message_id` to look up the appropriate
   `send` function.
7. If this is the first time a worker has received a request for the given
   `autograd_context_id`, it will compute dependencies locally as described in
   points 1-3 above.
8. The `send` function retrieved in step 6 is then enqueued for execution on
   the local autograd engine for that worker.
9. Finally, instead of accumulating the gradients on the `.grad` field of the
   Tensor, we accumulate the gradients separately per
   [Distributed Autograd Context](#distributed-autograd-context). The
   gradients are stored in a `Dict[Tensor, Tensor]`, which is basically a map
   from Tensor to its associated gradient, and this map can be retrieved using
   the `get_gradients()` API.
As an example, the complete code with distributed autograd would be as follows:
```python
import torch
import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc

def my_add(t1, t2):
    return torch.add(t1, t2)

# On worker 0:

# Setup the autograd context. Computations that take
# part in the distributed backward pass must be within
# the distributed autograd context manager.
with dist_autograd.context() as context_id:
    t1 = torch.rand((3, 3), requires_grad=True)
    t2 = torch.rand((3, 3), requires_grad=True)

    # Perform some computation remotely.
    t3 = rpc.rpc_sync("worker1", my_add, args=(t1, t2))

    # Perform some computation locally based on remote result.
    t4 = torch.rand((3, 3), requires_grad=True)
    t5 = torch.mul(t3, t4)

    # Compute some loss.
    loss = t5.sum()

    # Run the backward pass.
    dist_autograd.backward(context_id, [loss])

    # Retrieve the gradients from the context.
    dist_autograd.get_gradients(context_id)
```
The distributed autograd graph with dependencies would be as follows
(`t5.sum()` excluded for simplicity):

![distributed autograd graph with computed dependencies](../_images/distributed_dependencies_computed.png)
The [FAST mode algorithm](#fast-mode-algorithm) applied to the above example
would work as follows:

1. On `Worker 0` we start from the roots `loss` and `send1` to compute
   dependencies. As a result, `send1` is marked with a dependency of 1 and
   `mul` on `Worker 0` is marked with a dependency of 1.
2. Now, we kick off the local autograd engine on `Worker 0`. We first execute
   the `mul` function and accumulate its output in the autograd context as the
   gradient for `t4`. Then, we execute `recv2`, which sends the gradients to
   `Worker 1`.
3. Since this is the first time `Worker 1` has heard about this backward pass,
   it starts dependency computation and marks the dependencies for `send2`,
   `add` and `recv1` appropriately.
4. Next, we enqueue `send2` on the local autograd engine of `Worker 1`, which
   in turn executes `add` and `recv1`.
5. When `recv1` is executed, it sends the gradients over to `Worker 0`.
6. Since `Worker 0` has already computed dependencies for this backward pass,
   it just enqueues and executes `send1` locally.
7. Finally, gradients for `t1`, `t2` and `t4` are accumulated in the
   [Distributed Autograd Context](#distributed-autograd-context).
### SMART mode algorithm

Full details of this algorithm are still in the works, but for the general
idea you can refer to the **Distributed Autograd Algorithm Smart mode**
section in the [RFC](https://github.com/pytorch/pytorch/issues/23110).
## Distributed Optimizer

The [DistributedOptimizer](../distributed.optim.html#torch.distributed.optim.DistributedOptimizer) operates as follows:
1. Takes a list of remote parameters
   ([RRef](../rpc.html#torch.distributed.rpc.RRef)) to optimize. These could
   also be local parameters wrapped within a local `RRef`.
2. Takes an [Optimizer](../optim.html#torch.optim.Optimizer) class as the
   local optimizer to run on all distinct `RRef` owners.
3. The distributed optimizer creates an instance of the local `Optimizer` on
   each of the worker nodes and holds an `RRef` to them.
4. When `torch.distributed.optim.DistributedOptimizer.step()` is invoked, the
   distributed optimizer uses RPC to remotely execute all the local optimizers
   on the appropriate remote workers. A distributed autograd `context_id` must
   be provided as input to
   `torch.distributed.optim.DistributedOptimizer.step()`. This is used by the
   local optimizers to apply gradients stored in the corresponding context.
5. If multiple concurrent distributed optimizers are updating the same
   parameters on a worker, these updates are serialized via a lock.
## Simple end to end example

Putting it all together, the following is a simple end-to-end example using
distributed autograd and the distributed optimizer. If the code is placed into
a file called "dist_autograd_simple.py", it can be run with the command
`MASTER_ADDR="localhost" MASTER_PORT=29500 python dist_autograd_simple.py`:
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="kn">import</span> <span class="nn">torch</span>
<span class="kn">import</span> <span class="nn">torch.multiprocessing</span> <span class="k">as</span> <span class="nn">mp</span>
<span class="kn">import</span> <span class="nn">torch.distributed.autograd</span> <span class="k">as</span> <span class="nn">dist_autograd</span>
<span class="kn">from</span> <span class="nn">torch.distributed</span> <span class="kn">import</span> <span class="n">rpc</span>
<span class="kn">from</span> <span class="nn">torch</span> <span class="kn">import</span> <span class="n">optim</span>
<span class="kn">from</span> <span class="nn">torch.distributed.optim</span> <span class="kn">import</span> <span class="n">DistributedOptimizer</span>
<span class="k">def</span> <span class="nf">random_tensor</span><span class="p">():</span>
<span class="k">return</span> <span class="n">torch</span><span class="o">.</span><span class="n">rand</span><span class="p">((</span><span class="mi">3</span><span class="p">,</span> <span class="mi">3</span><span class="p">),</span> <span class="n">requires_grad</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="k">def</span> <span class="nf">_run_process</span><span class="p">(</span><span class="n">rank</span><span class="p">,</span> <span class="n">dst_rank</span><span class="p">,</span> <span class="n">world_size</span><span class="p">):</span>
<span class="n">name</span> <span class="o">=</span> <span class="s2">"worker</span><span class="si">{}</span><span class="s2">"</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">rank</span><span class="p">)</span>
<span class="n">dst_name</span> <span class="o">=</span> <span class="s2">"worker</span><span class="si">{}</span><span class="s2">"</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">dst_rank</span><span class="p">)</span>
<span class="c1"># Initialize RPC.</span>
<span class="n">rpc</span><span class="o">.</span><span class="n">init_rpc</span><span class="p">(</span>
<span class="n">name</span><span class="o">=</span><span class="n">name</span><span class="p">,</span>
<span class="n">rank</span><span class="o">=</span><span class="n">rank</span><span class="p">,</span>
<span class="n">world_size</span><span class="o">=</span><span class="n">world_size</span>
<span class="p">)</span>
<span class="c1"># Use a distributed autograd context.</span>
<span class="k">with</span> <span class="n">dist_autograd</span><span class="o">.</span><span class="n">context</span><span class="p">()</span> <span class="k">as</span> <span class="n">context_id</span><span class="p">:</span>
<span class="c1"># Forward pass (create references on remote nodes).</span>
<span class="n">rref1</span> <span class="o">=</span> <span class="n">rpc</span><span class="o">.</span><span class="n">remote</span><span class="p">(</span><span class="n">dst_name</span><span class="p">,</span> <span class="n">random_tensor</span><span class="p">)</span>
<span class="n">rref2</span> <span class="o">=</span> <span class="n">rpc</span><span class="o">.</span><span class="n">remote</span><span class="p">(</span><span class="n">dst_name</span><span class="p">,</span> <span class="n">random_tensor</span><span class="p">)</span>
<span class="n">loss</span> <span class="o">=</span> <span class="n">rref1</span><span class="o">.</span><span class="n">to_here</span><span class="p">()</span> <span class="o">+</span> <span class="n">rref2</span><span class="o">.</span><span class="n">to_here</span><span class="p">()</span>
<span class="c1"># Backward pass (run distributed autograd).</span>
<span class="n">dist_autograd</span><span class="o">.</span><span class="n">backward</span><span class="p">(</span><span class="n">context_id</span><span class="p">,</span> <span class="p">[</span><span class="n">loss</span><span class="o">.</span><span class="n">sum</span><span class="p">()])</span>
<span class="c1"># Build DistributedOptimizer.</span>
<span class="n">dist_optim</span> <span class="o">=</span> <span class="n">DistributedOptimizer</span><span class="p">(</span>
<span class="n">optim</span><span class="o">.</span><span class="n">SGD</span><span class="p">,</span>
<span class="p">[</span><span class="n">rref1</span><span class="p">,</span> <span class="n">rref2</span><span class="p">],</span>
<span class="n">lr</span><span class="o">=</span><span class="mf">0.05</span><span class="p">,</span>
<span class="p">)</span>
<span class="c1"># Run the distributed optimizer step.</span>
<span class="n">dist_optim</span><span class="o">.</span><span class="n">step</span><span class="p">(</span><span class="n">context_id</span><span class="p">)</span>
<span class="k">def</span> <span class="nf">run_process</span><span class="p">(</span><span class="n">rank</span><span class="p">,</span> <span class="n">world_size</span><span class="p">):</span>
<span class="n">dst_rank</span> <span class="o">=</span> <span class="p">(</span><span class="n">rank</span> <span class="o">+</span> <span class="mi">1</span><span class="p">)</span> <span class="o">%</span> <span class="n">world_size</span>
<span class="n">_run_process</span><span class="p">(</span><span class="n">rank</span><span class="p">,</span> <span class="n">dst_rank</span><span class="p">,</span> <span class="n">world_size</span><span class="p">)</span>
<span class="n">rpc</span><span class="o">.</span><span class="n">shutdown</span><span class="p">()</span>
<span class="k">if</span> <span class="vm">__name__</span> <span class="o">==</span> <span class="s1">'__main__'</span><span class="p">:</span>
<span class="c1"># Run world_size workers</span>
<span class="n">world_size</span> <span class="o">=</span> <span class="mi">2</span>
<span class="n">mp</span><span class="o">.</span><span class="n">spawn</span><span class="p">(</span><span class="n">run_process</span><span class="p">,</span> <span class="n">args</span><span class="o">=</span><span class="p">(</span><span class="n">world_size</span><span class="p">,),</span> <span class="n">nprocs</span><span class="o">=</span><span class="n">world_size</span><span class="p">)</span>
</pre></div>
</div>
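<p>Note that the gradients computed in this example never appear in the tensors’
<code class="docutils literal notranslate"><span class="pre">.grad</span></code> fields; they are stored in the distributed
autograd context, where the local optimizers retrieve them during the step. The following
single-worker sketch shows how to inspect them with
<code class="docutils literal notranslate"><span class="pre">dist_autograd.get_gradients()</span></code>. The single-process
setup, tensor shapes, and environment variables are illustrative assumptions, not part of
the design:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre>import os
import torch
import torch.distributed.autograd as dist_autograd
from torch.distributed import rpc

# Single-process RPC setup, for illustration only.
os.environ.setdefault("MASTER_ADDR", "localhost")
os.environ.setdefault("MASTER_PORT", "29500")
rpc.init_rpc(name="worker0", rank=0, world_size=1)

with dist_autograd.context() as context_id:
    t1 = torch.rand((3, 3), requires_grad=True)
    t2 = torch.rand((3, 3), requires_grad=True)
    loss = (t1 + t2).sum()
    dist_autograd.backward(context_id, [loss])

    # Gradients live in the context, keyed by Tensor.
    grads = dist_autograd.get_gradients(context_id)
    assert t1.grad is None        # .grad is untouched
    print(grads[t1])              # dLoss/dt1, a 3x3 tensor of ones

rpc.shutdown()
</pre></div>
</div>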
</div>
</div>
</article>
</div>
</div>
</div>
<div class="pytorch-content-right" id="pytorch-content-right">
<div class="pytorch-right-menu" id="pytorch-right-menu">
<div class="pytorch-side-scroll" id="pytorch-side-scroll-right">
<ul>
<li><a class="reference internal" href="#">Distributed Autograd Design</a><ul>
<li><a class="reference internal" href="#background">Background</a></li>
<li><a class="reference internal" href="#autograd-recording-during-the-forward-pass">Autograd recording during the forward pass</a></li>
<li><a class="reference internal" href="#distributed-autograd-context">Distributed Autograd Context</a></li>
<li><a class="reference internal" href="#distributed-backward-pass">Distributed Backward Pass</a><ul>
<li><a class="reference internal" href="#computing-dependencies">Computing dependencies</a></li>
<li><a class="reference internal" href="#fast-mode-algorithm">FAST mode algorithm</a></li>
<li><a class="reference internal" href="#smart-mode-algorithm">SMART mode algorithm</a></li>
</ul>
</li>
<li><a class="reference internal" href="#distributed-optimizer">Distributed Optimizer</a></li>
<li><a class="reference internal" href="#simple-end-to-end-example">Simple end to end example</a></li>
</ul>
</li>
</ul>
</div>
</div>
</div>
</section>
</div>
<script type="text/javascript" id="documentation_options" data-url_root="../" src="../_static/documentation_options.js"></script>
<script src="../_static/jquery.js"></script>
<script src="../_static/underscore.js"></script>
<script src="../_static/doctools.js"></script>
<script src="../_static/clipboard.min.js"></script>
<script src="../_static/copybutton.js"></script>
<script type="text/javascript" src="../_static/js/vendor/popper.min.js"></script>
<script type="text/javascript" src="../_static/js/vendor/bootstrap.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/list.js/1.5.0/list.min.js"></script>
<script type="text/javascript" src="../_static/js/theme.js"></script>
<script type="text/javascript">
jQuery(function () {
SphinxRtdTheme.Navigation.enable(true);
});
</script>
<script script type="text/javascript">
var collapsedSections = ['Notes', 'Language Bindings', 'Libraries', 'Community'];
</script>
<img height="1" width="1" style="border-style:none;" alt="" src="https://www.googleadservices.com/pagead/conversion/795629140/?label=txkmCPmdtosBENSssfsC&guid=ON&script=0"/>
<!-- Begin Footer -->
<div class="container-fluid docs-tutorials-resources" id="docs-tutorials-resources">
<div class="container">
<div class="row">
<div class="col-md-4 text-center">
<h2>Docs</h2>
<p>Access comprehensive developer documentation for PyTorch</p>
<a class="with-right-arrow" href="https://pytorch.org/docs/stable/index.html">View Docs</a>
</div>
<div class="col-md-4 text-center">
<h2>Tutorials</h2>
<p>Get in-depth tutorials for beginners and advanced developers</p>
<a class="with-right-arrow" href="https://pytorch.org/tutorials">View Tutorials</a>
</div>
<div class="col-md-4 text-center">
<h2>Resources</h2>
<p>Find development resources and get your questions answered</p>
<a class="with-right-arrow" href="https://pytorch.org/resources">View Resources</a>
</div>
</div>
</div>
</div>
<footer class="site-footer">
<div class="container footer-container">
<div class="footer-logo-wrapper">
<a href="https://pytorch.org/" class="footer-logo"></a>
</div>
<div class="footer-links-wrapper">
<div class="footer-links-col">
<ul>
<li class="list-title"><a href="https://pytorch.org/">PyTorch</a></li>
<li><a href="https://pytorch.org/get-started">Get Started</a></li>
<li><a href="https://pytorch.org/features">Features</a></li>
<li><a href="https://pytorch.org/ecosystem">Ecosystem</a></li>
<li><a href="https://pytorch.org/blog/">Blog</a></li>
<li><a href="https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md">Contributing</a></li>
</ul>
</div>
<div class="footer-links-col">
<ul>
<li class="list-title"><a href="https://pytorch.org/resources">Resources</a></li>
<li><a href="https://pytorch.org/tutorials">Tutorials</a></li>
<li><a href="https://pytorch.org/docs/stable/index.html">Docs</a></li>
<li><a href="https://discuss.pytorch.org" target="_blank">Discuss</a></li>
<li><a href="https://github.com/pytorch/pytorch/issues" target="_blank">Github Issues</a></li>
<li><a href="https://pytorch.org/assets/brand-guidelines/PyTorch-Brand-Guidelines.pdf" target="_blank">Brand Guidelines</a></li>
</ul>
</div>
<div class="footer-links-col follow-us-col">
<ul>
<li class="list-title">Stay Connected</li>
<li>
<div id="mc_embed_signup">
<form
action="https://twitter.us14.list-manage.com/subscribe/post?u=75419c71fe0a935e53dfa4a3f&id=91d0dccd39"
method="post"
id="mc-embedded-subscribe-form"
name="mc-embedded-subscribe-form"
class="email-subscribe-form validate"
target="_blank"
novalidate>
<div id="mc_embed_signup_scroll" class="email-subscribe-form-fields-wrapper">
<div class="mc-field-group">
<label for="mce-EMAIL" style="display:none;">Email Address</label>
<input type="email" value="" name="EMAIL" class="required email" id="mce-EMAIL" placeholder="Email Address">
</div>
<div id="mce-responses" class="clear">
<div class="response" id="mce-error-response" style="display:none"></div>
<div class="response" id="mce-success-response" style="display:none"></div>
</div> <!-- real people should not fill this in and expect good things - do not remove this or risk form bot signups-->
<div style="position: absolute; left: -5000px;" aria-hidden="true"><input type="text" name="b_75419c71fe0a935e53dfa4a3f_91d0dccd39" tabindex="-1" value=""></div>
<div class="clear">
<input type="submit" value="" name="subscribe" id="mc-embedded-subscribe" class="button email-subscribe-button">
</div>
</div>
</form>
</div>
</li>
</ul>
<div class="footer-social-icons">
<a href="https://www.facebook.com/pytorch" target="_blank" class="facebook"></a>
<a href="https://twitter.com/pytorch" target="_blank" class="twitter"></a>
<a href="https://www.youtube.com/pytorch" target="_blank" class="youtube"></a>
</div>
</div>
</div>
</div>
</footer>
<div class="cookie-banner-wrapper">
<div class="container">
<p class="gdpr-notice">To analyze traffic and optimize your experience, we serve cookies on this site. By clicking or navigating, you agree to allow our usage of cookies. As the current maintainers of this site, Facebook’s Cookies Policy applies. Learn more, including about available controls: <a href="https://www.facebook.com/policies/cookies/">Cookies Policy</a>.</p>
<img class="close-button" src="../_static/images/pytorch-x.svg">
</div>
</div>
<!-- End Footer -->
<!-- Begin Mobile Menu -->
<div class="mobile-main-menu">
<div class="container-fluid">
<div class="container">
<div class="mobile-main-menu-header-container">
<a class="header-logo" href="https://pytorch.org/" aria-label="PyTorch"></a>
<a class="main-menu-close-button" href="#" data-behavior="close-mobile-menu"></a>
</div>
</div>
</div>
<div class="mobile-main-menu-links-container">
<div class="main-menu">
<ul>
<li>
<a href="https://pytorch.org/get-started">Get Started</a>
</li>
<li>
<a href="https://pytorch.org/ecosystem">Ecosystem</a>
</li>
<li>
<a href="https://pytorch.org/mobile">Mobile</a>
</li>
<li>
<a href="https://pytorch.org/hub">PyTorch Hub</a>
</li>
<li>
<a href="https://pytorch.org/blog/">Blog</a>
</li>
<li>
<a href="https://pytorch.org/tutorials">Tutorials</a>
</li>
<li class="resources-mobile-menu-title" class="active">
Docs