<div class="section" id="torch-sparse">
<span id="sparse-docs"></span><h1>torch.sparse<a class="headerlink" href="#torch-sparse" title="Permalink to this headline">¶</a></h1>
<div class="section" id="introduction">
<h2>Introduction<a class="headerlink" href="#introduction" title="Permalink to this headline">¶</a></h2>
PyTorch provides `torch.Tensor` to represent a multi-dimensional array containing elements of a single data type. By default, array elements are stored contiguously in memory, leading to efficient implementations of various array processing algorithms that rely on fast access to array elements. However, there exists an important class of multi-dimensional arrays, so-called sparse arrays, where the contiguous memory storage of array elements turns out to be suboptimal. In sparse arrays, the vast majority of elements are equal to zero, which means that a lot of memory as well as processor resources can be spared if only the non-zero elements are stored and/or processed. Various sparse storage formats ([such as COO, CSR/CSC, LIL, etc.](https://en.wikipedia.org/wiki/Sparse_matrix)) have been developed that are optimized for a particular structure of non-zero elements in sparse arrays, as well as for specific operations on the arrays.
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>When talking about storing only non-zero elements of a sparse
array, the usage of adjective “non-zero” is not strict: one is
allowed to store also zeros in the sparse array data
structure. Hence, in the following, we use “specified elements” for
those array elements that are actually stored. In addition, the
unspecified elements are typically assumed to have zero value, but
not only, hence we use the term “fill value” to denote such
elements.</p>
</div>
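For instance, explicitly stored zeros still count as specified elements (a small illustrative snippet, not part of the original reference):

```python
>>> t = torch.sparse_coo_tensor([[0]], [0.0], (3,))  # store an explicit zero at index 0
>>> t._nnz()   # the zero is a *specified* element, so nnz is 1, not 0
1
>>> t.to_dense()
tensor([0., 0., 0.])
```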
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>Using a sparse storage format for storing sparse arrays can be
advantageous only when the size and sparsity levels of arrays are
high. Otherwise, for small-sized or low-sparsity arrays using the
contiguous memory storage format is likely the most efficient
approach.</p>
</div>
<div class="admonition warning">
<p class="admonition-title">Warning</p>
<p>The PyTorch API of sparse tensors is in beta and may change in the near future.</p>
</div>
## Sparse COO tensors

PyTorch implements the so-called Coordinate format, or COO format, as one of the storage formats for implementing sparse tensors. In COO format, the specified elements are stored as tuples of element indices and the corresponding values. In particular,
- the indices of specified elements are collected in an `indices` tensor of size `(ndim, nse)` and with element type `torch.int64`,
- the corresponding values are collected in a `values` tensor of size `(nse,)` and with an arbitrary integer or floating point number element type,

where `ndim` is the dimensionality of the tensor and `nse` is the number of specified elements.
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>The memory consumption of a sparse COO tensor is at least <code class="docutils literal notranslate"><span class="pre">(ndim</span> <span class="pre">*</span>
<span class="pre">8</span> <span class="pre">+</span> <span class="pre"><size</span> <span class="pre">of</span> <span class="pre">element</span> <span class="pre">type</span> <span class="pre">in</span> <span class="pre">bytes>)</span> <span class="pre">*</span> <span class="pre">nse</span></code> bytes (plus a constant
overhead from storing other tensor data).</p>
<p>The memory consumption of a strided tensor is at least
<code class="docutils literal notranslate"><span class="pre">product(<tensor</span> <span class="pre">shape>)</span> <span class="pre">*</span> <span class="pre"><size</span> <span class="pre">of</span> <span class="pre">element</span> <span class="pre">type</span> <span class="pre">in</span> <span class="pre">bytes></span></code>.</p>
<p>For example, the memory consumption of a 10 000 x 10 000 tensor
with 100 000 non-zero 32-bit floating point numbers is at least
<code class="docutils literal notranslate"><span class="pre">(2</span> <span class="pre">*</span> <span class="pre">8</span> <span class="pre">+</span> <span class="pre">4)</span> <span class="pre">*</span> <span class="pre">100</span> <span class="pre">000</span> <span class="pre">=</span> <span class="pre">2</span> <span class="pre">000</span> <span class="pre">000</span></code> bytes when using COO tensor
layout and <code class="docutils literal notranslate"><span class="pre">10</span> <span class="pre">000</span> <span class="pre">*</span> <span class="pre">10</span> <span class="pre">000</span> <span class="pre">*</span> <span class="pre">4</span> <span class="pre">=</span> <span class="pre">400</span> <span class="pre">000</span> <span class="pre">000</span></code> bytes when using
the default strided tensor layout. Notice the 200 fold memory
saving from using the COO storage format.</p>
</div>
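These estimates can be checked with a few lines of arithmetic (a minimal sketch using the numbers from the example above):

```python
# Illustrative check of the memory estimates above.
ndim, nse, itemsize = 2, 100_000, 4      # 10 000 x 10 000 float32 with 100 000 specified elements
coo_bytes = (ndim * 8 + itemsize) * nse  # indices are stored as torch.int64 (8 bytes each)
dense_bytes = 10_000 * 10_000 * itemsize
print(coo_bytes, dense_bytes, dense_bytes // coo_bytes)  # 2000000 400000000 200
```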
<div class="section" id="construction">
<h3>Construction<a class="headerlink" href="#construction" title="Permalink to this headline">¶</a></h3>
<p>A sparse COO tensor can be constructed by providing the two tensors of
indices and values, as well as the size of the sparse tensor (when it
cannot be inferred from the indices and values tensors) to a function
<a class="reference internal" href="generated/torch.sparse_coo_tensor.html#torch.sparse_coo_tensor" title="torch.sparse_coo_tensor"><code class="xref py py-func docutils literal notranslate"><span class="pre">torch.sparse_coo_tensor()</span></code></a>.</p>
<p>Suppose we want to define a sparse tensor with the entry 3 at location
(0, 2), entry 4 at location (1, 0), and entry 5 at location (1, 2).
Unspecified elements are assumed to have the same value, fill value,
which is zero by default. We would then write:</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="n">i</span> <span class="o">=</span> <span class="p">[[</span><span class="mi">0</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">],</span>
<span class="go"> [2, 0, 2]]</span>
<span class="gp">>>> </span><span class="n">v</span> <span class="o">=</span> <span class="p">[</span><span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">]</span>
<span class="gp">>>> </span><span class="n">s</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">sparse_coo_tensor</span><span class="p">(</span><span class="n">i</span><span class="p">,</span> <span class="n">v</span><span class="p">,</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">))</span>
<span class="gp">>>> </span><span class="n">s</span>
<span class="go">tensor(indices=tensor([[0, 1, 1],</span>
<span class="go"> [2, 0, 2]]),</span>
<span class="go"> values=tensor([3, 4, 5]),</span>
<span class="go"> size=(2, 3), nnz=3, layout=torch.sparse_coo)</span>
<span class="gp">>>> </span><span class="n">s</span><span class="o">.</span><span class="n">to_dense</span><span class="p">()</span>
<span class="go">tensor([[0, 0, 3],</span>
<span class="go"> [4, 0, 5]])</span>
</pre></div>
</div>
Note that the input `i` is NOT a list of index tuples. If you want to write your indices this way, you should transpose before passing them to the sparse constructor:
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="n">i</span> <span class="o">=</span> <span class="p">[[</span><span class="mi">0</span><span class="p">,</span> <span class="mi">2</span><span class="p">],</span> <span class="p">[</span><span class="mi">1</span><span class="p">,</span> <span class="mi">0</span><span class="p">],</span> <span class="p">[</span><span class="mi">1</span><span class="p">,</span> <span class="mi">2</span><span class="p">]]</span>
<span class="gp">>>> </span><span class="n">v</span> <span class="o">=</span> <span class="p">[</span><span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span> <span class="p">]</span>
<span class="gp">>>> </span><span class="n">s</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">sparse_coo_tensor</span><span class="p">(</span><span class="nb">list</span><span class="p">(</span><span class="nb">zip</span><span class="p">(</span><span class="o">*</span><span class="n">i</span><span class="p">)),</span> <span class="n">v</span><span class="p">,</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">))</span>
<span class="gp">>>> </span><span class="c1"># Or another equivalent formulation to get s</span>
<span class="gp">>>> </span><span class="n">s</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">sparse_coo_tensor</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">tensor</span><span class="p">(</span><span class="n">i</span><span class="p">)</span><span class="o">.</span><span class="n">t</span><span class="p">(),</span> <span class="n">v</span><span class="p">,</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">))</span>
<span class="gp">>>> </span><span class="n">torch</span><span class="o">.</span><span class="n">sparse_coo_tensor</span><span class="p">(</span><span class="n">i</span><span class="o">.</span><span class="n">t</span><span class="p">(),</span> <span class="n">v</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">Size</span><span class="p">([</span><span class="mi">2</span><span class="p">,</span><span class="mi">3</span><span class="p">]))</span><span class="o">.</span><span class="n">to_dense</span><span class="p">()</span>
<span class="go">tensor([[0, 0, 3],</span>
<span class="go"> [4, 0, 5]])</span>
</pre></div>
</div>
An empty sparse COO tensor can be constructed by specifying its size only:

```python
>>> torch.sparse_coo_tensor(size=(2, 3))
tensor(indices=tensor([], size=(2, 0)),
       values=tensor([], size=(0,)),
       size=(2, 3), nnz=0, layout=torch.sparse_coo)
```
### Hybrid sparse COO tensors

PyTorch implements an extension of sparse tensors with scalar values to sparse tensors with (contiguous) tensor values. Such tensors are called hybrid tensors.

A PyTorch hybrid COO tensor extends the sparse COO tensor by allowing the `values` tensor to be a multi-dimensional tensor, so that we have:
- the indices of specified elements are collected in an `indices` tensor of size `(sparse_dims, nse)` and with element type `torch.int64`,
- the corresponding (tensor) values are collected in a `values` tensor of size `(nse, dense_dims)` and with an arbitrary integer or floating point number element type.
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>We use (M + K)-dimensional tensor to denote a N-dimensional hybrid
sparse tensor, where M and K are the numbers of sparse and dense
dimensions, respectively, such that M + K == N holds.</p>
</div>
Suppose we want to create a (2 + 1)-dimensional tensor with the entry [3, 4] at location (0, 2), entry [5, 6] at location (1, 0), and entry [7, 8] at location (1, 2). We would write:
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="n">i</span> <span class="o">=</span> <span class="p">[[</span><span class="mi">0</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">],</span>
<span class="go"> [2, 0, 2]]</span>
<span class="gp">>>> </span><span class="n">v</span> <span class="o">=</span> <span class="p">[[</span><span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">],</span> <span class="p">[</span><span class="mi">5</span><span class="p">,</span> <span class="mi">6</span><span class="p">],</span> <span class="p">[</span><span class="mi">7</span><span class="p">,</span> <span class="mi">8</span><span class="p">]]</span>
<span class="gp">>>> </span><span class="n">s</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">sparse_coo_tensor</span><span class="p">(</span><span class="n">i</span><span class="p">,</span> <span class="n">v</span><span class="p">,</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">2</span><span class="p">))</span>
<span class="gp">>>> </span><span class="n">s</span>
<span class="go">tensor(indices=tensor([[0, 1, 1],</span>
<span class="go"> [2, 0, 2]]),</span>
<span class="go"> values=tensor([[3, 4],</span>
<span class="go"> [5, 6],</span>
<span class="go"> [7, 8]]),</span>
<span class="go"> size=(2, 3, 2), nnz=3, layout=torch.sparse_coo)</span>
</pre></div>
</div>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="n">s</span><span class="o">.</span><span class="n">to_dense</span><span class="p">()</span>
<span class="go">tensor([[[0, 0],</span>
<span class="go"> [0, 0],</span>
<span class="go"> [3, 4]],</span>
<span class="go"> [[5, 6],</span>
<span class="go"> [0, 0],</span>
<span class="go"> [7, 8]]])</span>
</pre></div>
</div>
In general, if `s` is a sparse COO tensor and `M = s.sparse_dim()`, `K = s.dense_dim()`, then we have the following invariants:

- `M + K == len(s.shape) == s.ndim` - dimensionality of a tensor is the sum of the number of sparse and dense dimensions,
- `s.indices().shape == (M, nse)` - sparse indices are stored explicitly,
- `s.values().shape == (nse,) + s.shape[M : M + K]` - the values of a hybrid tensor are K-dimensional tensors,
- `s.values().layout == torch.strided` - values are stored as strided tensors.
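These invariants can be spot-checked on the hybrid tensor `s` constructed above (a minimal sketch; `indices()` and `values()` require a coalesced tensor, so we coalesce first):

```python
>>> sc = s.coalesce()
>>> M, K = sc.sparse_dim(), sc.dense_dim()
>>> M + K == len(sc.shape) == sc.ndim
True
>>> sc.indices().shape == (M, sc._nnz())
True
>>> sc.values().shape == (sc._nnz(),) + sc.shape[M : M + K]
True
>>> sc.values().layout == torch.strided
True
```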
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>Dense dimensions always follow sparse dimensions, that is, mixing
of dense and sparse dimensions is not supported.</p>
</div>
</div>
<div class="section" id="uncoalesced-sparse-coo-tensors">
<span id="sparse-uncoalesced-coo-docs"></span><h3>Uncoalesced sparse COO tensors<a class="headerlink" href="#uncoalesced-sparse-coo-tensors" title="Permalink to this headline">¶</a></h3>
<p>PyTorch sparse COO tensor format permits <em>uncoalesced</em> sparse tensors,
where there may be duplicate coordinates in the indices; in this case,
the interpretation is that the value at that index is the sum of all
duplicate value entries. For example, one can specify multiple values,
<code class="docutils literal notranslate"><span class="pre">3</span></code> and <code class="docutils literal notranslate"><span class="pre">4</span></code>, for the same index <code class="docutils literal notranslate"><span class="pre">1</span></code>, that leads to an 1-D
uncoalesced tensor:</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="n">i</span> <span class="o">=</span> <span class="p">[[</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">]]</span>
<span class="gp">>>> </span><span class="n">v</span> <span class="o">=</span> <span class="p">[</span><span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">]</span>
<span class="gp">>>> </span><span class="n">s</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">sparse_coo_tensor</span><span class="p">(</span><span class="n">i</span><span class="p">,</span> <span class="n">v</span><span class="p">,</span> <span class="p">(</span><span class="mi">3</span><span class="p">,))</span>
<span class="gp">>>> </span><span class="n">s</span>
<span class="go">tensor(indices=tensor([[1, 1]]),</span>
<span class="go"> values=tensor( [3, 4]),</span>
<span class="go"> size=(3,), nnz=2, layout=torch.sparse_coo)</span>
</pre></div>
</div>
while the coalescing process will accumulate the multi-valued elements into a single value using summation:
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="n">s</span><span class="o">.</span><span class="n">coalesce</span><span class="p">()</span>
<span class="go">tensor(indices=tensor([[1]]),</span>
<span class="go"> values=tensor([7]),</span>
<span class="go"> size=(3,), nnz=1, layout=torch.sparse_coo)</span>
</pre></div>
</div>
In general, the output of the `torch.Tensor.coalesce()` method is a sparse tensor with the following properties:

- the indices of specified tensor elements are unique,
- the indices are sorted in lexicographical order,
- `torch.Tensor.is_coalesced()` returns `True`.
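These properties can be seen directly on a small example (an illustrative snippet, not part of the original reference):

```python
>>> sc = torch.sparse_coo_tensor([[2, 0, 2]], [1, 2, 3], (3,)).coalesce()
>>> sc.indices()   # unique and sorted
tensor([[0, 2]])
>>> sc.values()    # the duplicates at index 2 were summed: 1 + 3
tensor([2, 4])
>>> sc.is_coalesced()
True
```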
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>For the most part, you shouldn’t have to care whether or not a
sparse tensor is coalesced or not, as most operations will work
identically given a coalesced or uncoalesced sparse tensor.</p>
<p>However, some operations can be implemented more efficiently on
uncoalesced tensors, and some on coalesced tensors.</p>
<p>For instance, addition of sparse COO tensors is implemented by
simply concatenating the indices and values tensors:</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="n">a</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">sparse_coo_tensor</span><span class="p">([[</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">]],</span> <span class="p">[</span><span class="mi">5</span><span class="p">,</span> <span class="mi">6</span><span class="p">],</span> <span class="p">(</span><span class="mi">2</span><span class="p">,))</span>
<span class="gp">>>> </span><span class="n">b</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">sparse_coo_tensor</span><span class="p">([[</span><span class="mi">0</span><span class="p">,</span> <span class="mi">0</span><span class="p">]],</span> <span class="p">[</span><span class="mi">7</span><span class="p">,</span> <span class="mi">8</span><span class="p">],</span> <span class="p">(</span><span class="mi">2</span><span class="p">,))</span>
<span class="gp">>>> </span><span class="n">a</span> <span class="o">+</span> <span class="n">b</span>
<span class="go">tensor(indices=tensor([[0, 0, 1, 1]]),</span>
<span class="go"> values=tensor([7, 8, 5, 6]),</span>
<span class="go"> size=(2,), nnz=4, layout=torch.sparse_coo)</span>
</pre></div>
</div>
If you repeatedly perform an operation that can produce duplicate entries (e.g., `torch.Tensor.add()`), you should occasionally coalesce your sparse tensors to prevent them from growing too large, as the sketch below illustrates.
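A minimal sketch of this growth (an illustrative snippet, assuming the concatenating addition semantics shown above):

```python
>>> t = torch.sparse_coo_tensor([[0]], [1.0], (2,))
>>> for _ in range(3):
...     t = t + t          # each addition concatenates indices and values
...
>>> t._nnz()
8
>>> t = t.coalesce()       # duplicates are summed into a single entry
>>> t._nnz()
1
>>> t.values()
tensor([8.])
```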
On the other hand, the lexicographical ordering of indices can be advantageous for implementing algorithms that involve many element selection operations, such as slicing or matrix products.
<div class="section" id="working-with-sparse-coo-tensors">
<h3>Working with sparse COO tensors<a class="headerlink" href="#working-with-sparse-coo-tensors" title="Permalink to this headline">¶</a></h3>
<p>Let’s consider the following example:</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="n">i</span> <span class="o">=</span> <span class="p">[[</span><span class="mi">0</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">],</span>
<span class="go"> [2, 0, 2]]</span>
<span class="gp">>>> </span><span class="n">v</span> <span class="o">=</span> <span class="p">[[</span><span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">],</span> <span class="p">[</span><span class="mi">5</span><span class="p">,</span> <span class="mi">6</span><span class="p">],</span> <span class="p">[</span><span class="mi">7</span><span class="p">,</span> <span class="mi">8</span><span class="p">]]</span>
<span class="gp">>>> </span><span class="n">s</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">sparse_coo_tensor</span><span class="p">(</span><span class="n">i</span><span class="p">,</span> <span class="n">v</span><span class="p">,</span> <span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">2</span><span class="p">))</span>
</pre></div>
</div>
As mentioned above, a sparse COO tensor is a `torch.Tensor` instance; to distinguish it from `Tensor` instances that use some other layout, one can use the `torch.Tensor.is_sparse` or `torch.Tensor.layout` properties:
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="nb">isinstance</span><span class="p">(</span><span class="n">s</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">)</span>
<span class="go">True</span>
<span class="gp">>>> </span><span class="n">s</span><span class="o">.</span><span class="n">is_sparse</span>
<span class="go">True</span>
<span class="gp">>>> </span><span class="n">s</span><span class="o">.</span><span class="n">layout</span> <span class="o">==</span> <span class="n">torch</span><span class="o">.</span><span class="n">sparse_coo</span>
<span class="go">True</span>
</pre></div>
</div>
The number of sparse and dense dimensions can be acquired using the methods `torch.Tensor.sparse_dim()` and `torch.Tensor.dense_dim()`, respectively. For instance:
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="n">s</span><span class="o">.</span><span class="n">sparse_dim</span><span class="p">(),</span> <span class="n">s</span><span class="o">.</span><span class="n">dense_dim</span><span class="p">()</span>
<span class="go">(2, 1)</span>
</pre></div>
</div>
If `s` is a sparse COO tensor, then its COO format data can be acquired using the methods `torch.Tensor.indices()` and `torch.Tensor.values()`.
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>Currently, one can acquire the COO format data only when the tensor
instance is coalesced:</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="n">s</span><span class="o">.</span><span class="n">indices</span><span class="p">()</span>
<span class="go">RuntimeError: Cannot get indices on an uncoalesced tensor, please call .coalesce() first</span>
</pre></div>
</div>
<p>For acquiring the COO format data of an uncoalesced tensor, use
<code class="xref py py-func docutils literal notranslate"><span class="pre">torch.Tensor._values()</span></code> and <code class="xref py py-func docutils literal notranslate"><span class="pre">torch.Tensor._indices()</span></code>:</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="n">s</span><span class="o">.</span><span class="n">_indices</span><span class="p">()</span>
<span class="go">tensor([[0, 1, 1],</span>
<span class="go"> [2, 0, 2]])</span>
</pre></div>
</div>
</div>
Constructing a new sparse COO tensor results in a tensor that is not coalesced:
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="n">s</span><span class="o">.</span><span class="n">is_coalesced</span><span class="p">()</span>
<span class="go">False</span>
</pre></div>
</div>
but one can construct a coalesced copy of a sparse COO tensor using the `torch.Tensor.coalesce()` method:
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="n">s2</span> <span class="o">=</span> <span class="n">s</span><span class="o">.</span><span class="n">coalesce</span><span class="p">()</span>
<span class="gp">>>> </span><span class="n">s2</span><span class="o">.</span><span class="n">indices</span><span class="p">()</span>
<span class="go">tensor([[0, 1, 1],</span>
<span class="go"> [2, 0, 2]])</span>
</pre></div>
</div>
When working with uncoalesced sparse COO tensors, one must take into account the additive nature of uncoalesced data: the values sharing the same index are the terms of a sum whose evaluation gives the value of the corresponding tensor element. For example, scalar multiplication of an uncoalesced sparse tensor can be implemented by multiplying all of the uncoalesced values by the scalar, because `c * (a + b) == c * a + c * b` holds. However, any nonlinear operation, say, a square root, cannot be implemented by applying the operation to the uncoalesced data, because `sqrt(a + b) == sqrt(a) + sqrt(b)` does not hold in general.
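A minimal sketch of this distinction (an illustrative snippet, not part of the original reference):

```python
>>> t = torch.sparse_coo_tensor([[1, 1]], [3.0, 4.0], (3,))  # duplicates 3 + 4 at index 1
>>> (2 * t).coalesce().values()    # linear: 2 * 3 + 2 * 4 == 2 * (3 + 4)
tensor([14.])
>>> t._values().sqrt().sum()       # WRONG for sqrt: sqrt(3) + sqrt(4) != sqrt(7)
tensor(3.7321)
>>> t.coalesce().values().sqrt()   # correct: coalesce first, then apply the nonlinear op
tensor([2.6458])
```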
Slicing (with positive step) of a sparse COO tensor is supported only for dense dimensions. Indexing is supported for both sparse and dense dimensions:
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="n">s</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span>
<span class="go">tensor(indices=tensor([[0, 2]]),</span>
<span class="go"> values=tensor([[5, 6],</span>
<span class="go"> [7, 8]]),</span>
<span class="go"> size=(3, 2), nnz=2, layout=torch.sparse_coo)</span>
<span class="gp">>>> </span><span class="n">s</span><span class="p">[</span><span class="mi">1</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="mi">1</span><span class="p">]</span>
<span class="go">tensor(6)</span>
<span class="gp">>>> </span><span class="n">s</span><span class="p">[</span><span class="mi">1</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="mi">1</span><span class="p">:]</span>
<span class="go">tensor([6])</span>
</pre></div>
</div>
In PyTorch, the fill value of a sparse tensor cannot be specified explicitly and is in general assumed to be zero. However, there exist operations that may interpret the fill value differently. For instance, `torch.sparse.softmax()` computes the softmax with the assumption that the fill value is negative infinity.
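A small sketch of this behavior (an illustrative snippet, not part of the original reference): with the fill value taken as negative infinity, the unspecified entry contributes nothing to the softmax, so the two specified entries split the probability mass evenly:

```python
>>> x = torch.sparse_coo_tensor([[0, 0], [0, 1]], [1.0, 1.0], (1, 3))
>>> torch.sparse.softmax(x, dim=1).to_dense()
tensor([[0.5000, 0.5000, 0.0000]])
```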
<div class="section" id="sparse-csr-tensor">
<span id="sparse-csr-docs"></span><h2>Sparse CSR Tensor<a class="headerlink" href="#sparse-csr-tensor" title="Permalink to this headline">¶</a></h2>
<p>The CSR (Compressed Sparse Row) sparse tensor format implements the CSR format
for storage of 2 dimensional tensors. Although there is no support for N-dimensional
tensors, the primary advantage over the COO format is better use of storage and
much faster computation operations such as sparse matrix-vector multiplication
using MKL and MAGMA backends. CUDA support does not exist as of now.</p>
<p>A CSR sparse tensor consists of three 1-D tensors: <code class="docutils literal notranslate"><span class="pre">crow_indices</span></code>, <code class="docutils literal notranslate"><span class="pre">col_indices</span></code>
and <code class="docutils literal notranslate"><span class="pre">values</span></code>:</p>
<blockquote>
<div><ul class="simple">
<li><p>The <code class="docutils literal notranslate"><span class="pre">crow_indices</span></code> tensor consists of compressed row indices. This is a 1-D tensor
of size <code class="docutils literal notranslate"><span class="pre">size[0]</span> <span class="pre">+</span> <span class="pre">1</span></code>. The last element is the number of non-zeros. This tensor
encodes the index in <code class="docutils literal notranslate"><span class="pre">values</span></code> and <code class="docutils literal notranslate"><span class="pre">col_indices</span></code> at which each row
starts. The difference between each successive element and the one before it gives
the number of elements in that row (see the sketch after this list).</p></li>
<li><p>The <code class="docutils literal notranslate"><span class="pre">col_indices</span></code> tensor contains the column indices of each value. This is a 1-D
tensor of size <code class="docutils literal notranslate"><span class="pre">nnz</span></code>.</p></li>
<li><p>The <code class="docutils literal notranslate"><span class="pre">values</span></code> tensor contains the values of the CSR tensor. This is a 1-D tensor
of size <code class="docutils literal notranslate"><span class="pre">nnz</span></code>.</p></li>
</ul>
</div></blockquote>
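<p>For example, a small sketch of the <code class="docutils literal notranslate"><span class="pre">crow_indices</span></code> encoding (values chosen
only for illustration): consecutive differences recover the number of elements
in each row:</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span>>>> crow_indices = torch.tensor([0, 2, 3, 3])  # 3 rows, nnz == 3
>>> crow_indices[1:] - crow_indices[:-1]       # elements per row
tensor([2, 1, 0])
</pre></div>
</div>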
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>The index tensors <code class="docutils literal notranslate"><span class="pre">crow_indices</span></code> and <code class="docutils literal notranslate"><span class="pre">col_indices</span></code> should have element type either
<code class="docutils literal notranslate"><span class="pre">torch.int64</span></code> (default) or <code class="docutils literal notranslate"><span class="pre">torch.int32</span></code>. If you want to use MKL-enabled matrix
operations, use <code class="docutils literal notranslate"><span class="pre">torch.int32</span></code>. This is because PyTorch is linked with MKL LP64 by
default, which uses 32-bit integer indexing.</p>
</div>
<div class="section" id="construction-of-csr-tensors">
<h3>Construction of CSR tensors<a class="headerlink" href="#construction-of-csr-tensors" title="Permalink to this headline">¶</a></h3>
<p>Sparse CSR matrices can be directly constructed by using the <a class="reference internal" href="generated/torch._sparse_csr_tensor.html#torch._sparse_csr_tensor" title="torch._sparse_csr_tensor"><code class="xref py py-func docutils literal notranslate"><span class="pre">torch._sparse_csr_tensor()</span></code></a>
method. The user must supply the row and column indices and values tensors separately.
The <code class="docutils literal notranslate"><span class="pre">size</span></code> argument is optional and will be deduced from the <code class="docutils literal notranslate"><span class="pre">crow_indices</span></code>
and <code class="docutils literal notranslate"><span class="pre">col_indices</span></code> tensors if it is not present.</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="n">crow_indices</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">tensor</span><span class="p">([</span><span class="mi">0</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="mi">4</span><span class="p">])</span>
<span class="gp">>>> </span><span class="n">col_indices</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">tensor</span><span class="p">([</span><span class="mi">0</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="mi">1</span><span class="p">])</span>
<span class="gp">>>> </span><span class="n">values</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">tensor</span><span class="p">([</span><span class="mi">1</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">])</span>
<span class="gp">>>> </span><span class="n">csr</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">_sparse_csr_tensor</span><span class="p">(</span><span class="n">crow_indices</span><span class="p">,</span> <span class="n">col_indices</span><span class="p">,</span> <span class="n">values</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">double</span><span class="p">)</span>
<span class="gp">>>> </span><span class="n">csr</span>
<span class="go">tensor(crow_indices=tensor([0, 2, 4]),</span>
<span class="go"> col_indices=tensor([0, 1, 0, 1]),</span>
<span class="go"> values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4,</span>
<span class="go"> dtype=torch.float64)</span>
<span class="gp">>>> </span><span class="n">csr</span><span class="o">.</span><span class="n">to_dense</span><span class="p">()</span>
<span class="go">tensor([[1., 2.],</span>
<span class="go"> [3., 4.]], dtype=torch.float64)</span>
</pre></div>
</div>
</div>
<div class="section" id="csr-tensor-operations">
<h3>CSR Tensor Operations<a class="headerlink" href="#csr-tensor-operations" title="Permalink to this headline">¶</a></h3>
<p>The simplest way of constructing a sparse CSR tensor from a strided or sparse COO
tensor is to use <code class="xref py py-meth docutils literal notranslate"><span class="pre">tensor._to_sparse_csr()</span></code>. Any zeros in the (strided) tensor will
be interpreted as missing values in the sparse tensor:</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="n">a</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">tensor</span><span class="p">([[</span><span class="mi">0</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">0</span><span class="p">],</span> <span class="p">[</span><span class="mi">1</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="mi">0</span><span class="p">],</span> <span class="p">[</span><span class="mi">0</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="mi">0</span><span class="p">]],</span> <span class="n">dtype</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">float64</span><span class="p">)</span>
<span class="gp">>>> </span><span class="n">sp</span> <span class="o">=</span> <span class="n">a</span><span class="o">.</span><span class="n">_to_sparse_csr</span><span class="p">()</span>
<span class="gp">>>> </span><span class="n">sp</span>
<span class="go">tensor(crow_indices=tensor([0, 1, 3, 3]),</span>
<span class="go"> col_indices=tensor([2, 0, 1]),</span>
<span class="go"> values=tensor([1., 1., 2.]), size=(3, 4), nnz=3, dtype=torch.float64)</span>
</pre></div>
</div>
<p>Sparse matrix-vector multiplication can be performed with the
<code class="xref py py-meth docutils literal notranslate"><span class="pre">tensor.matmul()</span></code> method. This is currently the only math operation
supported on CSR tensors.</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="n">vec</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="mi">4</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">float64</span><span class="p">)</span>
<span class="gp">>>> </span><span class="n">sp</span><span class="o">.</span><span class="n">matmul</span><span class="p">(</span><span class="n">vec</span><span class="p">)</span>
<span class="go">tensor([[0.9078],</span>
<span class="go"> [1.3180],</span>
<span class="go"> [0.0000]], dtype=torch.float64)</span>
</pre></div>
</div>
</div>
</div>
<div class="section" id="supported-linear-algebra-operations">
<h2>Supported Linear Algebra operations<a class="headerlink" href="#supported-linear-algebra-operations" title="Permalink to this headline">¶</a></h2>
<p>The following table summarizes supported Linear Algebra operations on
sparse matrices where the operand layouts may vary. Here
<code class="docutils literal notranslate"><span class="pre">T[layout]</span></code> denotes a tensor with a given layout. Similarly,
<code class="docutils literal notranslate"><span class="pre">M[layout]</span></code> denotes a matrix (2-D PyTorch tensor), and <code class="docutils literal notranslate"><span class="pre">V[layout]</span></code>
denotes a vector (1-D PyTorch tensor). In addition, <code class="docutils literal notranslate"><span class="pre">f</span></code> denotes a
scalar (float or 0-D PyTorch tensor), <code class="docutils literal notranslate"><span class="pre">*</span></code> is element-wise
multiplication, and <code class="docutils literal notranslate"><span class="pre">@</span></code> is matrix multiplication.</p>
<table class="colwidths-given docutils colwidths-auto align-default">
<colgroup>
<col style="width: 24%" />
<col style="width: 6%" />
<col style="width: 71%" />
</colgroup>
<thead>
<tr class="row-odd"><th class="head"><p>PyTorch operation</p></th>
<th class="head"><p>Sparse grad?</p></th>
<th class="head"><p>Layout signature</p></th>
</tr>
</thead>
<tbody>
<tr class="row-even"><td><p><a class="reference internal" href="generated/torch.mv.html#torch.mv" title="torch.mv"><code class="xref py py-func docutils literal notranslate"><span class="pre">torch.mv()</span></code></a></p></td>
<td><p>no</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">M[sparse_coo]</span> <span class="pre">@</span> <span class="pre">V[strided]</span> <span class="pre">-></span> <span class="pre">V[strided]</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="generated/torch.mv.html#torch.mv" title="torch.mv"><code class="xref py py-func docutils literal notranslate"><span class="pre">torch.mv()</span></code></a></p></td>
<td><p>no</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">M[sparse_csr]</span> <span class="pre">@</span> <span class="pre">V[strided]</span> <span class="pre">-></span> <span class="pre">V[strided]</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="generated/torch.matmul.html#torch.matmul" title="torch.matmul"><code class="xref py py-func docutils literal notranslate"><span class="pre">torch.matmul()</span></code></a></p></td>
<td><p>no</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">M[sparse_coo]</span> <span class="pre">@</span> <span class="pre">M[strided]</span> <span class="pre">-></span> <span class="pre">M[strided]</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="generated/torch.matmul.html#torch.matmul" title="torch.matmul"><code class="xref py py-func docutils literal notranslate"><span class="pre">torch.matmul()</span></code></a></p></td>
<td><p>no</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">M[sparse_csr]</span> <span class="pre">@</span> <span class="pre">M[strided]</span> <span class="pre">-></span> <span class="pre">M[strided]</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="generated/torch.mm.html#torch.mm" title="torch.mm"><code class="xref py py-func docutils literal notranslate"><span class="pre">torch.mm()</span></code></a></p></td>
<td><p>no</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">M[sparse_coo]</span> <span class="pre">@</span> <span class="pre">M[strided]</span> <span class="pre">-></span> <span class="pre">M[strided]</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="generated/torch.sparse.mm.html#torch.sparse.mm" title="torch.sparse.mm"><code class="xref py py-func docutils literal notranslate"><span class="pre">torch.sparse.mm()</span></code></a></p></td>
<td><p>yes</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">M[sparse_coo]</span> <span class="pre">@</span> <span class="pre">M[strided]</span> <span class="pre">-></span> <span class="pre">M[strided]</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="generated/torch.smm.html#torch.smm" title="torch.smm"><code class="xref py py-func docutils literal notranslate"><span class="pre">torch.smm()</span></code></a></p></td>
<td><p>no</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">M[sparse_coo]</span> <span class="pre">@</span> <span class="pre">M[strided]</span> <span class="pre">-></span> <span class="pre">M[sparse_coo]</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="generated/torch.hspmm.html#torch.hspmm" title="torch.hspmm"><code class="xref py py-func docutils literal notranslate"><span class="pre">torch.hspmm()</span></code></a></p></td>
<td><p>no</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">M[sparse_coo]</span> <span class="pre">@</span> <span class="pre">M[strided]</span> <span class="pre">-></span> <span class="pre">M[hybrid</span> <span class="pre">sparse_coo]</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="generated/torch.bmm.html#torch.bmm" title="torch.bmm"><code class="xref py py-func docutils literal notranslate"><span class="pre">torch.bmm()</span></code></a></p></td>
<td><p>no</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">T[sparse_coo]</span> <span class="pre">@</span> <span class="pre">T[strided]</span> <span class="pre">-></span> <span class="pre">T[strided]</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="generated/torch.addmm.html#torch.addmm" title="torch.addmm"><code class="xref py py-func docutils literal notranslate"><span class="pre">torch.addmm()</span></code></a></p></td>
<td><p>no</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">f</span> <span class="pre">*</span> <span class="pre">M[strided]</span> <span class="pre">+</span> <span class="pre">f</span> <span class="pre">*</span> <span class="pre">(M[sparse_coo]</span> <span class="pre">@</span> <span class="pre">M[strided])</span> <span class="pre">-></span> <span class="pre">M[strided]</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="generated/torch.sparse.addmm.html#torch.sparse.addmm" title="torch.sparse.addmm"><code class="xref py py-func docutils literal notranslate"><span class="pre">torch.sparse.addmm()</span></code></a></p></td>
<td><p>yes</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">f</span> <span class="pre">*</span> <span class="pre">M[strided]</span> <span class="pre">+</span> <span class="pre">f</span> <span class="pre">*</span> <span class="pre">(M[sparse_coo]</span> <span class="pre">@</span> <span class="pre">M[strided])</span> <span class="pre">-></span> <span class="pre">M[strided]</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="generated/torch.sspaddmm.html#torch.sspaddmm" title="torch.sspaddmm"><code class="xref py py-func docutils literal notranslate"><span class="pre">torch.sspaddmm()</span></code></a></p></td>
<td><p>no</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">f</span> <span class="pre">*</span> <span class="pre">M[sparse_coo]</span> <span class="pre">+</span> <span class="pre">f</span> <span class="pre">*</span> <span class="pre">(M[sparse_coo]</span> <span class="pre">@</span> <span class="pre">M[strided])</span> <span class="pre">-></span> <span class="pre">M[sparse_coo]</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="generated/torch.lobpcg.html#torch.lobpcg" title="torch.lobpcg"><code class="xref py py-func docutils literal notranslate"><span class="pre">torch.lobpcg()</span></code></a></p></td>
<td><p>no</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">GENEIG(M[sparse_coo])</span> <span class="pre">-></span> <span class="pre">M[strided],</span> <span class="pre">M[strided]</span></code></p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="generated/torch.pca_lowrank.html#torch.pca_lowrank" title="torch.pca_lowrank"><code class="xref py py-func docutils literal notranslate"><span class="pre">torch.pca_lowrank()</span></code></a></p></td>
<td><p>yes</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">PCA(M[sparse_coo])</span> <span class="pre">-></span> <span class="pre">M[strided],</span> <span class="pre">M[strided],</span> <span class="pre">M[strided]</span></code></p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="generated/torch.svd_lowrank.html#torch.svd_lowrank" title="torch.svd_lowrank"><code class="xref py py-func docutils literal notranslate"><span class="pre">torch.svd_lowrank()</span></code></a></p></td>
<td><p>yes</p></td>
<td><p><code class="docutils literal notranslate"><span class="pre">SVD(M[sparse_coo])</span> <span class="pre">-></span> <span class="pre">M[strided],</span> <span class="pre">M[strided],</span> <span class="pre">M[strided]</span></code></p></td>
</tr>
</tbody>
</table>
<p>where the “Sparse grad?” column indicates whether the PyTorch operation supports
backward with respect to the sparse matrix argument. All PyTorch operations,
except <a class="reference internal" href="generated/torch.smm.html#torch.smm" title="torch.smm"><code class="xref py py-func docutils literal notranslate"><span class="pre">torch.smm()</span></code></a>, support backward with respect to strided
matrix arguments.</p>
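<p>As a brief sketch of the “Sparse grad?” distinction (shapes chosen only for
illustration), <a class="reference internal" href="generated/torch.sparse.mm.html#torch.sparse.mm" title="torch.sparse.mm"><code class="xref py py-func docutils literal notranslate"><span class="pre">torch.sparse.mm()</span></code></a> produces a sparse gradient for its
sparse argument:</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span>>>> S = torch.randn(2, 3).to_sparse().requires_grad_(True)
>>> D = torch.randn(3, 2)
>>> torch.sparse.mm(S, D).sum().backward()
>>> S.grad.is_sparse
True
</pre></div>
</div>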
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>Currently, PyTorch does not support matrix multiplication with the
layout signature <code class="docutils literal notranslate"><span class="pre">M[strided]</span> <span class="pre">@</span> <span class="pre">M[sparse_coo]</span></code>. However,
applications can still compute this using the matrix relation <code class="docutils literal notranslate"><span class="pre">D</span> <span class="pre">@</span>
<span class="pre">S</span> <span class="pre">==</span> <span class="pre">(S.t()</span> <span class="pre">@</span> <span class="pre">D.t()).t()</span></code>.</p>
</div>
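<p>A minimal sketch of this workaround (the names <code class="docutils literal notranslate"><span class="pre">D</span></code> and <code class="docutils literal notranslate"><span class="pre">S</span></code> are chosen only
for illustration):</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span>>>> D = torch.randn(3, 4)               # strided
>>> S = torch.randn(4, 5).to_sparse()   # sparse COO
>>> res = torch.sparse.mm(S.t(), D.t()).t()  # computes D @ S
>>> torch.allclose(res, D @ S.to_dense())
True
</pre></div>
</div>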
</div>
<div class="section" id="tensor-methods-and-sparse">
<h2>Tensor methods and sparse<a class="headerlink" href="#tensor-methods-and-sparse" title="Permalink to this headline">¶</a></h2>
<p>The following Tensor methods are related to sparse tensors:</p>
<table class="longtable docutils colwidths-auto align-default">
<tbody>
<tr class="row-odd"><td><p><a class="reference internal" href="generated/torch.Tensor.is_sparse.html#torch.Tensor.is_sparse" title="torch.Tensor.is_sparse"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Tensor.is_sparse</span></code></a></p></td>
<td><p>Is <code class="docutils literal notranslate"><span class="pre">True</span></code> if the Tensor uses sparse storage layout, <code class="docutils literal notranslate"><span class="pre">False</span></code> otherwise.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="generated/torch.Tensor.dense_dim.html#torch.Tensor.dense_dim" title="torch.Tensor.dense_dim"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Tensor.dense_dim</span></code></a></p></td>
<td><p>Return the number of dense dimensions in a <a class="reference internal" href="#sparse-docs"><span class="std std-ref">sparse tensor</span></a> <code class="xref py py-attr docutils literal notranslate"><span class="pre">self</span></code>.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="generated/torch.Tensor.sparse_dim.html#torch.Tensor.sparse_dim" title="torch.Tensor.sparse_dim"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Tensor.sparse_dim</span></code></a></p></td>
<td><p>Return the number of sparse dimensions in a <a class="reference internal" href="#sparse-docs"><span class="std std-ref">sparse tensor</span></a> <code class="xref py py-attr docutils literal notranslate"><span class="pre">self</span></code>.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="generated/torch.Tensor.sparse_mask.html#torch.Tensor.sparse_mask" title="torch.Tensor.sparse_mask"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Tensor.sparse_mask</span></code></a></p></td>
<td><p>Returns a new <a class="reference internal" href="#sparse-docs"><span class="std std-ref">sparse tensor</span></a> with values from a strided tensor <code class="xref py py-attr docutils literal notranslate"><span class="pre">self</span></code> filtered by the indices of the sparse tensor <code class="xref py py-attr docutils literal notranslate"><span class="pre">mask</span></code>.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="generated/torch.Tensor.to_sparse.html#torch.Tensor.to_sparse" title="torch.Tensor.to_sparse"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Tensor.to_sparse</span></code></a></p></td>
<td><p>Returns a sparse copy of the tensor.</p></td>
</tr>
<tr class="row-even"><td><p><code class="xref py py-obj docutils literal notranslate"><span class="pre">Tensor._to_sparse_csr</span></code></p></td>
<td><p>Convert a tensor to compressed row storage format.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="generated/torch.Tensor.indices.html#torch.Tensor.indices" title="torch.Tensor.indices"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Tensor.indices</span></code></a></p></td>
<td><p>Return the indices tensor of a <a class="reference internal" href="#sparse-coo-docs"><span class="std std-ref">sparse COO tensor</span></a>.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="generated/torch.Tensor.values.html#torch.Tensor.values" title="torch.Tensor.values"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Tensor.values</span></code></a></p></td>
<td><p>Return the values tensor of a <a class="reference internal" href="#sparse-coo-docs"><span class="std std-ref">sparse COO tensor</span></a>.</p></td>
</tr>
</tbody>
</table>
<p>The following Tensor methods are specific to sparse COO tensors:</p>
<table class="longtable docutils colwidths-auto align-default">
<tbody>
<tr class="row-odd"><td><p><a class="reference internal" href="generated/torch.Tensor.coalesce.html#torch.Tensor.coalesce" title="torch.Tensor.coalesce"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Tensor.coalesce</span></code></a></p></td>
<td><p>Returns a coalesced copy of <code class="xref py py-attr docutils literal notranslate"><span class="pre">self</span></code> if <code class="xref py py-attr docutils literal notranslate"><span class="pre">self</span></code> is an <a class="reference internal" href="#sparse-uncoalesced-coo-docs"><span class="std std-ref">uncoalesced tensor</span></a>.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="generated/torch.Tensor.sparse_resize_.html#torch.Tensor.sparse_resize_" title="torch.Tensor.sparse_resize_"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Tensor.sparse_resize_</span></code></a></p></td>
<td><p>Resizes <code class="xref py py-attr docutils literal notranslate"><span class="pre">self</span></code> <a class="reference internal" href="#sparse-docs"><span class="std std-ref">sparse tensor</span></a> to the desired size and the number of sparse and dense dimensions.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="generated/torch.Tensor.sparse_resize_and_clear_.html#torch.Tensor.sparse_resize_and_clear_" title="torch.Tensor.sparse_resize_and_clear_"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Tensor.sparse_resize_and_clear_</span></code></a></p></td>
<td><p>Removes all specified elements from a <a class="reference internal" href="#sparse-docs"><span class="std std-ref">sparse tensor</span></a> <code class="xref py py-attr docutils literal notranslate"><span class="pre">self</span></code> and resizes <code class="xref py py-attr docutils literal notranslate"><span class="pre">self</span></code> to the desired size and the number of sparse and dense dimensions.</p></td>
</tr>
<tr class="row-even"><td><p><a class="reference internal" href="generated/torch.Tensor.is_coalesced.html#torch.Tensor.is_coalesced" title="torch.Tensor.is_coalesced"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Tensor.is_coalesced</span></code></a></p></td>
<td><p>Returns <code class="docutils literal notranslate"><span class="pre">True</span></code> if <code class="xref py py-attr docutils literal notranslate"><span class="pre">self</span></code> is a <a class="reference internal" href="#sparse-coo-docs"><span class="std std-ref">sparse COO tensor</span></a> that is coalesced, <code class="docutils literal notranslate"><span class="pre">False</span></code> otherwise.</p></td>
</tr>
<tr class="row-odd"><td><p><a class="reference internal" href="generated/torch.Tensor.to_dense.html#torch.Tensor.to_dense" title="torch.Tensor.to_dense"><code class="xref py py-obj docutils literal notranslate"><span class="pre">Tensor.to_dense</span></code></a></p></td>
<td><p>Creates a strided copy of <code class="xref py py-attr docutils literal notranslate"><span class="pre">self</span></code>.</p></td>
</tr>
</tbody>
</table>
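<p>For instance, a minimal sketch of coalescing (the duplicate coordinate below is
chosen only for illustration); duplicate entries are summed:</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span>>>> i = torch.tensor([[0, 0], [1, 1]])  # the coordinate (0, 1) appears twice
>>> v = torch.tensor([1., 2.])
>>> s = torch.sparse_coo_tensor(i, v, (2, 2))
>>> s.is_coalesced()
False
>>> s.coalesce().values()
tensor([3.])
</pre></div>
</div>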
<p>The following methods are specific to <a class="reference internal" href="#sparse-csr-docs"><span class="std std-ref">sparse CSR tensors</span></a>:</p>
<table class="longtable docutils colwidths-auto align-default">
<tbody>
<tr class="row-odd"><td><p><code class="xref py py-obj docutils literal notranslate"><span class="pre">Tensor.crow_indices</span></code></p></td>
<td><p>Returns the tensor containing the compressed row indices of the <code class="xref py py-attr docutils literal notranslate"><span class="pre">self</span></code> tensor when <code class="xref py py-attr docutils literal notranslate"><span class="pre">self</span></code> is a sparse CSR tensor of layout <code class="docutils literal notranslate"><span class="pre">sparse_csr</span></code>.</p></td>
</tr>
<tr class="row-even"><td><p><code class="xref py py-obj docutils literal notranslate"><span class="pre">Tensor.col_indices</span></code></p></td>
<td><p>Returns the tensor containing the column indices of the <code class="xref py py-attr docutils literal notranslate"><span class="pre">self</span></code> tensor when <code class="xref py py-attr docutils literal notranslate"><span class="pre">self</span></code> is a sparse CSR tensor of layout <code class="docutils literal notranslate"><span class="pre">sparse_csr</span></code>.</p></td>
</tr>
</tbody>
</table>
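<p>Continuing the <code class="docutils literal notranslate"><span class="pre">csr</span></code> tensor constructed above, a brief sketch of these
accessors:</p>
<div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span>>>> csr.crow_indices()
tensor([0, 2, 4])
>>> csr.col_indices()
tensor([0, 1, 0, 1])
</pre></div>
</div>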
<p>The following Tensor methods support sparse COO tensors:</p>
<p><a class="reference internal" href="generated/torch.Tensor.add.html#torch.Tensor.add" title="torch.Tensor.add"><code class="xref py py-meth docutils literal notranslate"><span class="pre">add()</span></code></a>
<a class="reference internal" href="generated/torch.Tensor.add_.html#torch.Tensor.add_" title="torch.Tensor.add_"><code class="xref py py-meth docutils literal notranslate"><span class="pre">add_()</span></code></a>
<a class="reference internal" href="generated/torch.Tensor.addmm.html#torch.Tensor.addmm" title="torch.Tensor.addmm"><code class="xref py py-meth docutils literal notranslate"><span class="pre">addmm()</span></code></a>
<a class="reference internal" href="generated/torch.Tensor.addmm_.html#torch.Tensor.addmm_" title="torch.Tensor.addmm_"><code class="xref py py-meth docutils literal notranslate"><span class="pre">addmm_()</span></code></a>
<a class="reference internal" href="generated/torch.Tensor.any.html#torch.Tensor.any" title="torch.Tensor.any"><code class="xref py py-meth docutils literal notranslate"><span class="pre">any()</span></code></a>
<a class="reference internal" href="generated/torch.Tensor.asin.html#torch.Tensor.asin" title="torch.Tensor.asin"><code class="xref py py-meth docutils literal notranslate"><span class="pre">asin()</span></code></a>
<a class="reference internal" href="generated/torch.Tensor.asin_.html#torch.Tensor.asin_" title="torch.Tensor.asin_"><code class="xref py py-meth docutils literal notranslate"><span class="pre">asin_()</span></code></a>
<a class="reference internal" href="generated/torch.Tensor.arcsin.html#torch.Tensor.arcsin" title="torch.Tensor.arcsin"><code class="xref py py-meth docutils literal notranslate"><span class="pre">arcsin()</span></code></a>
<a class="reference internal" href="generated/torch.Tensor.arcsin_.html#torch.Tensor.arcsin_" title="torch.Tensor.arcsin_"><code class="xref py py-meth docutils literal notranslate"><span class="pre">arcsin_()</span></code></a>
<a class="reference internal" href="generated/torch.Tensor.bmm.html#torch.Tensor.bmm" title="torch.Tensor.bmm"><code class="xref py py-meth docutils literal notranslate"><span class="pre">bmm()</span></code></a>
<a class="reference internal" href="generated/torch.Tensor.clone.html#torch.Tensor.clone" title="torch.Tensor.clone"><code class="xref py py-meth docutils literal notranslate"><span class="pre">clone()</span></code></a>
<a class="reference internal" href="generated/torch.Tensor.deg2rad.html#torch.Tensor.deg2rad" title="torch.Tensor.deg2rad"><code class="xref py py-meth docutils literal notranslate"><span class="pre">deg2rad()</span></code></a>
<code class="xref py py-meth docutils literal notranslate"><span class="pre">deg2rad_()</span></code>
<a class="reference internal" href="generated/torch.Tensor.detach.html#torch.Tensor.detach" title="torch.Tensor.detach"><code class="xref py py-meth docutils literal notranslate"><span class="pre">detach()</span></code></a>
<a class="reference internal" href="generated/torch.Tensor.detach_.html#torch.Tensor.detach_" title="torch.Tensor.detach_"><code class="xref py py-meth docutils literal notranslate"><span class="pre">detach_()</span></code></a>
<a class="reference internal" href="generated/torch.Tensor.dim.html#torch.Tensor.dim" title="torch.Tensor.dim"><code class="xref py py-meth docutils literal notranslate"><span class="pre">dim()</span></code></a>
<a class="reference internal" href="generated/torch.Tensor.div.html#torch.Tensor.div" title="torch.Tensor.div"><code class="xref py py-meth docutils literal notranslate"><span class="pre">div()</span></code></a>
<a class="reference internal" href="generated/torch.Tensor.div_.html#torch.Tensor.div_" title="torch.Tensor.div_"><code class="xref py py-meth docutils literal notranslate"><span class="pre">div_()</span></code></a>
<a class="reference internal" href="generated/torch.Tensor.floor_divide.html#torch.Tensor.floor_divide" title="torch.Tensor.floor_divide"><code class="xref py py-meth docutils literal notranslate"><span class="pre">floor_divide()</span></code></a>
<a class="reference internal" href="generated/torch.Tensor.floor_divide_.html#torch.Tensor.floor_divide_" title="torch.Tensor.floor_divide_"><code class="xref py py-meth docutils literal notranslate"><span class="pre">floor_divide_()</span></code></a>
<a class="reference internal" href="generated/torch.Tensor.get_device.html#torch.Tensor.get_device" title="torch.Tensor.get_device"><code class="xref py py-meth docutils literal notranslate"><span class="pre">get_device()</span></code></a>
<a class="reference internal" href="generated/torch.Tensor.index_select.html#torch.Tensor.index_select" title="torch.Tensor.index_select"><code class="xref py py-meth docutils literal notranslate"><span class="pre">index_select()</span></code></a>
<a class="reference internal" href="generated/torch.Tensor.isnan.html#torch.Tensor.isnan" title="torch.Tensor.isnan"><code class="xref py py-meth docutils literal notranslate"><span class="pre">isnan()</span></code></a>
<a class="reference internal" href="generated/torch.Tensor.log1p.html#torch.Tensor.log1p" title="torch.Tensor.log1p"><code class="xref py py-meth docutils literal notranslate"><span class="pre">log1p()</span></code></a>
<a class="reference internal" href="generated/torch.Tensor.log1p_.html#torch.Tensor.log1p_" title="torch.Tensor.log1p_"><code class="xref py py-meth docutils literal notranslate"><span class="pre">log1p_()</span></code></a>
<a class="reference internal" href="generated/torch.Tensor.mm.html#torch.Tensor.mm" title="torch.Tensor.mm"><code class="xref py py-meth docutils literal notranslate"><span class="pre">mm()</span></code></a>
<a class="reference internal" href="generated/torch.Tensor.mul.html#torch.Tensor.mul" title="torch.Tensor.mul"><code class="xref py py-meth docutils literal notranslate"><span class="pre">mul()</span></code></a>
<a class="reference internal" href="generated/torch.Tensor.mul_.html#torch.Tensor.mul_" title="torch.Tensor.mul_"><code class="xref py py-meth docutils literal notranslate"><span class="pre">mul_()</span></code></a>
<a class="reference internal" href="generated/torch.Tensor.mv.html#torch.Tensor.mv" title="torch.Tensor.mv"><code class="xref py py-meth docutils literal notranslate"><span class="pre">mv()</span></code></a>
<a class="reference internal" href="generated/torch.Tensor.narrow_copy.html#torch.Tensor.narrow_copy" title="torch.Tensor.narrow_copy"><code class="xref py py-meth docutils literal notranslate"><span class="pre">narrow_copy()</span></code></a>
<a class="reference internal" href="generated/torch.Tensor.neg.html#torch.Tensor.neg" title="torch.Tensor.neg"><code class="xref py py-meth docutils literal notranslate"><span class="pre">neg()</span></code></a>
<a class="reference internal" href="generated/torch.Tensor.neg_.html#torch.Tensor.neg_" title="torch.Tensor.neg_"><code class="xref py py-meth docutils literal notranslate"><span class="pre">neg_()</span></code></a>
<a class="reference internal" href="generated/torch.Tensor.negative.html#torch.Tensor.negative" title="torch.Tensor.negative"><code class="xref py py-meth docutils literal notranslate"><span class="pre">negative()</span></code></a>
<a class="reference internal" href="generated/torch.Tensor.negative_.html#torch.Tensor.negative_" title="torch.Tensor.negative_"><code class="xref py py-meth docutils literal notranslate"><span class="pre">negative_()</span></code></a>
<a class="reference internal" href="generated/torch.Tensor.numel.html#torch.Tensor.numel" title="torch.Tensor.numel"><code class="xref py py-meth docutils literal notranslate"><span class="pre">numel()</span></code></a>
<a class="reference internal" href="generated/torch.Tensor.rad2deg.html#torch.Tensor.rad2deg" title="torch.Tensor.rad2deg"><code class="xref py py-meth docutils literal notranslate"><span class="pre">rad2deg()</span></code></a>
<code class="xref py py-meth docutils literal notranslate"><span class="pre">rad2deg_()</span></code>
<a class="reference internal" href="generated/torch.Tensor.resize_as_.html#torch.Tensor.resize_as_" title="torch.Tensor.resize_as_"><code class="xref py py-meth docutils literal notranslate"><span class="pre">resize_as_()</span></code></a>