Skip to content

Commit

Permalink
2D Strided to/from CSC, COO to CSC, CSC to CSC conversion. (pytorch#77521)
Browse files Browse the repository at this point in the history

Adds
- to_sparse_csc for strided input
- to_sparse_csc for COO input
- CSC to strided
- CSC to CSR
- CSC to CSC

Uses SciPy as a reference

Follow-up work is changing transpose to return CSC when passed CSR, and propagating the resulting changes through our matmul operations.
Pull Request resolved: pytorch#77521
Approved by: https://github.com/pearu, https://github.com/anjali411
  • Loading branch information
cpuhrsch authored and pytorchmergebot committed May 18, 2022
1 parent 687ab97 commit e10a002
Show file tree
Hide file tree
Showing 4 changed files with 257 additions and 65 deletions.
145 changes: 121 additions & 24 deletions aten/src/ATen/native/TensorConversions.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
#include <ATen/Parallel.h>

#include <c10/core/impl/DeviceGuardImplInterface.h>
#include <ATen/SparseTensorUtils.h>

namespace at {
namespace native {
Expand Down Expand Up @@ -325,7 +326,7 @@ Tensor to_dense(const Tensor& tensor, c10::optional<c10::ScalarType> dtype) {
if (tensor.layout() == c10::kSparse) {
return tensor._to_dense(dtype);
}
if (tensor.layout() == c10::kSparseCsr) {
if (tensor.layout() == c10::kSparseCsr || tensor.layout() == c10::kSparseCsc) {
return tensor._to_dense(dtype);
}
if (tensor.layout() == c10::kMkldnn) {
Expand All @@ -347,13 +348,16 @@ Tensor sparse_to_dense(
return dst.add_(self);
}

// Converts a sparse compressed tensor (CSR or CSC) to a dense strided tensor.
// The optional dtype argument is rejected; callers must cast separately.
Tensor sparse_compressed_to_dense(
    const Tensor& self,
    c10::optional<ScalarType> dtype) {
  TORCH_CHECK(
      !dtype.has_value(), "dtype argument is not supported by sparse_csr_to_dense");
  if (self.layout() == kSparseCsr) {
    // CSR: start from zeros and let the sparse add kernel scatter the values.
    Tensor dst = at::zeros(self.sizes(), self.options().layout(kStrided));
    return dst.add_(self);
  }
  // Any other compressed layout (CSC): route through COO, which already
  // supports densification.
  return self.to_sparse().to_dense();
}

// Computes the strides for view_dtype output when the view dtype is
Expand Down Expand Up @@ -482,9 +486,7 @@ Tensor dense_to_sparse_csr(const Tensor& self) {
}

// Converts a strided (dense) tensor to sparse CSC by routing through the
// COO layout, whose CSC conversion does the heavy lifting.
Tensor dense_to_sparse_csc(const Tensor& self) {
  return self.to_sparse().to_sparse_csc();
}

Tensor dense_to_sparse_bsr(const Tensor& self, IntArrayRef blocksize) {
Expand All @@ -500,24 +502,68 @@ Tensor dense_to_sparse_bsc(const Tensor& self, IntArrayRef blocksize) {
}

// CSR/CSC -> CSR conversion.
// - CSC input: transpose the compressed indices into COO form, sort them into
//   row-major order, and rebuild compressed row indices.
// - CSR input: rebuild a fresh tensor around the same index/value buffers
//   (returning self trips an autograd-fallback use_count assertion; see below).
Tensor sparse_compressed_to_sparse_csr(const Tensor& self) {
  if (self.layout() == kSparseCsc) {
    TORCH_CHECK(
        self.dim() == 2,
        "Expected self to be of dimension 2, but got ",
        self.dim(),
        ".");
    auto sizes = self.sizes();
    auto ccol_indices = self.ccol_indices();
    auto row_indices = self.row_indices();
    auto values = self.values();

    // Convert CSC indices to COO indices and swap its rows so they read as
    // (row, col). Keep 32-bit indices when that is what the input uses.
    const bool out_int32 = ccol_indices.scalar_type() == ScalarType::Int;
    Tensor indices_transposed = _convert_indices_from_csr_to_coo(
        ccol_indices, row_indices, out_int32, true);

    // Sort the transposed indices into row-major order; the same permutation
    // must be applied to the values below so they stay paired up.
    auto indices_scalar =
        at::sparse::flatten_indices(indices_transposed, {sizes[0], sizes[1]});
    auto indicesPermutation = std::get<1>(indices_scalar.sort(0));
    auto indices_transposed_sorted =
        indices_transposed.index_select(1, indicesPermutation);

    // Construct a CSR tensor from the sorted COO representation.
    auto new_row_indices = indices_transposed_sorted.select(0, 0);
    auto new_col_indices = indices_transposed_sorted.select(0, 1);
    auto new_values = values.index_select(0, indicesPermutation);
    Tensor new_crow_indices =
        _convert_indices_from_coo_to_csr(new_row_indices, sizes[0], out_int32);

    return _sparse_csr_tensor_unsafe(
        new_crow_indices,
        new_col_indices,
        new_values,
        {sizes[0], sizes[1]},
        new_values.scalar_type(),
        c10::kSparseCsr,
        new_values.device());
  }
  if (self.layout() == kSparseCsr) {
    // Just returning self doesn't work:
    // RuntimeError: t.use_count() <= 1 INTERNAL ASSERT FAILED at
    // "../torch/csrc/autograd/autograd_not_implemented_fallback.cpp":152,
    // please report a bug to PyTorch. aten::to_sparse_csr
    return at::native::_sparse_csr_tensor_unsafe(
        self.crow_indices(),
        self.col_indices(),
        self.values(),
        self.sizes(),
        self.scalar_type(),
        c10::kSparseCsr,
        self.device());
  }
  AT_ERROR(
      "sparse_compressed_to_sparse_csr expected SparseCsr or SparseCsc layout but got ",
      self.layout());
}

Tensor coo_to_sparse_csr(const Tensor& self) {
TORCH_CHECK(
self.dim() == 2,
"Only 2D tensors can be converted to the CSR format but got shape: ",
"Only 2D tensors can be converted to the SparseCsr layout but got shape: ",
self.sizes());
auto coalesced_self = self.coalesce();
auto row_indices = coalesced_self.indices()[0];
Expand All @@ -535,9 +581,19 @@ Tensor coo_to_sparse_csr(const Tensor& self) {
}

// Converts a sparse COO tensor to sparse CSC.
// Uses the identity: the CSC buffers of A are the CSR buffers of A^T —
// crow_indices of A^T serve as ccol_indices of A, and col_indices of A^T
// serve as row_indices of A.
Tensor coo_to_sparse_csc(const Tensor& self) {
  TORCH_CHECK(
      self.dim() == 2,
      "Only 2D tensors can be converted to the SparseCsc layout but got shape: ",
      self.sizes());
  auto coalesced_self = self.transpose(0, 1).coalesce().to_sparse_csr();
  return at::native::_sparse_csc_tensor_unsafe(
      coalesced_self.crow_indices(),
      coalesced_self.col_indices(),
      coalesced_self.values(),
      self.sizes(),
      coalesced_self.scalar_type(),
      c10::kSparseCsc,
      coalesced_self.device());
}

Tensor coo_to_sparse_bsr(const Tensor& self, IntArrayRef blocksize) {
Expand Down Expand Up @@ -874,9 +930,50 @@ Tensor sparse_compressed_to_sparse_bsc(const Tensor& self, IntArrayRef blocksize
}

// CSR/CSC -> CSC conversion. Only CSC input is handled so far; any other
// layout falls through to the unsupported-conversion error below.
Tensor sparse_compressed_to_sparse_csc(const Tensor& self) {
  if (self.layout() == kSparseCsc) {
    // Based on to_sparse_csr just returning self doesn't work
    // (the autograd-not-implemented fallback asserts use_count() <= 1), so
    // rebuild a fresh tensor around the same index/value buffers instead.
    return _sparse_csc_tensor_unsafe(
        self.ccol_indices(),
        self.row_indices(),
        self.values(),
        self.sizes(),
        self.scalar_type(),
        c10::kSparseCsc,
        self.device());
  }
  // NOTE(review): the `return self` after AT_ERROR is unreachable (AT_ERROR
  // throws); it only silences missing-return warnings.
  AT_ERROR(
      "Conversion from ", self.layout(), " to SparseCsc is currently not supported.");
  return self;
}

// Converts a sparse compressed (CSR/CSC) tensor to a sparse COO tensor.
// Only sparse_dim == 2 is currently supported.
Tensor sparse_compressed_to_sparse(const Tensor& self, int64_t sparse_dim) {
  TORCH_CHECK(sparse_dim > 0, "sparse_dim must be >0");
  TORCH_CHECK(sparse_dim <= 2,
              "sparse_dim must be less than or equal to 2");
  // TODO: implement coo.to_sparse(sparse_dim) and then use
  // return self.to_sparse().to_sparse(sparse_dim);
  TORCH_CHECK(
      // Fixed stale copy-paste: the message previously blamed
      // "sparse_csr_to_dense", which is a different conversion entirely.
      sparse_dim == 2, "sparse dim 1 is not supported by sparse_compressed_to_sparse");
  if (self.layout() == kSparseCsc) {
    // transpose=true swaps the decompressed index rows so they read (row, col).
    // NOTE(review): values arrive in column-major order here, so whether the
    // result truly satisfies the coalesced invariant (row-major sorted
    // indices) deserves confirmation against the COO contract.
    Tensor indices = at::_convert_indices_from_csr_to_coo(
        self.ccol_indices(), self.row_indices(), false, true);
    return at::native::_sparse_coo_tensor_unsafe(
               indices, self.values(), self.sizes())
        ._coalesced_(true);
  }
  if (self.layout() == kSparseCsr) {
    Tensor indices = at::_convert_indices_from_csr_to_coo(
        self.crow_indices(), self.col_indices(), false, false);
    return at::native::_sparse_coo_tensor_unsafe(
               indices, self.values(), self.sizes())
        ._coalesced_(true);
  }
  AT_ERROR(
      "sparse_compressed_to_sparse expected SparseCsr or SparseCsc layout but got ",
      self.layout());
}

// Default overload: convert to a COO tensor with both dimensions sparse.
Tensor sparse_compressed_to_sparse(const Tensor& self) {
  constexpr int64_t kDefaultSparseDim = 2;
  return sparse_compressed_to_sparse(self, kDefaultSparseDim);
}

// Sparse layout conversions End
Expand Down
6 changes: 3 additions & 3 deletions aten/src/ATen/native/native_functions.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -5657,7 +5657,7 @@
variants: method
dispatch:
SparseCPU, SparseCUDA: sparse_to_dense
SparseCsrCPU, SparseCsrCUDA: sparse_csr_to_dense
SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_dense
MkldnnCPU: mkldnn_to_dense

- func: to_dense_backward(Tensor grad, Tensor input) -> Tensor
Expand Down Expand Up @@ -5816,13 +5816,13 @@
variants: method
dispatch:
CPU, CUDA: dense_to_sparse
SparseCsrCPU, SparseCsrCUDA: sparse_csr_to_sparse
SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse

- func: to_sparse(Tensor self) -> Tensor
variants: method
dispatch:
CPU, CUDA: dense_to_sparse
SparseCsrCPU, SparseCsrCUDA: sparse_csr_to_sparse
SparseCsrCPU, SparseCsrCUDA: sparse_compressed_to_sparse

- func: to_sparse_csr(Tensor self) -> Tensor
variants: method
Expand Down
23 changes: 0 additions & 23 deletions aten/src/ATen/native/sparse/SparseTensor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -544,29 +544,6 @@ SparseTensor dense_to_sparse(const Tensor& self, int64_t sparse_dim) {
return sparse._coalesced_(true);
}

// CSR -> COO conversion. (Removed by this commit in favor of the generalized
// sparse_compressed_to_sparse in TensorConversions.cpp.)
SparseTensor sparse_csr_to_sparse(const Tensor& self, int64_t sparse_dim) {
  TORCH_INTERNAL_ASSERT(self.is_sparse_csr());
  TORCH_CHECK(sparse_dim > 0, "sparse_dim must be >0");
  TORCH_CHECK(sparse_dim <= 2,
              "sparse_dim must be less than or equal to 2");
  if (sparse_dim == 2) {
    auto sizes = self.sizes();
    Tensor crow_indices = self.crow_indices();
    Tensor col_indices = self.col_indices();
    Tensor values = self.values();
    // Decompress crow_indices into per-element row indices; presumably the
    // CSR layout guarantees row-major order, justifying _coalesced_(true).
    Tensor indices = at::_convert_indices_from_csr_to_coo(crow_indices, col_indices, false, false);
    return at::native::_sparse_coo_tensor_unsafe(indices, values, sizes)._coalesced_(true);
  } else {
    // NOTE(review): message names "sparse_csr_to_dense" but this function
    // converts to COO — stale copy-paste.
    TORCH_CHECK(false, "sparse dim 1 is not supported by sparse_csr_to_dense");
    // TODO: implement coo.to_sparse(sparse_dim) and then use
    // return self.to_sparse().to_sparse(sparse_dim);
  }
}

// Default overload: convert with both dimensions sparse. (Removed by this
// commit along with the sparse_dim overload above.)
SparseTensor sparse_csr_to_sparse(const Tensor& self) {
  return sparse_csr_to_sparse(self, 2);
}

// NB: Dropped the resizeNd variants

SparseTensor& copy_sparse_wrapper_(
Expand Down
Loading

0 comments on commit e10a002

Please sign in to comment.