Skip to content

Commit

Permalink
fix batched_dense_vec_jagged_2d_mul (pytorch#1005)
Browse files Browse the repository at this point in the history
Summary:
Pull Request resolved: pytorch#1005

Fix torch.autograd.gradcheck.GradcheckError ("backward not multiplied by grad_output") on CPU

Reviewed By: jasonjk-park, brad-mengchi, mjanderson09

Differential Revision: D35091934

fbshipit-source-id: 85246a01d297d51453b215ba18ede235d321d16c
  • Loading branch information
jspark1105 authored and facebook-github-bot committed Mar 24, 2022
1 parent 21148ee commit 56b7b8d
Show file tree
Hide file tree
Showing 2 changed files with 2 additions and 4 deletions.
2 changes: 0 additions & 2 deletions fbgemm_gpu/src/jagged_tensor_ops.cu
Original file line number Diff line number Diff line change
Expand Up @@ -406,7 +406,6 @@ class JaggedDenseAddGPUOp
const Tensor& y) {
ctx->save_for_backward(x_offsets);
ctx->saved_data["x_values_shape"] = x_values.sizes();
ctx->saved_data["y_shape"] = y.sizes();

at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(x_values.get_device());
Expand All @@ -431,7 +430,6 @@ class JaggedDenseAddGPUOp
torch::autograd::variable_list grad_outputs) {
auto offsets = ctx->get_saved_variables();
auto x_values_shape = ctx->saved_data["x_values_shape"].toIntVector();
auto y_shape = ctx->saved_data["y_shape"].toIntVector();
TORCH_CHECK(grad_outputs.size() == 1);

at::cuda::OptionalCUDAGuard device_guard;
Expand Down
4 changes: 2 additions & 2 deletions fbgemm_gpu/src/jagged_tensor_ops_cpu.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -423,7 +423,6 @@ class JaggedDenseAddCPUOp
const Tensor& y) {
ctx->save_for_backward(x_offsets);
ctx->saved_data["x_values_shape"] = x_values.sizes();
ctx->saved_data["y_shape"] = y.sizes();

Tensor output;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
Expand All @@ -442,7 +441,6 @@ class JaggedDenseAddCPUOp
torch::autograd::variable_list grad_outputs) {
auto offsets = ctx->get_saved_variables();
auto x_values_shape = ctx->saved_data["x_values_shape"].toIntVector();
auto y_shape = ctx->saved_data["y_shape"].toIntVector();
TORCH_CHECK(grad_outputs.size() == 1);

Tensor x_values_grad = at::empty(x_values_shape, grad_outputs[0].options());
Expand Down Expand Up @@ -868,6 +866,8 @@ class BatchedDenseVecJagged2DMulCPUOp
a_values_grad.accessor<scalar_t, 2>());
});
});
} else {
v_grad.zero_();
}

return {
Expand Down

0 comments on commit 56b7b8d

Please sign in to comment.