Skip to content

Commit

Permalink
passes gradient check for offset @ DOUBLE precision
Browse files Browse the repository at this point in the history
  • Loading branch information
CharlesShang committed Dec 5, 2018
1 parent 9b00cff commit 41ee18d
Show file tree
Hide file tree
Showing 3 changed files with 8 additions and 8 deletions.
2 changes: 1 addition & 1 deletion src/dcn_v2_cuda.c
Original file line number Diff line number Diff line change
Expand Up @@ -142,7 +142,7 @@ void dcn_v2_cuda_backward(THCudaTensor *input, THCudaTensor *weight,
{
// Resize plane and fill with ones...
THCudaTensor_resize2d(state, ones, height_out, width_out);
THCudaTensor_fill(state, ones, 1);
THCudaTensor_fill(state, ones, 1.0f);
}

THCudaTensor_resize4d(state, grad_input, batch, channels, height, width);
Expand Down
6 changes: 3 additions & 3 deletions src/dcn_v2_cuda_double.c
Original file line number Diff line number Diff line change
Expand Up @@ -158,7 +158,7 @@ void dcn_v2_cuda_backward(THCudaDoubleTensor *input, THCudaDoubleTensor *weight,
{
// Resize plane and fill with ones...
THCudaDoubleTensor_resize2d(state, ones, height_out, width_out);
THCudaDoubleTensor_fill(state, ones, 1);
THCudaDoubleTensor_fill(state, ones, 1.0);
}

// THCudaDoubleTensor_resize4d(state, grad_input, batch, channels, height, width);
Expand Down Expand Up @@ -239,9 +239,9 @@ void dcn_v2_cuda_backward(THCudaDoubleTensor *input, THCudaDoubleTensor *weight,
// long k__ = height_out * width_out;
THCudaBlas_Dgemv(state,
't',
k_, m_, 1.0f,
k_, m_, 1.0,
THCudaDoubleTensor_data(state, grad_output_n), k_,
THCudaDoubleTensor_data(state, ones), 1, 1.0f,
THCudaDoubleTensor_data(state, ones), 1, 1.0,
THCudaDoubleTensor_data(state, grad_bias), 1);
}

Expand Down
8 changes: 4 additions & 4 deletions test.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,8 +68,8 @@ def check_gradient_double():
input.requires_grad = True

offset = torch.randn(N, deformable_groups * 2 * kW * kH, inH, inW, dtype=torch.float64).cuda()
offset.data.zero_()
offset.data -= 0.5
# offset.data.zero_()
# offset.data -= 0.00001
offset.requires_grad = True

mask = torch.rand(N, deformable_groups * 1 * kW * kH, inH, inW, dtype=torch.float64).cuda()
Expand All @@ -93,8 +93,8 @@ def check_gradient():
input.requires_grad = True

offset = torch.randn(N, deformable_groups * 2 * kW * kH, inH, inW).cuda()
offset.data.zero_()
offset.data -= 0.5
# offset.data.zero_()
# offset.data -= 0.5
offset.requires_grad = True

mask = torch.rand(N, deformable_groups * 1 * kW * kH, inH, inW).cuda()
Expand Down

0 comments on commit 41ee18d

Please sign in to comment.