Compatibility with PyTorch 1.4, fix AT_CHECK warnings
ducksoup committed Jan 27, 2020
1 parent ff5439b commit 35425e7
Showing 5 changed files with 47 additions and 41 deletions.
README.md (2 changes: 1 addition & 1 deletion)

@@ -54,7 +54,7 @@ To install PyTorch, please refer to https://github.com/pytorch/pytorch#installat

To install the package containing the iABN layers:
```bash
-pip install git+https://github.com/mapillary/inplace_abn.git@v1.0.10
+pip install git+https://github.com/mapillary/inplace_abn.git@v1.0.11
```
Note that some parts of InPlace-ABN have native C++/CUDA implementations, meaning that the command above will need to
compile them.
include/checks.h (14 changes: 10 additions & 4 deletions)

@@ -2,10 +2,16 @@

#include <ATen/ATen.h>

-#define CHECK_CUDA(x) AT_CHECK((x).is_cuda(), #x " must be a CUDA tensor")
-#define CHECK_CPU(x) AT_CHECK(!(x).is_cuda(), #x " must be a CPU tensor")
-#define CHECK_NOT_HALF(x) AT_CHECK((x).scalar_type() != at::ScalarType::Half, #x " can't have type Half")
-#define CHECK_SAME_TYPE(x, y) AT_CHECK((x).scalar_type() == (y).scalar_type(), #x " and " #y " must have the same scalar type")
+#ifdef TORCH_CHECK
+#define IABN_CHECK TORCH_CHECK
+#else
+#define IABN_CHECK AT_CHECK
+#endif
+
+#define CHECK_CUDA(x) IABN_CHECK((x).is_cuda(), #x " must be a CUDA tensor")
+#define CHECK_CPU(x) IABN_CHECK(!(x).is_cuda(), #x " must be a CPU tensor")
+#define CHECK_NOT_HALF(x) IABN_CHECK((x).scalar_type() != at::ScalarType::Half, #x " can't have type Half")
+#define CHECK_SAME_TYPE(x, y) IABN_CHECK((x).scalar_type() == (y).scalar_type(), #x " and " #y " must have the same scalar type")

inline bool have_same_dims(const at::Tensor& x, const at::Tensor& y) {
bool success = x.ndimension() == y.ndimension();
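For orientation, here is a minimal sketch of how the macros above are typically used at the top of an extension entry point, in the same spirit as the checks in src/inplace_abn.cpp below. The `check_inputs` helper is hypothetical and not part of the repository; only the macros come from checks.h:

```cpp
#include <ATen/ATen.h>
#include "checks.h"

// Hypothetical helper: validate arguments before dispatching to a kernel.
// IABN_CHECK expands to TORCH_CHECK when that macro exists (newer PyTorch,
// e.g. 1.4) and falls back to AT_CHECK on older releases, so the same source
// builds on both without deprecation warnings.
void check_inputs(const at::Tensor& x, const at::Tensor& weight) {
  CHECK_CUDA(x);               // x must be a CUDA tensor
  CHECK_NOT_HALF(weight);      // half precision is rejected here
  CHECK_SAME_TYPE(x, weight);  // scalar types must match
  IABN_CHECK(x.ndimension() >= 2, "x should have at least 2 dimensions");
}
```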
inplace_abn/_backend.pyi (4 changes: 2 additions & 2 deletions)

@@ -21,8 +21,8 @@ def backward_reduce(y_act: torch.Tensor, dy_act: torch.Tensor, weight: Optional[
-> Tuple[torch.Tensor, torch.Tensor]: ...


-def backward(xhat: torch.Tensor, dy: torch.Tensor, var: torch.Tensor, count: torch.Tensor, sum_dy: torch.Tensor,
-sum_xhat_dy: torch.Tensor, weight: Optional[torch.Tensor], eps: float) -> torch.Tensor: ...
+def backward_train(xhat: torch.Tensor, dy: torch.Tensor, var: torch.Tensor, count: torch.Tensor, sum_dy: torch.Tensor,
+sum_xhat_dy: torch.Tensor, weight: Optional[torch.Tensor], eps: float) -> torch.Tensor: ...


def backward_test(dy: torch.Tensor, var: torch.Tensor, weight: Optional[torch.Tensor], eps: float) -> torch.Tensor: ...
inplace_abn/functions.py (2 changes: 1 addition & 1 deletion)

@@ -123,7 +123,7 @@ def backward(ctx, dy_act):
        if ctx.needs_input_grad[0]:
            if ctx.training:
                # This overwrites dy with dx
-               _backend.backward(xhat, dy, var, count, sum_dy, sum_xhat_dy, weight, ctx.eps)
+               _backend.backward_train(xhat, dy, var, count, sum_dy, sum_xhat_dy, weight, ctx.eps)
                dx = dy
            else:
                dx = _backend.backward_test(dy_act, var, weight, ctx.eps)
src/inplace_abn.cpp (66 changes: 33 additions & 33 deletions)

@@ -13,7 +13,7 @@
**********************************************************************************************************************/

std::tuple<at::Tensor, at::Tensor, at::Tensor> statistics(const at::Tensor& x) {
-AT_CHECK(x.ndimension() >= 2, "x should have at least 2 dimensions");
+IABN_CHECK(x.ndimension() >= 2, "x should have at least 2 dimensions");

CUDA_DISPATCH(x, statistics, x)
}
@@ -32,13 +32,13 @@ std::tuple<at::Tensor, at::Tensor, at::Tensor> reduce_statistics(

// Check types and dimensions
CHECK_SAME_TYPE(all_mean, all_var);
-AT_CHECK(all_count.scalar_type() == at::ScalarType::Long, "all_count should have type int64");
-AT_CHECK(all_mean.ndimension() == 2, "all_mean should have size N x C");
-AT_CHECK(all_var.ndimension() == 2, "all_var should have size N x C");
-AT_CHECK(all_count.ndimension() == 2 && all_count.size(1) == 1, "all_count should have size N x 1");
-AT_CHECK(all_mean.size(0) == all_var.size(0) && all_mean.size(0) == all_count.size(0),
+IABN_CHECK(all_count.scalar_type() == at::ScalarType::Long, "all_count should have type int64");
+IABN_CHECK(all_mean.ndimension() == 2, "all_mean should have size N x C");
+IABN_CHECK(all_var.ndimension() == 2, "all_var should have size N x C");
+IABN_CHECK(all_count.ndimension() == 2 && all_count.size(1) == 1, "all_count should have size N x 1");
+IABN_CHECK(all_mean.size(0) == all_var.size(0) && all_mean.size(0) == all_count.size(0),
"Inputs should have the same size in dimension 0");
-AT_CHECK(all_mean.size(1) == all_var.size(1), "all_mean and all_var should have the same size in dimension 1");
+IABN_CHECK(all_mean.size(1) == all_var.size(1), "all_mean and all_var should have the same size in dimension 1");

return reduce_statistics_cuda(all_mean, all_var, all_count);
}
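A note on the shapes these checks describe: all_mean and all_var carry N rows of per-channel statistics (C columns), and all_count carries one int64 count per row. A hypothetical example of arguments that would satisfy the checks, with made-up sizes N = 4 and C = 64:

```cpp
#include <ATen/ATen.h>

// Hypothetical shapes for reduce_statistics with N = 4 rows and C = 64 channels.
void reduce_statistics_shape_example() {
  at::Tensor all_mean  = at::zeros({4, 64});  // N x C
  at::Tensor all_var   = at::ones({4, 64});   // N x C, same scalar type as all_mean
  at::Tensor all_count = at::full({4, 1}, 16, at::TensorOptions().dtype(at::kLong));  // N x 1, int64 counts
  // reduce_statistics(all_mean, all_var, all_count);  // CUDA-only entry point in this file
}
```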
@@ -48,17 +48,17 @@ void forward(at::Tensor& x, const at::Tensor& mean, const at::Tensor& var,
const c10::optional<at::Tensor>& weight, const c10::optional<at::Tensor>& bias,
float eps, Activation activation, float activation_param) {
// Check dimensions and types
-AT_CHECK(x.ndimension() >= 2, "x should have at least 2 dimensions");
-AT_CHECK(is_compatible_stat(x, mean), "mean is not compatible with x (wrong size or scalar type)");
-AT_CHECK(is_compatible_stat(x, var), "var is not compatible with x (wrong size or scalar type)");
+IABN_CHECK(x.ndimension() >= 2, "x should have at least 2 dimensions");
+IABN_CHECK(is_compatible_stat(x, mean), "mean is not compatible with x (wrong size or scalar type)");
+IABN_CHECK(is_compatible_stat(x, var), "var is not compatible with x (wrong size or scalar type)");
if (weight.has_value())
-AT_CHECK(is_compatible_weight(x, weight.value()), "weight is not compatible with x (wrong size or scalar type)");
+IABN_CHECK(is_compatible_weight(x, weight.value()), "weight is not compatible with x (wrong size or scalar type)");
if (bias.has_value())
-AT_CHECK(is_compatible_weight(x, bias.value()), "bias is not compatible with x (wrong size or scalar type)");
+IABN_CHECK(is_compatible_weight(x, bias.value()), "bias is not compatible with x (wrong size or scalar type)");
if (weight.has_value() && bias.has_value())
CHECK_SAME_TYPE(weight.value(), bias.value());

-AT_CHECK((weight.has_value() && bias.has_value()) || (!weight.has_value() && !bias.has_value()),
+IABN_CHECK((weight.has_value() && bias.has_value()) || (!weight.has_value() && !bias.has_value()),
"weight and bias must be equally present or not present");

CUDA_DISPATCH(x, forward, x, mean, var, weight, bias, eps, activation, activation_param)
@@ -68,38 +68,38 @@ std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> backward_reduce(
const at::Tensor& y_act, const at::Tensor& dy_act, const c10::optional<at::Tensor>& weight,
const c10::optional<at::Tensor>& bias, float eps, Activation activation, float activation_param) {
// Check dimensions and types
-AT_CHECK(y_act.ndimension() >= 2, "y_act should have at least 2 dimensions");
-AT_CHECK(have_same_dims(y_act, dy_act), "y_act and dy_act should have the same size");
+IABN_CHECK(y_act.ndimension() >= 2, "y_act should have at least 2 dimensions");
+IABN_CHECK(have_same_dims(y_act, dy_act), "y_act and dy_act should have the same size");
CHECK_SAME_TYPE(y_act, dy_act);
if (weight.has_value())
-AT_CHECK(is_compatible_weight(y_act, weight.value()),
+IABN_CHECK(is_compatible_weight(y_act, weight.value()),
"weight is not compatible with y_act (wrong size or scalar type)");
if (bias.has_value())
-AT_CHECK(is_compatible_weight(y_act, bias.value()),
+IABN_CHECK(is_compatible_weight(y_act, bias.value()),
"bias is not compatible with y_act (wrong size or scalar type)");
if (weight.has_value() && bias.has_value())
CHECK_SAME_TYPE(weight.value(), bias.value());

-AT_CHECK((weight.has_value() && bias.has_value()) || (!weight.has_value() && !bias.has_value()),
+IABN_CHECK((weight.has_value() && bias.has_value()) || (!weight.has_value() && !bias.has_value()),
"weight and bias must be equally present or not present");

CUDA_DISPATCH(y_act, backward_reduce, y_act, dy_act, weight, bias, eps, activation, activation_param)
}

-void backward(const at::Tensor& xhat, at::Tensor& dy, const at::Tensor& var, const at::Tensor& count,
-const at::Tensor& sum_dy, const at::Tensor& sum_xhat_dy, const c10::optional<at::Tensor>& weight,
-float eps) {
+void backward_train(const at::Tensor& xhat, at::Tensor& dy, const at::Tensor& var, const at::Tensor& count,
+const at::Tensor& sum_dy, const at::Tensor& sum_xhat_dy, const c10::optional<at::Tensor>& weight,
+float eps) {
// Check dimensions and types
-AT_CHECK(xhat.ndimension() >= 2, "xhat should have at least 2 dimensions");
-AT_CHECK(have_same_dims(xhat, dy), "xhat and dy should have the same size");
+IABN_CHECK(xhat.ndimension() >= 2, "xhat should have at least 2 dimensions");
+IABN_CHECK(have_same_dims(xhat, dy), "xhat and dy should have the same size");
CHECK_SAME_TYPE(xhat, dy);
-AT_CHECK(is_compatible_stat(xhat, var), "var is not compatible with xhat (wrong size or scalar type)");
-AT_CHECK(count.ndimension() == 1 && count.size(0) == 1, "count should be a vector with a single element");
-AT_CHECK(count.scalar_type() == at::ScalarType::Long, "count should have type int64");
-AT_CHECK(is_compatible_stat(xhat, sum_dy), "sum_dy is not compatible with xhat (wrong size or scalar type)");
-AT_CHECK(is_compatible_stat(xhat, sum_xhat_dy), "sum_xhat_dy is not compatible with xhat (wrong size or scalar type)");
+IABN_CHECK(is_compatible_stat(xhat, var), "var is not compatible with xhat (wrong size or scalar type)");
+IABN_CHECK(count.ndimension() == 1 && count.size(0) == 1, "count should be a vector with a single element");
+IABN_CHECK(count.scalar_type() == at::ScalarType::Long, "count should have type int64");
+IABN_CHECK(is_compatible_stat(xhat, sum_dy), "sum_dy is not compatible with xhat (wrong size or scalar type)");
+IABN_CHECK(is_compatible_stat(xhat, sum_xhat_dy), "sum_xhat_dy is not compatible with xhat (wrong size or scalar type)");
if (weight.has_value())
-AT_CHECK(is_compatible_weight(xhat, weight.value()),
+IABN_CHECK(is_compatible_weight(xhat, weight.value()),
"weight is not compatible with xhat (wrong size or scalar type)");

CUDA_DISPATCH(xhat, backward, xhat, dy, var, count, sum_dy, sum_xhat_dy, weight, eps)
@@ -108,10 +108,10 @@ void backward(const at::Tensor& xhat, at::Tensor& dy, const at::Tensor& var, con
at::Tensor backward_test(const at::Tensor& dy_, const at::Tensor& var, const c10::optional<at::Tensor>& weight,
float eps) {
// Check dimensions and types
-AT_CHECK(dy_.ndimension() >= 2, "dy should have at least 2 dimensions");
-AT_CHECK(is_compatible_stat(dy_, var), "var is not compatible with dy (wrong size or scalar type)");
+IABN_CHECK(dy_.ndimension() >= 2, "dy should have at least 2 dimensions");
+IABN_CHECK(is_compatible_stat(dy_, var), "var is not compatible with dy (wrong size or scalar type)");
if (weight.has_value())
-AT_CHECK(is_compatible_weight(dy_, weight.value()),
+IABN_CHECK(is_compatible_weight(dy_, weight.value()),
"weight is not compatible with dy (wrong size or scalar type)");

// TODO: optimize implementation for GPU
@@ -142,6 +142,6 @@ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {

// Backward methods
m.def("backward_reduce", &backward_reduce, "First step of the backward pass");
m.def("backward", &backward, "Second step of the backward pass. This is an in-place operation w.r.t. dy");
m.def("backward_train", &backward_train, "Second step of the backward pass. This is an in-place operation w.r.t. dy");
m.def("backward_test", &backward_test, "Second step of the backward pass, test mode");
}
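Two notes on this hunk. The name passed to m.def must match both the stub in inplace_abn/_backend.pyi and the call site in inplace_abn/functions.py, which is why all three files switch from backward to backward_train in this commit. And when one of the checks above fails, TORCH_CHECK (like AT_CHECK before it) throws a c10::Error that PyTorch surfaces in Python as a RuntimeError carrying the message string. A minimal sketch of that behaviour; the check_failure_demo function is hypothetical and not part of the repository:

```cpp
#include <ATen/ATen.h>
#include <c10/util/Exception.h>

// Hypothetical demonstration: a failed TORCH_CHECK throws c10::Error with the
// supplied message, which is what callers of the functions above would see.
void check_failure_demo() {
  at::Tensor x = at::zeros({8});  // 1-D tensor, so the check below fails
  try {
    TORCH_CHECK(x.ndimension() >= 2, "x should have at least 2 dimensions");
  } catch (const c10::Error& e) {
    // e.what() contains the message plus context about where the check failed.
  }
}
```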
