make_variable consumes the Tensor if it only has one reference
Summary: Pull Request resolved: pytorch#22705

Test Plan: Imported from OSS

Differential Revision: D16192220

Pulled By: jamesr66a

fbshipit-source-id: 9c42bb759077b74a1370d3a2d7114ed3593f333b
James Reed authored and facebook-github-bot committed Jul 15, 2019
1 parent b5fa9a3 commit 815e73b
Showing 5 changed files with 23 additions and 36 deletions.
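The change folds the old make_variable_consuming fast path into make_variable itself: when the incoming at::Tensor holds the only reference to its TensorImpl and its version counter is not shared, the impl is consumed in place instead of being shallow-copied. A minimal sketch of the general pattern, using std::shared_ptr in place of c10::intrusive_ptr and a plain payload in place of TensorImpl (illustrative names, not repo code):

// Illustrative only: move-if-unique, copy-otherwise, the pattern this
// commit applies inside make_variable.
#include <memory>
#include <vector>

using Payload = std::vector<float>;

std::shared_ptr<Payload> wrap(std::shared_ptr<Payload> data) {
  if (data.use_count() == 1) {
    // Fast path: we hold the sole reference (the caller moved it in),
    // so the allocation can be reused directly.
    return data;
  }
  // Slow path: other references exist; detach by copying so they are
  // unaffected -- what make_variable did unconditionally before.
  return std::make_shared<Payload>(*data);
}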
7 changes: 7 additions & 0 deletions c10/core/TensorImpl.h
@@ -195,6 +195,9 @@ struct C10_API VariableVersion {
   c10::intrusive_ptr<VersionCounter> version_counter_;

  public:
+  bool unique() const {
+    return 1 == version_counter_.use_count();
+  }
   // NOTE: As of C++11 and 14, default-constructing a std::atomic variable
   // leaves it in a persistently undefined state. See
   // https://cplusplus.github.io/LWG/issue2334.
@@ -392,6 +395,10 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target {
     return numel_;
   }

+  bool unique_version() const {
+    return version_counter_.unique();
+  }
+
   /**
    * Whether or not a tensor is laid out in contiguous memory.
    *
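The two accessors added above expose whether the version counter is shared. Views share a version counter with their base tensor, so a unique counter is evidence that no view aliases the impl; consuming an impl whose counter is shared could corrupt version tracking for the other holders. A rough model of the semantics, with std::shared_ptr standing in for c10::intrusive_ptr (a sketch under that assumption, not repo code):

#include <memory>

struct VersionCounter { /* c10 keeps an atomic version number here */ };

// Sketch of VariableVersion's new unique() check.
struct VariableVersionSketch {
  std::shared_ptr<VersionCounter> counter_ = std::make_shared<VersionCounter>();
  bool unique() const { return counter_.use_count() == 1; }
};

int main() {
  VariableVersionSketch base;
  bool sole = base.unique();          // true: nothing else shares the counter
  VariableVersionSketch view = base;  // a view shares its base's counter
  sole = base.unique();               // false: consuming in place is now unsafe
  return sole ? 1 : 0;
}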
2 changes: 1 addition & 1 deletion tools/autograd/gen_variable_factories.py
@@ -16,7 +16,7 @@
     return at::${name}(${actuals});
   })();
   at::Tensor result =
-    autograd::make_variable_consuming(std::move(tensor), /*requires_grad=*/${requires_grad});
+    autograd::make_variable(std::move(tensor), /*requires_grad=*/${requires_grad});
   ${post_record_trace}
   return result;
 }
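For a concrete sense of why the fast path fires here: the generated factory builds tensor as a function-local with a freshly allocated TensorImpl and immediately moves it into make_variable, so the use count is 1 and the version counter is untouched. A hedged expansion for a factory like at::empty follows; the ${...} placeholders are filled by the generator, and this expansion is illustrative rather than copied from the generated file (assumes the usual ATen and autograd headers):

at::Tensor empty(at::IntArrayRef size, const at::TensorOptions& options) {
  at::Tensor tensor = ([&]() {
    return at::empty(size, options);  // ${name}(${actuals})
  })();
  // Sole reference + fresh version counter => make_variable consumes
  // the impl instead of calling shallow_copy_and_detach.
  at::Tensor result =
      autograd::make_variable(std::move(tensor), /*requires_grad=*/false);
  // ${post_record_trace} expands to tracing hooks, omitted here.
  return result;
}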
2 changes: 1 addition & 1 deletion tools/autograd/gen_variable_type.py
@@ -739,7 +739,7 @@ def wrap_view_single(output_var, base_var):
             extra_wrapping_stmts.append(stmt)
         return call, extra_wrapping_stmts
     else:
-        return 'as_variable({})'.format(call), []
+        return 'as_variable(std::move({}))'.format(call), []

 def enforce_same_tensorimpl_and_storage(env, call):
     save_ptrs_stmts = []
46 changes: 13 additions & 33 deletions torch/csrc/autograd/variable.h
@@ -112,16 +112,6 @@ struct TORCH_API Variable : public at::Tensor {
       bool requires_grad,
       bool allow_tensor_metadata_change);

-  /// Creates a `Variable` from the given `Tensor`, consuming its underlying `TensorImpl`.
-  /// This is intended to be used from functions that immediately create a `Tensor`,
-  /// convert it to a `Variable`, and then free it; it has been found to
-  /// decrease the overhead of those operations, in some situations.
-  /// The comments about `requires_grad` and `data` on the above version also apply to this one.
-  friend Variable make_variable_consuming(
-      at::Tensor data,
-      bool requires_grad,
-      bool allow_tensor_metadata_change);
-
   /// Creates a `Variable` from the given `Tensor`, copying its underlying `TensorImpl`.
   /// `gradient_edge` should be a (function, input_nr) pair specifying the function
   /// in the autograd graph, and what particular input of that function, this
Expand Down Expand Up @@ -535,29 +525,19 @@ inline Variable make_variable(
!data.is_variable(),
"Must not create a new variable from a variable, use its .tensor_data()");
if (data.defined()) {
auto data_impl_copy = data.getIntrusivePtr()->shallow_copy_and_detach(
/*version_counter=*/0,
/*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
data_impl_copy->set_autograd_meta(c10::guts::make_unique<Variable::AutogradMeta>(
data_impl_copy.get(), requires_grad));
return Variable(data_impl_copy);
}
return Variable();
}

inline Variable make_variable_consuming(
at::Tensor data,
bool requires_grad = false,
bool allow_tensor_metadata_change = true) {
TORCH_CHECK(
!data.is_variable(),
"Must not create a new variable from a variable, use its .tensor_data()");
if (data.defined()) {
AT_ASSERT(data.getIntrusivePtr().use_count() == 1);
auto data_impl = data.getIntrusivePtr();
data_impl->set_allow_tensor_metadata_change(allow_tensor_metadata_change);
data_impl->set_autograd_meta(c10::guts::make_unique<Variable::AutogradMeta>(data_impl.get(), requires_grad));
return Variable(std::move(data_impl));
if (data.getIntrusivePtr().use_count() == 1 && data.getIntrusivePtr()->unique_version()) {
auto data_impl = data.getIntrusivePtr();
data_impl->set_allow_tensor_metadata_change(allow_tensor_metadata_change);
data_impl->set_autograd_meta(c10::guts::make_unique<Variable::AutogradMeta>(data_impl.get(), requires_grad));
return Variable(std::move(data_impl));
} else {
auto data_impl_copy = data.getIntrusivePtr()->shallow_copy_and_detach(
/*version_counter=*/0,
/*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
data_impl_copy->set_autograd_meta(c10::guts::make_unique<Variable::AutogradMeta>(
data_impl_copy.get(), requires_grad));
return Variable(data_impl_copy);
}
}
return Variable();
}
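Restating the new branch condition on its own: both halves must hold before the impl can be stolen. A minimal predicate built only from calls that appear in the diff above (the helper name can_consume is invented for illustration):

#include <ATen/ATen.h>

// Invented helper, not repo code: the condition the rewritten
// make_variable checks before taking the consuming fast path.
bool can_consume(const at::Tensor& data) {
  // 1) sole strong reference to the TensorImpl, and
  // 2) a version counter no other tensor (e.g. a view) shares.
  return data.getIntrusivePtr().use_count() == 1 &&
         data.getIntrusivePtr()->unique_version();
}
// If true: set flags on the existing impl and wrap it (the old
// make_variable_consuming behavior). If false: shallow_copy_and_detach,
// exactly as make_variable behaved before this commit.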
2 changes: 1 addition & 1 deletion torch/csrc/distributed/c10d/reducer.cpp
@@ -457,7 +457,7 @@ void Reducer::initialize_buckets(
     // This must be a Variable because as of Apr 2019 there is still
     // a distinction between the Tensor and Variable types, and it
     // is not recommended (or sometimes even possible) to mix and match.
-    replica.contents = torch::autograd::make_variable_consuming(
+    replica.contents = torch::autograd::make_variable(
         at::empty({static_cast<long>(offset)}, options));
   }

