[ad] Add support for power functions (RobotLocomotion#17655)
jwnimmer-tri authored Aug 18, 2022
1 parent 9a0f31a commit 7abea05
Showing 11 changed files with 519 additions and 4 deletions.
5 changes: 5 additions & 0 deletions common/ad/BUILD.bazel
@@ -80,13 +80,18 @@ drake_cc_googletest(
"test/standard_operations_cosh_test.cc",
"test/standard_operations_dec_test.cc",
"test/standard_operations_div_test.cc",
"test/standard_operations_exp_test.cc",
"test/standard_operations_inc_test.cc",
"test/standard_operations_integer_test.cc",
"test/standard_operations_log_test.cc",
"test/standard_operations_max_test.cc",
"test/standard_operations_min_test.cc",
"test/standard_operations_mul_test.cc",
"test/standard_operations_pow_special_test.cc",
"test/standard_operations_pow_test.cc",
"test/standard_operations_sin_test.cc",
"test/standard_operations_sinh_test.cc",
"test/standard_operations_sqrt_test.cc",
"test/standard_operations_stream_test.cc",
"test/standard_operations_sub_test.cc",
"test/standard_operations_tan_test.cc",
22 changes: 18 additions & 4 deletions common/ad/internal/partials.cc
@@ -41,6 +41,17 @@ Partials::Partials(Eigen::Index size, Eigen::Index offset, double coeff)
Partials::Partials(const Eigen::Ref<const Eigen::VectorXd>& value)
: derivatives_{value} {}

void Partials::MatchSizeOf(const Partials& other) {
if (other.size() == 0) {
return;
}
if (size() == 0) {
derivatives_ = Eigen::VectorXd::Zero(other.size());
return;
}
ThrowIfDifferentSize(other);
}

void Partials::Add(const Partials& other) {
AddScaled(1.0, other);
}
@@ -53,15 +64,18 @@ void Partials::AddScaled(double scale, const Partials& other) {
     derivatives_ = scale * other.derivatives_;
     return;
   }
+  ThrowIfDifferentSize(other);
+  derivatives_ += scale * other.derivatives_;
+}
+
+void Partials::ThrowIfDifferentSize(const Partials& other) {
   if (size() != other.size()) {
     throw std::logic_error(fmt::format(
         "The size of AutoDiff partial derivative vectors must be uniform"
         " throughout the computation, but two different sizes ({} and {})"
-        " were encountered at runtime. The derivatives were {} and {}.",
-        size(), other.size(), derivatives_.transpose(),
-        other.derivatives_.transpose()));
+        " were encountered at runtime.",
+        size(), other.size()));
   }
-  derivatives_ += scale * other.derivatives_;
 }
 
 } // namespace internal
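
To summarize the new MatchSizeOf semantics, here is a minimal usage sketch (the variable names are hypothetical, but the behavior mirrors the MatchSizeOf unit test added below, where Partials{n, i} constructs a size-n unit vector):

  Partials a;                  // Empty: size() == 0.
  Partials b{4, 2};            // Size 4: the unit vector with ∂/∂v₂ == 1.
  const Partials wrong{5, 0};  // Size 5.

  a.MatchSizeOf(b);            // a becomes the size-4 zero vector.
  b.MatchSizeOf(Partials{});   // No-op: an empty `other` imposes nothing.
  b.MatchSizeOf(wrong);        // Throws std::logic_error (sizes 4 vs. 5).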
10 changes: 10 additions & 0 deletions common/ad/internal/partials.h
@@ -46,6 +46,14 @@ class Partials {
return derivatives_.size();
}

/* Updates `this` to be the same size as `other`.
If `this` and `other` are already the same size then does nothing.
Otherwise, if `other` has size 0 then does nothing.
Otherwise, if `this` has size 0 then sets `this` to a zero vector of size
`other.size()`.
Otherwise, throws an exception for mismatched sizes. */
void MatchSizeOf(const Partials& other);

/* Set this to zero. */
void SetZero() {
derivatives_.setZero();
@@ -80,6 +88,8 @@
}

private:
void ThrowIfDifferentSize(const Partials& other);

// TODO(jwnimmer-tri) Replace this implementation with a more efficient
// representation.
Eigen::VectorXd derivatives_;
130 changes: 130 additions & 0 deletions common/ad/internal/standard_operations.cc
@@ -144,6 +144,136 @@ AutoDiff tanh(AutoDiff x) {
return x;
}

AutoDiff exp(AutoDiff x) {
// ∂/∂x eˣ = eˣ
const double new_value = std::exp(x.value());
x.partials().Mul(new_value);
x.value() = new_value;
return x;
}

AutoDiff log(AutoDiff x) {
const double new_value = std::log(x.value());
// ∂/∂x ln(x) = x⁻¹
x.partials().Div(x.value());
x.value() = new_value;
return x;
}
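
// Aside (illustration, not part of the diff): exp and log above share the
// same in-place pattern: compute the new value, scale the stored partials
// by the local derivative, then overwrite the value. Numerically, at x = 2:
//   exp: value = e² ≈ 7.389, and the Mul scale is that same new value e².
//   log: value = ln 2 ≈ 0.693, and the Div argument is the old value 2,
//        giving ∂/∂x = 1/2.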

namespace {

constexpr double kNaN = std::numeric_limits<double>::quiet_NaN();

// Iterates over the partials of `input`. For any non-zero elements, sets the
// corresponding partial in `output` to NaN.
// @pre output->MatchSizeOf(input) has already been performed.
void UndefineOutputGradWhereInputGradNonzero(
const AutoDiff& input, AutoDiff* output) {
DRAKE_DEMAND(output != nullptr);

// If either input or output is empty, there's nothing to do.
if (output->partials().size() == 0) {
// MatchSizeOf guarantees this.
DRAKE_ASSERT(input.partials().size() == 0);
return;
}
if (input.partials().size() == 0) {
return;
}

// In case input and output are the same object, we need to call the
// non-const member function first.
auto& output_grad = output->partials().get_raw_storage_mutable();
const auto& input_grad = input.partials().make_const_xpr();

// Update the `output` per our API contract.
const int size = output_grad.size();
DRAKE_ASSERT(size == input_grad.size());
for (int i = 0; i < size; ++i) {
if (!(input_grad[i] == 0.0)) {
output_grad[i] = kNaN;
}
}
}

} // namespace
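
// Aside (illustration, not part of the diff): given grad(input) = [0, 1, 0]
// and grad(output) = [a, b, c], the helper above rewrites grad(output) to
// [a, NaN, c]. Only the components where the input actually depends on a
// variable become undefined; the rest are left alone.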

AutoDiff pow(AutoDiff base_ad, const AutoDiff& exp_ad) {
// It's convenient to immediately set up the result, so we need to save a
// copy of `base_ad.value()` first. Might as well copy `exp` too, while
// we're at it.
const double base = base_ad.value();
const double exp = exp_ad.value();

// The result starts out holding the proper return value, but its partials
// are initially just grad(base). We'll fix them up below; for now, we only
// ensure they have the correct size.
AutoDiff result = std::move(base_ad);
result.value() = std::pow(base, exp);
result.partials().MatchSizeOf(exp_ad.partials());

// If any of {base, exp, result} are NaN, then grad(result) is always NaN.
if (std::isnan(result.value()) || std::isnan(base) || std::isnan(exp)) {
result.partials().Mul(kNaN);
return result;
}

// When dealing with infinities (or a zero base where a sign-change to the
// exponent introduces infinities), trying to compute well-defined gradients
// with appropriate symmetries is impractical. In that case, when grad(base)
// and grad(exp) are both zero we'll leave grad(result) as zero, but otherwise
// any non-zero input gradient becomes ill-defined in the result.
if (base == 0 || !std::isfinite(base) || !std::isfinite(exp)) {
UndefineOutputGradWhereInputGradNonzero(result, &result);
UndefineOutputGradWhereInputGradNonzero(exp_ad, &result);
return result;
}

// For the gradient, we have:
// ∂/∂v bˣ
// = ∂/∂v eˡⁿ⁽ᵇ⁾ˣ # Power via logarithms
// = eˡⁿ⁽ᵇ⁾ˣ ∂/∂v ln(b)x # Derivative of exponentiation
// = bˣ ∂/∂v ln(b)x # Undo power via logarithms
// = bˣ (x ∂/∂v ln(b) + ln(b) ∂x/∂v) # Multiplication chain rule
// = bˣ (xb⁻¹ ∂b/∂v + ln(b) ∂x/∂v) # Derivative of logarithm
// = xbˣ⁻¹ ∂b/∂v + bˣln(b) ∂x/∂v # Distribute

// Account for the contribution of grad(base) to grad(result).
// Don't try to compute (xbˣ⁻¹) when x == 0, in case it comes out as NaN
// (e.g., 0 * ∞). Instead, just let x == 0 win, so that grad(base)
// contributes nothing to the result.
const double base_grad_scale = (exp == 0) ? 0 : exp * std::pow(base, exp - 1);
DRAKE_DEMAND(std::isfinite(base_grad_scale));
result.partials().Mul(base_grad_scale);

// Account for the contribution of grad(exp) to grad(result).
if (base < 0) {
UndefineOutputGradWhereInputGradNonzero(exp_ad, &result);
} else {
DRAKE_DEMAND(base > 0);
const double exp_grad_scale = result.value() * std::log(base);
DRAKE_DEMAND(std::isfinite(exp_grad_scale));
result.partials().AddScaled(exp_grad_scale, exp_ad.partials());
}

return result;
}

AutoDiff pow(double base, const AutoDiff& exp) {
return pow(AutoDiff{base}, exp);
}

AutoDiff pow(AutoDiff base, double exp) {
return pow(std::move(base), AutoDiff{exp});
}
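
// Aside (worked check, not part of the diff): for b = 2 and x = 3, the
// derivation inside pow() gives
//   ∂(bˣ)/∂b = x·bˣ⁻¹   = 3·2² = 12
//   ∂(bˣ)/∂x = bˣ·ln(b) = 8·ln 2 ≈ 5.545
// so with grad(b) = [1, 0] and grad(x) = [0, 1], the result of pow(b, x)
// carries value 8 and gradient [12, 8·ln 2].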

AutoDiff sqrt(AutoDiff x) {
// ∂/∂x x¹ᐟ² = ½x⁻¹ᐟ² = 1/(2x¹ᐟ²)
const double new_value = std::sqrt(x.value());
x.partials().Div(2 * new_value);
x.value() = new_value;
return x;
}

AutoDiff ceil(AutoDiff x) {
x.value() = std::ceil(x.value());
x.partials().SetZero();
44 changes: 44 additions & 0 deletions common/ad/internal/standard_operations.h
@@ -426,6 +426,50 @@ inline AutoDiff abs2(AutoDiff x) {

//@}

/// @name Math functions: Exponential and Power functions
///
/// https://en.cppreference.com/w/cpp/numeric/math#Exponential_functions
///
/// https://en.cppreference.com/w/cpp/numeric/math#Power_functions
//@{

/** ADL overload to mimic std::exp from <cmath>. */
AutoDiff exp(AutoDiff x);

/** ADL overload to mimic std::log from <cmath>. */
AutoDiff log(AutoDiff x);

/** ADL overload to mimic std::pow from <cmath>.
The resulting partial derivative ∂/∂vᵢ is undefined (i.e., NaN) for all of the
following cases:
- base is NaN
- exp is NaN
- pow(base, exp) is NaN
- ∂base/∂vᵢ is non-zero and either:
- base is zero or not finite, or
- exp is not finite
- ∂exp/∂vᵢ is non-zero and either:
- base is not positive-finite, or
- exp is not finite
In all other cases, if the base and exp partial derivatives were well-defined
then the resulting partial derivatives will also be well-defined. */
AutoDiff pow(AutoDiff base, const AutoDiff& exp);

/** ADL overload to mimic std::pow from <cmath>.
Refer to pow(AutoDiff, const AutoDiff&) for an explanation of special cases. */
AutoDiff pow(double base, const AutoDiff& exp);

/** ADL overload to mimic std::pow from <cmath>.
Refer to pow(AutoDiff, const AutoDiff&) for an explanation of special cases. */
AutoDiff pow(AutoDiff base, double exp);

/** ADL overload to mimic std::sqrt from <cmath>. */
AutoDiff sqrt(AutoDiff x);

//@}
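
// Aside (hypothetical example, not part of the diff; assumes an
// AutoDiff{value, gradient} constructor for illustration): the special
// cases above only poison contributions that are genuinely ill-defined.
//   pow(AutoDiff{-2.0, g}, 3.0);  // Value -8; gradient 12·g is fine,
//                                 // since exp carries no derivatives.
//   pow(-2.0, AutoDiff{3.0, g});  // Value -8; gradient is NaN wherever g
//                                 // is non-zero, since bˣ·ln(b) has no
//                                 // real value for negative b.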

/// @name Math functions: Trigonometric functions
///
/// https://en.cppreference.com/w/cpp/numeric/math#Trigonometric_functions
22 changes: 22 additions & 0 deletions common/ad/test/partials_test.cc
@@ -93,6 +93,28 @@ TEST_F(PartialsTest, SetZeroFromFullCtor) {
EXPECT_TRUE(CompareMatrices(dut.make_const_xpr(), Vector4d::Zero()));
}

TEST_F(PartialsTest, MatchSizeOf) {
Partials dut{4, 2};

// No-op to match size == 0.
dut.MatchSizeOf(Partials{});
EXPECT_TRUE(CompareMatrices(dut.make_const_xpr(), Vector4d::Unit(2)));

// No-op to match size == 4.
dut.MatchSizeOf(Partials{4, 0});
EXPECT_TRUE(CompareMatrices(dut.make_const_xpr(), Vector4d::Unit(2)));

// Inherits size == 4 from dut.
Partials foo;
foo.MatchSizeOf(dut);
EXPECT_TRUE(CompareMatrices(foo.make_const_xpr(), Vector4d::Zero()));

// Mismatched sizes.
const Partials wrong{5, 0};
DRAKE_EXPECT_THROWS_MESSAGE(foo.MatchSizeOf(wrong),
".*different sizes.*");
}

TEST_F(PartialsTest, Mul) {
Partials dut{4, 2};

17 changes: 17 additions & 0 deletions common/ad/test/standard_operations_exp_test.cc
@@ -0,0 +1,17 @@
#include "drake/common/ad/auto_diff.h"
#include "drake/common/ad/test/standard_operations_test.h"

namespace drake {
namespace test {
namespace {

TEST_F(StandardOperationsTest, Exp) {
CHECK_UNARY_FUNCTION(exp, x, y, 0.1);
CHECK_UNARY_FUNCTION(exp, x, y, -0.1);
CHECK_UNARY_FUNCTION(exp, y, x, 0.1);
CHECK_UNARY_FUNCTION(exp, y, x, -0.1);
}

} // namespace
} // namespace test
} // namespace drake
17 changes: 17 additions & 0 deletions common/ad/test/standard_operations_log_test.cc
@@ -0,0 +1,17 @@
#include "drake/common/ad/auto_diff.h"
#include "drake/common/ad/test/standard_operations_test.h"

namespace drake {
namespace test {
namespace {

TEST_F(StandardOperationsTest, Log) {
CHECK_UNARY_FUNCTION(log, x, y, 0.1);
CHECK_UNARY_FUNCTION(log, x, y, -0.1);
CHECK_UNARY_FUNCTION(log, y, x, 0.1);
CHECK_UNARY_FUNCTION(log, y, x, -0.1);
}

} // namespace
} // namespace test
} // namespace drake