From d11a8dc4c18f481d5316916627f43182d524eb9c Mon Sep 17 00:00:00 2001
From: Ante Qu <7333308+antequ@users.noreply.github.com>
Date: Fri, 14 Feb 2020 18:13:56 -0800
Subject: [PATCH] Remove -inl patterns in systems/analysis and move
 implementations to cc files (#12657)

---
 systems/analysis/BUILD.bazel                  |   6 +-
 .../analysis/antiderivative_function-inl.h    |   5 -
 systems/analysis/antiderivative_function.cc   |  10 +-
 systems/analysis/antiderivative_function.h    |   5 +
 .../analysis/bogacki_shampine3_integrator.cc  | 169 +++-
 .../analysis/bogacki_shampine3_integrator.h   | 159 +---
 systems/analysis/hermitian_dense_output.h     |   5 -
 .../analysis/implicit_euler_integrator-inl.h  | 470 -----------
 systems/analysis/implicit_euler_integrator.cc | 460 ++++++++++-
 systems/analysis/implicit_euler_integrator.h  |  10 +-
 systems/analysis/implicit_integrator.cc       | 435 ++++++++++-
 systems/analysis/implicit_integrator.h        | 423 +---------
 systems/analysis/initial_value_problem-inl.h  | 277 -------
 systems/analysis/initial_value_problem.cc     | 267 ++++++-
 systems/analysis/initial_value_problem.h      |   8 +-
 systems/analysis/integrator_base.cc           | 468 +++++++++++
 systems/analysis/integrator_base.h            | 462 +----------
 systems/analysis/lyapunov.cc                  |   1 +
 systems/analysis/lyapunov.h                   |   3 +-
 systems/analysis/radau_integrator.cc          | 730 +++++++++++++++++-
 systems/analysis/radau_integrator.h           | 729 +----------------
 .../analysis/runge_kutta3_integrator-inl.h    | 163 ----
 systems/analysis/runge_kutta3_integrator.cc   | 154 +++-
 systems/analysis/runge_kutta3_integrator.h    |   5 +
 systems/analysis/runge_kutta5_integrator.cc   | 246 +++++-
 systems/analysis/runge_kutta5_integrator.h    | 244 +----
 .../scalar_initial_value_problem-inl.h        |   5 -
 .../analysis/scalar_initial_value_problem.cc  |  11 +-
 .../analysis/scalar_initial_value_problem.h   |   6 +-
 .../analysis/semi_explicit_euler_integrator.h |   1 -
 systems/analysis/simulator.cc                 | 669 +++++++++++++++-
 systems/analysis/simulator.h                  | 667 +---------------
 systems/analysis/simulator_gflags.h           |   1 -
 .../test/initial_value_problem_test.cc        |   1 -
 .../test/scalar_initial_value_problem_test.cc |   1 -
 .../controlled_spring_mass_system.cc          |   7 +-
 .../controlled_spring_mass_system.h           |   4 +
 37 files changed, 3633 insertions(+), 3654 deletions(-)
 delete mode 100644 systems/analysis/antiderivative_function-inl.h
 delete mode 100644 systems/analysis/implicit_euler_integrator-inl.h
 delete mode 100644 systems/analysis/initial_value_problem-inl.h
 create mode 100644 systems/analysis/integrator_base.cc
 delete mode 100644 systems/analysis/runge_kutta3_integrator-inl.h
 delete mode 100644 systems/analysis/scalar_initial_value_problem-inl.h

diff --git a/systems/analysis/BUILD.bazel b/systems/analysis/BUILD.bazel
index d54677e463eb..07451c91aa28 100644
--- a/systems/analysis/BUILD.bazel
+++ b/systems/analysis/BUILD.bazel
@@ -128,6 +128,7 @@ drake_cc_library(
 
 drake_cc_library(
     name = "integrator_base",
+    srcs = ["integrator_base.cc"],
     hdrs = ["integrator_base.h"],
     deps = [
         ":dense_output",
@@ -185,7 +186,6 @@ drake_cc_library(
     srcs = ["runge_kutta3_integrator.cc"],
     hdrs = [
         "runge_kutta3_integrator.h",
-        "runge_kutta3_integrator-inl.h",
     ],
     deps = [
         ":integrator_base",
@@ -207,7 +207,6 @@ drake_cc_library(
     srcs = ["implicit_euler_integrator.cc"],
     hdrs = [
         "implicit_euler_integrator.h",
-        "implicit_euler_integrator-inl.h",
     ],
     deps = [
         ":implicit_integrator",
@@ -271,7 +270,6 @@ drake_cc_library(
     ],
     hdrs = [
         "initial_value_problem.h",
-        "initial_value_problem-inl.h",
     ],
     deps = [
         ":dense_output",
@@ -293,7 +291,6 @@ drake_cc_library(
     ],
     hdrs = [
"scalar_initial_value_problem.h", - "scalar_initial_value_problem-inl.h", ], deps = [ ":initial_value_problem", @@ -312,7 +309,6 @@ drake_cc_library( ], hdrs = [ "antiderivative_function.h", - "antiderivative_function-inl.h", ], deps = [ ":scalar_dense_output", diff --git a/systems/analysis/antiderivative_function-inl.h b/systems/analysis/antiderivative_function-inl.h deleted file mode 100644 index f3d399a141db..000000000000 --- a/systems/analysis/antiderivative_function-inl.h +++ /dev/null @@ -1,5 +0,0 @@ -#pragma once - -// Instantiation of AntiderivateFunction for T other than double -// requires ScalarInitialValueProblem definition. -#include "drake/systems/analysis/scalar_initial_value_problem-inl.h" diff --git a/systems/analysis/antiderivative_function.cc b/systems/analysis/antiderivative_function.cc index 0b2b907ea43a..6ab6906fa691 100644 --- a/systems/analysis/antiderivative_function.cc +++ b/systems/analysis/antiderivative_function.cc @@ -1,13 +1,5 @@ #include "drake/systems/analysis/antiderivative_function.h" -#include "drake/systems/analysis/antiderivative_function-inl.h" - -#include "drake/common/default_scalars.h" - -namespace drake { -namespace systems { DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS( - class AntiderivativeFunction); + class drake::systems::AntiderivativeFunction) -} // namespace systems -} // namespace drake diff --git a/systems/analysis/antiderivative_function.h b/systems/analysis/antiderivative_function.h index 13ae9ae07aad..3c9272f09dea 100644 --- a/systems/analysis/antiderivative_function.h +++ b/systems/analysis/antiderivative_function.h @@ -5,6 +5,7 @@ #include #include +#include "drake/common/default_scalars.h" #include "drake/common/drake_copyable.h" #include "drake/common/eigen_types.h" #include "drake/common/unused.h" @@ -213,3 +214,7 @@ class AntiderivativeFunction { } // namespace systems } // namespace drake + +DRAKE_DECLARE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS( + class ::drake::systems::AntiderivativeFunction) + diff --git a/systems/analysis/bogacki_shampine3_integrator.cc b/systems/analysis/bogacki_shampine3_integrator.cc index 93843298d200..8e1dc9796397 100644 --- a/systems/analysis/bogacki_shampine3_integrator.cc +++ b/systems/analysis/bogacki_shampine3_integrator.cc @@ -1,7 +1,172 @@ #include "drake/systems/analysis/bogacki_shampine3_integrator.h" -#include "drake/common/autodiff.h" +#include +#include + +#include "drake/common/drake_assert.h" +#include "drake/common/unused.h" + +namespace drake { +namespace systems { + +/* + * Bogacki-Shampine-specific initialization function. + * @throws std::logic_error if *neither* the initial step size target nor the + * maximum step size have been set before calling. + */ +template +void BogackiShampine3Integrator::DoInitialize() { + using std::isnan; + const double kDefaultAccuracy = 1e-3; // Good for this particular integrator. + const double kLoosestAccuracy = 1e-1; // Integrator specific. + const double kMaxStepFraction = 0.1; // Fraction of max step size for + // less aggressive first step. + + // Set an artificial step size target, if not set already. + if (isnan(this->get_initial_step_size_target())) { + // Verify that maximum step size has been set. + if (isnan(this->get_maximum_step_size())) + throw std::logic_error("Neither initial step size target nor maximum " + "step size has been set!"); + + this->request_initial_step_size_target( + this->get_maximum_step_size() * kMaxStepFraction); + } + + // Sets the working accuracy to a good value. 
+ double working_accuracy = this->get_target_accuracy(); + + // If the user asks for accuracy that is looser than the loosest this + // integrator can provide, use the integrator's loosest accuracy setting + // instead. + if (working_accuracy > kLoosestAccuracy) + working_accuracy = kLoosestAccuracy; + else if (isnan(working_accuracy)) + working_accuracy = kDefaultAccuracy; + this->set_accuracy_in_use(working_accuracy); +} + +template +bool BogackiShampine3Integrator::DoStep(const T& h) { + using std::abs; + Context& context = *this->get_mutable_context(); + const T t0 = context.get_time(); + + // CAUTION: This is performance-sensitive inner loop code that uses dangerous + // long-lived references into state and cache to avoid unnecessary copying and + // cache invalidation. Be careful not to insert calls to methods that could + // invalidate any of these references before they are used. + + // We use Butcher tableau notation with labels for each coefficient: + // 0 (c1) | + // 1/2 (c2) | 1/2 (a21) + // 3/4 (c3) | 0 (a31) 3/4 (a32) + // 1 (c4) | 2/9 (a41) 1/3 (a42) 4/9 (a43) + // --------------------------------------------------------------------------- + // 2/9 (b1) 1/3 (b2) 4/9 (b3) 0 (b4) + // 7/24 (d1) 1/4 (d2) 1/3 (d3) 1/8 (d4) + + // Save the continuous state at t₀. + context.get_continuous_state_vector().CopyToPreSizedVector(&save_xc0_); + + // Evaluate the derivative at t₀, xc₀. + derivs1_->get_mutable_vector().SetFrom( + this->EvalTimeDerivatives(context).get_vector()); + const VectorBase& k1 = derivs1_->get_vector(); + + // Cache: k1 references a *copy* of the derivative result so is immune + // to subsequent evaluations. + + // Compute the first intermediate state and derivative (i.e., Stage 2). + // This call marks t- and xc-dependent cache entries out of date, including + // the derivative cache entry. Note that xc is a live reference into the + // context -- subsequent changes through that reference are unobservable so + // will require manual out-of-date notifications. + const double c2 = 1.0 / 2; + const double a21 = 1.0 / 2; + VectorBase& xc = context.SetTimeAndGetMutableContinuousStateVector( + t0 + c2 * h); + xc.PlusEqScaled(a21 * h, k1); + + // Evaluate the derivative (denoted k2) at t₀ + c2 * h, xc₀ + a21 * h * k1. + derivs2_->get_mutable_vector().SetFrom( + this->EvalTimeDerivatives(context).get_vector()); + const VectorBase& k2 = derivs2_->get_vector(); // xcdot⁽ᵃ⁾ + + // Cache: k2 references a *copy* of the derivative result so is immune + // to subsequent evaluations. + + // Compute the second intermediate state and derivative (i.e., Stage 3). + const double c3 = 3.0 / 4; + const double a31 = 0.0; + const double a32 = 3.0 / 4; + // This call marks t- and xc-dependent cache entries out of date, including + // the derivative cache entry. (We already have the xc reference but must + // issue the out-of-date notification here since we're about to change it.) + context.SetTimeAndNoteContinuousStateChange(t0 + c3 * h); + + // Evaluate the derivative (denoted k3) at t₀ + c3 * h, + // xc₀ + a31 * h * k1 + a32 * h * k2. + // Note that a31 is zero, so we leave that term out. + unused(a31); + xc.SetFromVector(save_xc0_); // Restore xc ← xc₀. + xc.PlusEqScaled({{a32 * h, k2}}); + derivs3_->get_mutable_vector().SetFrom( + this->EvalTimeDerivatives(context).get_vector()); + const VectorBase& k3 = derivs3_->get_vector(); + + // Compute the propagated solution (we're able to do this because b1 = a41, + // b2 = a42, b3 = a43, and b4 = 0). 
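// ---------------------------------------------------------------------------
// Illustrative sketch (editorial aside, not applied by this patch): the
// tableau above, written out for a scalar ODE xdot = f(t, x). Because
// b1 = a41, b2 = a42, b3 = a43, and b4 = 0, the Stage-4 state is itself the
// third-order propagated solution, and k4 is reusable as the next step's k1
// (the FSAL property). The helper name Bs3Step and the use of std::function
// are assumptions made only for this sketch.
#include <cmath>
#include <functional>
#include <utility>

// Returns {third-order solution, error estimate} for one step of size h.
std::pair<double, double> Bs3Step(
    const std::function<double(double, double)>& f,
    double t0, double x0, double h) {
  const double k1 = f(t0, x0);
  const double k2 = f(t0 + h / 2, x0 + h * (k1 / 2));
  const double k3 = f(t0 + 3 * h / 4, x0 + h * (3 * k2 / 4));  // a31 = 0.
  // Third-order solution: b = (2/9, 1/3, 4/9, 0).
  const double x1 = x0 + h * (2 * k1 / 9 + k2 / 3 + 4 * k3 / 9);
  const double k4 = f(t0 + h, x1);  // FSAL: also the next step's k1.
  // Second-order solution from d = (7/24, 1/4, 1/3, 1/8); the difference
  // between the two solutions is the error estimate.
  const double x1_hat = x0 + h * (7 * k1 / 24 + k2 / 4 + k3 / 3 + k4 / 8);
  return {x1, std::abs(x1 - x1_hat)};
}
// ---------------------------------------------------------------------------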
+ const double c4 = 1.0; + const double a41 = 2.0 / 9; + const double a42 = 1.0 / 3; + const double a43 = 4.0 / 9; + // This call marks t- and xc-dependent cache entries out of date, including + // the derivative cache entry. (We already have the xc reference but must + // issue the out-of-date notification here since we're about to change it.) + context.SetTimeAndNoteContinuousStateChange(t0 + c4 * h); + + // Evaluate the derivative (denoted k4) at t₀ + c4 * h, xc₀ + a41 * h * k1 + + // a42 * h * k2 + a43 * h * k3. This will be used to compute the second + // order solution. + xc.SetFromVector(save_xc0_); // Restore xc ← xc₀. + xc.PlusEqScaled({{a41 * h, k1}, {a42 * h, k2}, {a43 * h, k3}}); + const ContinuousState& derivs4 = this->EvalTimeDerivatives(context); + const VectorBase& k4 = derivs4.get_vector(); + + // WARNING: k4 is a live reference into the cache. Be careful of adding + // code below that modifies the context until after k4 is used below. In fact, + // it is best not to modify the context from here on out, as modifying the + // context will effectively destroy the FSAL benefit that this integrator + // provides. + + // Compute the second order solution used for the error estimate and then + // the error estimate itself. The first part of this formula (the part that + // uses the d coefficients) computes the second error solution. The last part + // subtracts the third order propagated solution from that second order + // solution, thereby yielding the error estimate. + const double d1 = 7.0 / 24; + const double d2 = 1.0 / 4; + const double d3 = 1.0 / 3; + const double d4 = 1.0 / 8; + err_est_vec_->SetZero(); + err_est_vec_->PlusEqScaled({{(a41 - d1) * h, k1}, + {(a42 - d2) * h, k2}, + {(a43 - d3) * h, k3}, + {(-d4) * h, k4}}); + + // If the size of the system has changed, the error estimate will no longer + // be sized correctly. Verify that the error estimate is the correct size. + DRAKE_DEMAND(this->get_error_estimate()->size() == xc.size()); + this->get_mutable_error_estimate()->SetFromVector(err_est_vec_-> + CopyToVector().cwiseAbs()); + + // Bogacki-Shampine always succeeds in taking its desired step. + return true; +} + +} // namespace systems +} // namespace drake DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS( class ::drake::systems::BogackiShampine3Integrator) - diff --git a/systems/analysis/bogacki_shampine3_integrator.h b/systems/analysis/bogacki_shampine3_integrator.h index 4dc409df691b..a3faea05a165 100644 --- a/systems/analysis/bogacki_shampine3_integrator.h +++ b/systems/analysis/bogacki_shampine3_integrator.h @@ -1,10 +1,9 @@ #pragma once #include -#include +#include "drake/common/default_scalars.h" #include "drake/common/drake_copyable.h" -#include "drake/common/unused.h" #include "drake/systems/analysis/integrator_base.h" namespace drake { @@ -76,162 +75,6 @@ class BogackiShampine3Integrator final : public IntegratorBase { std::unique_ptr> derivs1_, derivs2_, derivs3_; }; -/** - * Bogacki-Shampine-specific initialization function. - * @throws std::logic_error if *neither* the initial step size target nor the - * maximum step size have been set before calling. - */ -template -void BogackiShampine3Integrator::DoInitialize() { - using std::isnan; - const double kDefaultAccuracy = 1e-3; // Good for this particular integrator. - const double kLoosestAccuracy = 1e-1; // Integrator specific. - const double kMaxStepFraction = 0.1; // Fraction of max step size for - // less aggressive first step. 
- - // Set an artificial step size target, if not set already. - if (isnan(this->get_initial_step_size_target())) { - // Verify that maximum step size has been set. - if (isnan(this->get_maximum_step_size())) - throw std::logic_error("Neither initial step size target nor maximum " - "step size has been set!"); - - this->request_initial_step_size_target( - this->get_maximum_step_size() * kMaxStepFraction); - } - - // Sets the working accuracy to a good value. - double working_accuracy = this->get_target_accuracy(); - - // If the user asks for accuracy that is looser than the loosest this - // integrator can provide, use the integrator's loosest accuracy setting - // instead. - if (working_accuracy > kLoosestAccuracy) - working_accuracy = kLoosestAccuracy; - else if (isnan(working_accuracy)) - working_accuracy = kDefaultAccuracy; - this->set_accuracy_in_use(working_accuracy); -} - -template -bool BogackiShampine3Integrator::DoStep(const T& h) { - using std::abs; - Context& context = *this->get_mutable_context(); - const T t0 = context.get_time(); - - // CAUTION: This is performance-sensitive inner loop code that uses dangerous - // long-lived references into state and cache to avoid unnecessary copying and - // cache invalidation. Be careful not to insert calls to methods that could - // invalidate any of these references before they are used. - - // We use Butcher tableau notation with labels for each coefficient: - // 0 (c1) | - // 1/2 (c2) | 1/2 (a21) - // 3/4 (c3) | 0 (a31) 3/4 (a32) - // 1 (c4) | 2/9 (a41) 1/3 (a42) 4/9 (a43) - // --------------------------------------------------------------------------- - // 2/9 (b1) 1/3 (b2) 4/9 (b3) 0 (b4) - // 7/24 (d1) 1/4 (d2) 1/3 (d3) 1/8 (d4) - - // Save the continuous state at t₀. - context.get_continuous_state_vector().CopyToPreSizedVector(&save_xc0_); - - // Evaluate the derivative at t₀, xc₀. - derivs1_->get_mutable_vector().SetFrom( - this->EvalTimeDerivatives(context).get_vector()); - const VectorBase& k1 = derivs1_->get_vector(); - - // Cache: k1 references a *copy* of the derivative result so is immune - // to subsequent evaluations. - - // Compute the first intermediate state and derivative (i.e., Stage 2). - // This call marks t- and xc-dependent cache entries out of date, including - // the derivative cache entry. Note that xc is a live reference into the - // context -- subsequent changes through that reference are unobservable so - // will require manual out-of-date notifications. - const double c2 = 1.0 / 2; - const double a21 = 1.0 / 2; - VectorBase& xc = context.SetTimeAndGetMutableContinuousStateVector( - t0 + c2 * h); - xc.PlusEqScaled(a21 * h, k1); - - // Evaluate the derivative (denoted k2) at t₀ + c2 * h, xc₀ + a21 * h * k1. - derivs2_->get_mutable_vector().SetFrom( - this->EvalTimeDerivatives(context).get_vector()); - const VectorBase& k2 = derivs2_->get_vector(); // xcdot⁽ᵃ⁾ - - // Cache: k2 references a *copy* of the derivative result so is immune - // to subsequent evaluations. - - // Compute the second intermediate state and derivative (i.e., Stage 3). - const double c3 = 3.0 / 4; - const double a31 = 0.0; - const double a32 = 3.0 / 4; - // This call marks t- and xc-dependent cache entries out of date, including - // the derivative cache entry. (We already have the xc reference but must - // issue the out-of-date notification here since we're about to change it.) 
- context.SetTimeAndNoteContinuousStateChange(t0 + c3 * h); - - // Evaluate the derivative (denoted k3) at t₀ + c3 * h, - // xc₀ + a31 * h * k1 + a32 * h * k2. - // Note that a31 is zero, so we leave that term out. - unused(a31); - xc.SetFromVector(save_xc0_); // Restore xc ← xc₀. - xc.PlusEqScaled({{a32 * h, k2}}); - derivs3_->get_mutable_vector().SetFrom( - this->EvalTimeDerivatives(context).get_vector()); - const VectorBase& k3 = derivs3_->get_vector(); - - // Compute the propagated solution (we're able to do this because b1 = a41, - // b2 = a42, b3 = a43, and b4 = 0). - const double c4 = 1.0; - const double a41 = 2.0 / 9; - const double a42 = 1.0 / 3; - const double a43 = 4.0 / 9; - // This call marks t- and xc-dependent cache entries out of date, including - // the derivative cache entry. (We already have the xc reference but must - // issue the out-of-date notification here since we're about to change it.) - context.SetTimeAndNoteContinuousStateChange(t0 + c4 * h); - - // Evaluate the derivative (denoted k4) at t₀ + c4 * h, xc₀ + a41 * h * k1 + - // a42 * h * k2 + a43 * h * k3. This will be used to compute the second - // order solution. - xc.SetFromVector(save_xc0_); // Restore xc ← xc₀. - xc.PlusEqScaled({{a41 * h, k1}, {a42 * h, k2}, {a43 * h, k3}}); - const ContinuousState& derivs4 = this->EvalTimeDerivatives(context); - const VectorBase& k4 = derivs4.get_vector(); - - // WARNING: k4 is a live reference into the cache. Be careful of adding - // code below that modifies the context until after k4 is used below. In fact, - // it is best not to modify the context from here on out, as modifying the - // context will effectively destroy the FSAL benefit that this integrator - // provides. - - // Compute the second order solution used for the error estimate and then - // the error estimate itself. The first part of this formula (the part that - // uses the d coefficients) computes the second error solution. The last part - // subtracts the third order propagated solution from that second order - // solution, thereby yielding the error estimate. - const double d1 = 7.0 / 24; - const double d2 = 1.0 / 4; - const double d3 = 1.0 / 3; - const double d4 = 1.0 / 8; - err_est_vec_->SetZero(); - err_est_vec_->PlusEqScaled({{(a41 - d1) * h, k1}, - {(a42 - d2) * h, k2}, - {(a43 - d3) * h, k3}, - {(-d4) * h, k4}}); - - // If the size of the system has changed, the error estimate will no longer - // be sized correctly. Verify that the error estimate is the correct size. - DRAKE_DEMAND(this->get_error_estimate()->size() == xc.size()); - this->get_mutable_error_estimate()->SetFromVector(err_est_vec_-> - CopyToVector().cwiseAbs()); - - // Bogacki-Shampine always succeeds in taking its desired step. 
- return true; -} - } // namespace systems } // namespace drake diff --git a/systems/analysis/hermitian_dense_output.h b/systems/analysis/hermitian_dense_output.h index f88614b3fd22..4d0c3a112576 100644 --- a/systems/analysis/hermitian_dense_output.h +++ b/systems/analysis/hermitian_dense_output.h @@ -1,21 +1,16 @@ #pragma once #include -#include #include #include -#include #include #include -#include "drake/common/autodiff.h" #include "drake/common/drake_copyable.h" #include "drake/common/eigen_types.h" #include "drake/common/extract_double.h" #include "drake/common/trajectories/piecewise_polynomial.h" #include "drake/systems/analysis/stepwise_dense_output.h" -#include "drake/systems/framework/basic_vector.h" -#include "drake/systems/framework/vector_base.h" namespace drake { namespace systems { diff --git a/systems/analysis/implicit_euler_integrator-inl.h b/systems/analysis/implicit_euler_integrator-inl.h deleted file mode 100644 index 2c04c04e2a82..000000000000 --- a/systems/analysis/implicit_euler_integrator-inl.h +++ /dev/null @@ -1,470 +0,0 @@ -#pragma once - -/// @file -/// Template method implementations for implicit_euler_integrator.h. -/// Most users should only include that file, not this one. -/// For background, see https://drake.mit.edu/cxx_inl.html. - -/* clang-format off to disable clang-format-includes */ -#include "drake/systems/analysis/implicit_euler_integrator.h" -/* clang-format on */ - -#include -#include -#include -#include -#include - -#include "drake/common/text_logging.h" -#include "drake/math/autodiff.h" - -namespace drake { -namespace systems { - -template -void ImplicitEulerIntegrator::DoResetImplicitIntegratorStatistics() { - num_nr_iterations_ = 0; - num_err_est_nr_iterations_ = 0; - num_err_est_function_evaluations_ = 0; - num_err_est_jacobian_function_evaluations_ = 0; - num_err_est_jacobian_reforms_ = 0; - num_err_est_iter_factorizations_ = 0; -} - -template -void ImplicitEulerIntegrator::DoInitialize() { - using std::isnan; - - // Allocate storage for changes to state variables during Newton-Raphson. - dx_state_ = this->get_system().AllocateTimeDerivatives(); - - const double kDefaultAccuracy = 1e-1; // Good for this particular integrator. - const double kLoosestAccuracy = 5e-1; // Loosest accuracy is quite loose. - - // Set an artificial step size target, if not set already. - if (isnan(this->get_initial_step_size_target())) { - // Verify that maximum step size has been set. - if (isnan(this->get_maximum_step_size())) - throw std::logic_error("Neither initial step size target nor maximum " - "step size has been set!"); - - this->request_initial_step_size_target( - this->get_maximum_step_size()); - } - - // Sets the working accuracy to a good value. - double working_accuracy = this->get_target_accuracy(); - - // If the user asks for accuracy that is looser than the loosest this - // integrator can provide, use the integrator's loosest accuracy setting - // instead. - if (isnan(working_accuracy)) - working_accuracy = kDefaultAccuracy; - else if (working_accuracy > kLoosestAccuracy) - working_accuracy = kLoosestAccuracy; - this->set_accuracy_in_use(working_accuracy); - - // Reset the Jacobian matrix (so that recomputation is forced). - this->get_mutable_jacobian().resize(0, 0); - - // Initialize the embedded second order Runge-Kutta integrator. The maximum - // step size will be set to infinity because we will explicitly request the - // step sizes to be taken. 
- rk2_ = std::make_unique>( - this->get_system(), - std::numeric_limits::infinity() /* maximum step size */, - this->get_mutable_context()); -} - -template -void ImplicitEulerIntegrator::ComputeAndFactorImplicitEulerIterationMatrix( - const MatrixX& J, const T& h, - typename ImplicitIntegrator::IterationMatrix* iteration_matrix) { - const int n = J.rows(); - // TODO(edrumwri) Investigate using a move-type operation below. - // We form the iteration matrix in this particular way to avoid an O(n^2) - // subtraction as would be the case with: - // MatrixX::Identity(n, n) - J * h. - iteration_matrix->SetAndFactorIterationMatrix( - J * -h + MatrixX::Identity(n, n)); -} - -template -void ImplicitEulerIntegrator:: -ComputeAndFactorImplicitTrapezoidIterationMatrix( - const MatrixX& J, const T& h, - typename ImplicitIntegrator::IterationMatrix* iteration_matrix) { - const int n = J.rows(); - // TODO(edrumwri) Investigate using a move-type operation below. - // We form the iteration matrix in this particular way to avoid an O(n^2) - // subtraction as would be the case with: - // MatrixX::Identity(n, n) - J * h / 2. - iteration_matrix->SetAndFactorIterationMatrix( - J * (-h / 2.0) + MatrixX::Identity(n, n)); -} - -// Performs the bulk of the stepping computation for both implicit Euler and -// implicit trapezoid method; all those methods need to do is provide a -// residual function (`g`) and an iteration matrix computation and factorization -// function (`compute_and_factor_iteration_matrix`) specific to the -// particular integrator scheme and this method does the rest. -// @param t0 the time at the left end of the integration interval. -// @param h the integration step size (> 0) to attempt. -// @param xt0 the continuous state at t0. -// @param g the particular implicit function to compute the root of. -// @param compute_and_factor_iteration_matrix the function for computing and -// factorizing the iteration matrix. -// @param xtplus_guess the starting guess for x(t0+h) -- implicit Euler passes -// x(t0) since it has no better guess; implicit trapezoid passes -// implicit Euler's result for x(t0+h). -// @param[out] the iteration matrix to be used for the particular integration -// scheme (implicit Euler, implicit trapezoid), which will be -// computed and factored, if necessary. -// @param[out] the value for x(t0+h) on return. -// @param trial the attempt for this approach (1-4). StepAbstract() uses more -// computationally expensive methods as the trial numbers increase. -// @returns `true` if the method was successfully able to take an integration -// step of size h (or `false` otherwise). -// @note The time and continuous state in the context are indeterminate upon -// exit. -// TODO(edrumwri) Explicitly test this method's fallback logic (i.e., how it -// calls MaybeFreshenMatrices()) in a unit test). -template -bool ImplicitEulerIntegrator::StepAbstract( - const T& t0, const T& h, const VectorX& xt0, - const std::function()>& g, - const std::function&, const T&, - typename ImplicitIntegrator::IterationMatrix*)>& - compute_and_factor_iteration_matrix, - const VectorX& xtplus_guess, - typename ImplicitIntegrator::IterationMatrix* iteration_matrix, - VectorX* xtplus, int trial) { - using std::max; - using std::min; - - // Verify the trial number is valid. - DRAKE_ASSERT(trial >= 1 && trial <= 4); - - // Verify xtplus - DRAKE_ASSERT(xtplus && xtplus->size() == xt0.size()); - - DRAKE_LOGGER_DEBUG("StepAbstract() entered for t={}, h={}, trial={}", - t0, h, trial); - - // Start from the guess. 
- *xtplus = xtplus_guess; - DRAKE_LOGGER_DEBUG("Starting state: {}", xtplus->transpose()); - - // Advance the context time and state to compute derivatives at t0 + h. - const T tf = t0 + h; - Context* context = this->get_mutable_context(); - context->SetTimeAndContinuousState(tf, *xtplus); - - // Initialize the "last" state update norm; this will be used to detect - // convergence. - T last_dx_norm = std::numeric_limits::infinity(); - - // Calculate Jacobian and iteration matrices (and factorizations), as needed, - // around (t0, xt0). We do not do this calculation if full Newton is in use; - // the calculation will be performed at the beginning of the loop instead. - // TODO(edrumwri) Consider computing the Jacobian matrix around tf and/or - // xtplus. This would give a better Jacobian, but would - // complicate the logic, since the Jacobian would no longer - // (necessarily) be fresh upon fallback to a smaller step size. - if (!this->get_use_full_newton() && - !this->MaybeFreshenMatrices(t0, xt0, h, trial, - compute_and_factor_iteration_matrix, - iteration_matrix)) { - return false; - } - - // Do the Newton-Raphson iterations. - for (int i = 0; i < this->max_newton_raphson_iterations(); ++i) { - this->FreshenMatricesIfFullNewton(tf, *xtplus, h, - compute_and_factor_iteration_matrix, - iteration_matrix); - - // Evaluate the residual error using: - // g(x(t0+h)) = x(t0+h) - x(t0) - h f(t0+h,x(t0+h)). - VectorX goutput = g(); - - // Update the number of Newton-Raphson iterations. - num_nr_iterations_++; - - // Compute the state update using the equation A*x = -g(), where A is the - // iteration matrix. - // TODO(edrumwri): Allow caller to provide their own solver. - VectorX dx = iteration_matrix->Solve(-goutput); - - // Get the infinity norm of the weighted update vector. - dx_state_->get_mutable_vector().SetFromVector(dx); - T dx_norm = this->CalcStateChangeNorm(*dx_state_); - - // Update the state vector. - *xtplus += dx; - context->SetTimeAndContinuousState(tf, *xtplus); - - // Check for Newton-Raphson convergence. - typename ImplicitIntegrator::ConvergenceStatus status = - this->CheckNewtonConvergence(i, *xtplus, dx, dx_norm, last_dx_norm); - // If it converged, we're done. - if (status == ImplicitIntegrator::ConvergenceStatus::kConverged) - return true; - // If it diverged, we have to abort and try again. - if (status == ImplicitIntegrator::ConvergenceStatus::kDiverged) - break; - // Otherwise, continue to the next Newton-Raphson iteration. - DRAKE_DEMAND(status == - ImplicitIntegrator::ConvergenceStatus::kNotConverged); - - // Update the norm of the state update. - last_dx_norm = dx_norm; - } - - DRAKE_LOGGER_DEBUG("StepAbstract() convergence failed"); - - // If Jacobian and iteration matrix factorizations are not reused, there - // is nothing else we can try. Note that get_reuse() returns false if - // "full Newton-Raphson" mode is activated (see - // ImplicitIntegrator::get_use_full_newton()). - if (!this->get_reuse()) - return false; - - // Try StepAbstract again. That method will freshen Jacobians and iteration - // matrix factorizations as necessary. - return StepAbstract(t0, h, xt0, g, compute_and_factor_iteration_matrix, - xtplus_guess, iteration_matrix, xtplus, trial + 1); -} - -// Steps the system forward by a single step of at most h using the implicit -// Euler method. -// @param t0 the time at the left end of the integration interval. -// @param h the maximum time increment to step forward. -// @param xt0 the continuous state at t0. 
-// @param[out] the computed value for `x(t0+h)` on successful return. -// @returns `true` if the step of size `h` was successful, `false` otherwise. -// @note The time and continuous state in the context are indeterminate upon -// exit. -template -bool ImplicitEulerIntegrator::StepImplicitEuler(const T& t0, const T& h, - const VectorX& xt0, VectorX* xtplus) { - using std::abs; - - DRAKE_LOGGER_DEBUG("StepImplicitEuler(h={}) t={}", h, t0); - - // Set g for the implicit Euler method. - Context* context = this->get_mutable_context(); - std::function()> g = - [&xt0, h, context, this]() { - return (context->get_continuous_state().CopyToVector() - xt0 - - h * this->EvalTimeDerivatives(*context).CopyToVector()).eval(); - }; - - // Use the current state as the candidate value for the next state. - // [Hairer 1996] validates this choice (p. 120). - const VectorX& xtplus_guess = xt0; - - // Attempt the step. - return StepAbstract(t0, h, xt0, g, - ComputeAndFactorImplicitEulerIterationMatrix, - xtplus_guess, &ie_iteration_matrix_, &*xtplus); -} - -// Steps forward by a single step of `h` using the implicit trapezoid -// method, if possible. -// @param t0 the time at the left end of the integration interval. -// @param h the maximum time increment to step forward. -// @param dx0 the time derivatives computed at time and state (t0, xt0). -// @param xtplus_ie x(t0+h) computed by the implicit Euler method. -// @param[out] xtplus x(t0+h) computed by the implicit trapezoid method on -// successful return. -// @returns `true` if the step was successful and `false` otherwise. -// @note The time and continuous state in the context are indeterminate upon -// exit. -template -bool ImplicitEulerIntegrator::StepImplicitTrapezoid( - const T& t0, const T& h, const VectorX& xt0, const VectorX& dx0, - const VectorX& xtplus_ie, VectorX* xtplus) { - using std::abs; - - DRAKE_LOGGER_DEBUG("StepImplicitTrapezoid(h={}) t={}", h, t0); - - // Set g for the implicit trapezoid method. - // Define g(x(t+h)) ≡ x(t0+h) - x(t0) - h/2 (f(t0,x(t0)) + f(t0+h,x(t0+h)) and - // evaluate it at the current x(t+h). - Context* context = this->get_mutable_context(); - std::function()> g = - [&xt0, h, &dx0, context, this]() { - return (context->get_continuous_state().CopyToVector() - xt0 - h/2 * - (dx0 + this->EvalTimeDerivatives( - *context).CopyToVector().eval())).eval(); - }; - - // Store statistics before calling StepAbstract(). The difference between - // the modified statistics and the stored statistics will be used to compute - // the trapezoid method-specific statistics. - int stored_num_jacobian_evaluations = this->get_num_jacobian_evaluations(); - int stored_num_iter_factorizations = - this->get_num_iteration_matrix_factorizations(); - int64_t stored_num_function_evaluations = - this->get_num_derivative_evaluations(); - int64_t stored_num_jacobian_function_evaluations = - this->get_num_derivative_evaluations_for_jacobian(); - int stored_num_nr_iterations = this->get_num_newton_raphson_iterations(); - - // Attempt to step. - bool success = StepAbstract(t0, h, xt0, g, - ComputeAndFactorImplicitTrapezoidIterationMatrix, - xtplus_ie, &itr_iteration_matrix_, xtplus); - - // Move statistics to implicit trapezoid-specific. 
- num_err_est_jacobian_reforms_ += - this->get_num_jacobian_evaluations() - stored_num_jacobian_evaluations; - num_err_est_iter_factorizations_ += - this->get_num_iteration_matrix_factorizations() - - stored_num_iter_factorizations; - num_err_est_function_evaluations_ += - this->get_num_derivative_evaluations() - stored_num_function_evaluations; - num_err_est_jacobian_function_evaluations_ += - this->get_num_derivative_evaluations_for_jacobian() - - stored_num_jacobian_function_evaluations; - num_err_est_nr_iterations_ += this->get_num_newton_raphson_iterations() - - stored_num_nr_iterations; - - return success; -} - -// Steps both implicit Euler and implicit trapezoid forward by h, if possible. -// @param t0 the time at the left end of the integration interval. -// @param h the integration step size to attempt. -// @param [out] xtplus_ie contains the Euler integrator solution (i.e., -// `x(t0+h)`) on return. -// @param [out] xtplus_itr contains the implicit trapezoid solution (i.e., -// `x(t0+h)`) on return. -// @returns `true` if the step of size `h` was successful, `false` otherwise. -template -bool ImplicitEulerIntegrator::AttemptStepPaired(const T& t0, const T& h, - const VectorX& xt0, VectorX* xtplus_ie, VectorX* xtplus_itr) { - using std::abs; - DRAKE_ASSERT(xtplus_ie); - DRAKE_ASSERT(xtplus_itr); - - // Compute the derivative at time and state (t0, x(t0)). NOTE: the derivative - // is calculated at this point (early on in the integration process) in order - // to reuse the derivative evaluation, via the cache, from the last - // integration step (if possible). - const VectorX dx0 = this->EvalTimeDerivatives( - this->get_context()).CopyToVector(); - - // Do the Euler step. - if (!StepImplicitEuler(t0, h, xt0, xtplus_ie)) { - DRAKE_LOGGER_DEBUG("Implicit Euler approach did not converge for " - "step size {}", h); - return false; - } - - // The error estimation process uses the implicit trapezoid method, which - // is defined as: - // x(t0+h) = x(t0) + h/2 (f(t0, x(t0) + f(t0+h, x(t0+h)) - // x(t0+h) from the implicit Euler method is presumably a good starting point. - - // The error estimate is derived as follows (thanks to Michael Sherman): - // x*(t0+h) = xᵢₑ(t0+h) + O(h²) [implicit Euler] - // = xₜᵣ(t0+h) + O(h³) [implicit trapezoid] - // where x*(t0+h) is the true (generally unknown) answer that we seek. - // This implies: - // xᵢₑ(t0+h) + O(h²) = xₜᵣ(t0+h) + O(h³). - // Given that the second order term subsumes the third order one, we have: - // xᵢₑ(t0+h) - xₜᵣ(t0+h) = O(h²). - - // Attempt to compute the implicit trapezoid solution. - if (StepImplicitTrapezoid(t0, h, xt0, dx0, *xtplus_ie, xtplus_itr)) { - // Reset the state to that computed by implicit Euler. - // TODO(edrumwri): Explore using the implicit trapezoid method solution - // instead as *the* solution, rather than the implicit - // Euler. Refer to [Lambert, 1991], Ch 6. - Context* context = this->get_mutable_context(); - context->SetTimeAndContinuousState(t0 + h, *xtplus_ie); - return true; - } else { - DRAKE_LOGGER_DEBUG("Implicit trapezoid approach FAILED with a step" - "size that succeeded on implicit Euler."); - return false; - } -} - -/// Takes a given step of the requested size, if possible. -/// @returns `true` if successful and `false` otherwise; on `true`, the time -/// and continuous state will be advanced in the context (e.g., from -/// t0 to t0 + h). On `false` return, the time and continuous state in -/// the context will be restored to its original value (at t0). 
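// ---------------------------------------------------------------------------
// Illustrative sketch (editorial aside, not applied by this patch): the
// O(h^2) error estimate described above, worked out in closed form for the
// scalar test equation xdot = lambda * x. The helper names are assumptions
// made only for this sketch.
#include <cmath>

double ImplicitEulerStep(double x0, double lambda, double h) {
  // Solves x1 = x0 + h * lambda * x1.
  return x0 / (1.0 - h * lambda);
}

double ImplicitTrapezoidStep(double x0, double lambda, double h) {
  // Solves x1 = x0 + (h / 2) * (lambda * x0 + lambda * x1).
  return x0 * (1.0 + h * lambda / 2) / (1.0 - h * lambda / 2);
}

// For small h the difference is approximately |x0| * (h * lambda)^2 / 2,
// i.e., O(h^2), consistent with the derivation above.
double PairedErrorEstimate(double x0, double lambda, double h) {
  return std::abs(ImplicitEulerStep(x0, lambda, h) -
                  ImplicitTrapezoidStep(x0, lambda, h));
}
// ---------------------------------------------------------------------------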
-template -bool ImplicitEulerIntegrator::DoImplicitIntegratorStep(const T& h) { - // Save the current time and state. - Context* context = this->get_mutable_context(); - const T t0 = context->get_time(); - DRAKE_LOGGER_DEBUG("IE DoStep(h={}) t={}", h, t0); - - xt0_ = context->get_continuous_state().CopyToVector(); - xtplus_ie_.resize(xt0_.size()); - xtplus_tr_.resize(xt0_.size()); - - // If the requested h is less than the minimum step size, we'll advance time - // using an explicit Euler step. - if (h < this->get_working_minimum_step_size()) { - DRAKE_LOGGER_DEBUG("-- requested step too small, taking explicit " - "step instead"); - - // TODO(edrumwri): Investigate replacing this with an explicit trapezoid - // step, which would be expected to give better accuracy. - // The mitigating factor is that h is already small, so a - // test of, e.g., a square wave function, should quantify - // the improvement (if any). - - // The error estimation process for explicit Euler uses an explicit second - // order Runge-Kutta method so that the order of the asymptotic term - // matches that used for estimating the error of the implicit Euler - // integrator. - - // Compute the Euler step. - xdot_ = this->EvalTimeDerivatives(*context).CopyToVector(); - xtplus_ie_ = xt0_ + h * xdot_; - - // Compute the RK2 step. - const int evals_before_rk2 = rk2_->get_num_derivative_evaluations(); - if (!rk2_->IntegrateWithSingleFixedStepToTime(t0 + h)) { - throw std::runtime_error("Embedded RK2 integrator failed to take a single" - "fixed step to the requested time."); - } - - const int evals_after_rk2 = rk2_->get_num_derivative_evaluations(); - xtplus_tr_ = context->get_continuous_state().CopyToVector(); - - // Update the error estimation ODE counts. - num_err_est_function_evaluations_ += (evals_after_rk2 - evals_before_rk2); - - // Revert the state to that computed by explicit Euler. - context->SetTimeAndContinuousState(t0 + h, xtplus_ie_); - } else { - // Try taking the requested step. - bool success = AttemptStepPaired(t0, h, xt0_, &xtplus_ie_, &xtplus_tr_); - - // If the step was not successful, reset the time and state. - if (!success) { - context->SetTimeAndContinuousState(t0, xt0_); - return false; - } - } - - // Compute and update the error estimate. - err_est_vec_ = xtplus_ie_ - xtplus_tr_; - - // Update the caller-accessible error estimate. - this->get_mutable_error_estimate()->get_mutable_vector(). 
- SetFromVector(err_est_vec_); - - return true; -} - -} // namespace systems -} // namespace drake diff --git a/systems/analysis/implicit_euler_integrator.cc b/systems/analysis/implicit_euler_integrator.cc index 93270018a5cd..c23455802afd 100644 --- a/systems/analysis/implicit_euler_integrator.cc +++ b/systems/analysis/implicit_euler_integrator.cc @@ -1,13 +1,465 @@ #include "drake/systems/analysis/implicit_euler_integrator.h" -#include "drake/systems/analysis/implicit_euler_integrator-inl.h" -#include "drake/common/autodiff.h" +#include +#include +#include +#include + +#include "drake/common/drake_assert.h" +#include "drake/common/text_logging.h" +#include "drake/systems/analysis/runge_kutta2_integrator.h" namespace drake { namespace systems { -template class ImplicitEulerIntegrator; -template class ImplicitEulerIntegrator; + +template +void ImplicitEulerIntegrator::DoResetImplicitIntegratorStatistics() { + num_nr_iterations_ = 0; + num_err_est_nr_iterations_ = 0; + num_err_est_function_evaluations_ = 0; + num_err_est_jacobian_function_evaluations_ = 0; + num_err_est_jacobian_reforms_ = 0; + num_err_est_iter_factorizations_ = 0; +} + +template +void ImplicitEulerIntegrator::DoInitialize() { + using std::isnan; + + // Allocate storage for changes to state variables during Newton-Raphson. + dx_state_ = this->get_system().AllocateTimeDerivatives(); + + const double kDefaultAccuracy = 1e-1; // Good for this particular integrator. + const double kLoosestAccuracy = 5e-1; // Loosest accuracy is quite loose. + + // Set an artificial step size target, if not set already. + if (isnan(this->get_initial_step_size_target())) { + // Verify that maximum step size has been set. + if (isnan(this->get_maximum_step_size())) + throw std::logic_error("Neither initial step size target nor maximum " + "step size has been set!"); + + this->request_initial_step_size_target( + this->get_maximum_step_size()); + } + + // Sets the working accuracy to a good value. + double working_accuracy = this->get_target_accuracy(); + + // If the user asks for accuracy that is looser than the loosest this + // integrator can provide, use the integrator's loosest accuracy setting + // instead. + if (isnan(working_accuracy)) + working_accuracy = kDefaultAccuracy; + else if (working_accuracy > kLoosestAccuracy) + working_accuracy = kLoosestAccuracy; + this->set_accuracy_in_use(working_accuracy); + + // Reset the Jacobian matrix (so that recomputation is forced). + this->get_mutable_jacobian().resize(0, 0); + + // Initialize the embedded second order Runge-Kutta integrator. The maximum + // step size will be set to infinity because we will explicitly request the + // step sizes to be taken. + rk2_ = std::make_unique>( + this->get_system(), + std::numeric_limits::infinity() /* maximum step size */, + this->get_mutable_context()); +} + +template +void ImplicitEulerIntegrator::ComputeAndFactorImplicitEulerIterationMatrix( + const MatrixX& J, const T& h, + typename ImplicitIntegrator::IterationMatrix* iteration_matrix) { + const int n = J.rows(); + // TODO(edrumwri) Investigate using a move-type operation below. + // We form the iteration matrix in this particular way to avoid an O(n^2) + // subtraction as would be the case with: + // MatrixX::Identity(n, n) - J * h. 
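// ---------------------------------------------------------------------------
// Illustrative sketch (editorial aside, not applied by this patch): the
// iteration matrix here is A = I - h * J, the Jacobian of the implicit Euler
// residual g(x) = x - x0 - h * f(t0 + h, x), and each Newton-Raphson update
// solves A * dx = -g. A plain-Eigen version, with the helper name
// NewtonUpdate assumed only for this sketch:
#include <Eigen/Dense>

Eigen::VectorXd NewtonUpdate(const Eigen::MatrixXd& J, double h,
                             const Eigen::VectorXd& g_value) {
  const Eigen::Index n = J.rows();
  // Formed as J * -h + I, mirroring the statement that follows.
  const Eigen::MatrixXd A = J * -h + Eigen::MatrixXd::Identity(n, n);
  return A.partialPivLu().solve(-g_value);  // dx such that A * dx = -g.
}
// ---------------------------------------------------------------------------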
+ iteration_matrix->SetAndFactorIterationMatrix( + J * -h + MatrixX::Identity(n, n)); +} + +template +void ImplicitEulerIntegrator:: +ComputeAndFactorImplicitTrapezoidIterationMatrix( + const MatrixX& J, const T& h, + typename ImplicitIntegrator::IterationMatrix* iteration_matrix) { + const int n = J.rows(); + // TODO(edrumwri) Investigate using a move-type operation below. + // We form the iteration matrix in this particular way to avoid an O(n^2) + // subtraction as would be the case with: + // MatrixX::Identity(n, n) - J * h / 2. + iteration_matrix->SetAndFactorIterationMatrix( + J * (-h / 2.0) + MatrixX::Identity(n, n)); +} + +// Performs the bulk of the stepping computation for both implicit Euler and +// implicit trapezoid method; all those methods need to do is provide a +// residual function (`g`) and an iteration matrix computation and factorization +// function (`compute_and_factor_iteration_matrix`) specific to the +// particular integrator scheme and this method does the rest. +// @param t0 the time at the left end of the integration interval. +// @param h the integration step size (> 0) to attempt. +// @param xt0 the continuous state at t0. +// @param g the particular implicit function to compute the root of. +// @param compute_and_factor_iteration_matrix the function for computing and +// factorizing the iteration matrix. +// @param xtplus_guess the starting guess for x(t0+h) -- implicit Euler passes +// x(t0) since it has no better guess; implicit trapezoid passes +// implicit Euler's result for x(t0+h). +// @param[out] the iteration matrix to be used for the particular integration +// scheme (implicit Euler, implicit trapezoid), which will be +// computed and factored, if necessary. +// @param[out] the value for x(t0+h) on return. +// @param trial the attempt for this approach (1-4). StepAbstract() uses more +// computationally expensive methods as the trial numbers increase. +// @returns `true` if the method was successfully able to take an integration +// step of size h (or `false` otherwise). +// @note The time and continuous state in the context are indeterminate upon +// exit. +// TODO(edrumwri) Explicitly test this method's fallback logic (i.e., how it +// calls MaybeFreshenMatrices()) in a unit test). +template +bool ImplicitEulerIntegrator::StepAbstract( + const T& t0, const T& h, const VectorX& xt0, + const std::function()>& g, + const std::function&, const T&, + typename ImplicitIntegrator::IterationMatrix*)>& + compute_and_factor_iteration_matrix, + const VectorX& xtplus_guess, + typename ImplicitIntegrator::IterationMatrix* iteration_matrix, + VectorX* xtplus, int trial) { + using std::max; + using std::min; + + // Verify the trial number is valid. + DRAKE_ASSERT(trial >= 1 && trial <= 4); + + // Verify xtplus + DRAKE_ASSERT(xtplus && xtplus->size() == xt0.size()); + + DRAKE_LOGGER_DEBUG("StepAbstract() entered for t={}, h={}, trial={}", + t0, h, trial); + + // Start from the guess. + *xtplus = xtplus_guess; + DRAKE_LOGGER_DEBUG("Starting state: {}", xtplus->transpose()); + + // Advance the context time and state to compute derivatives at t0 + h. + const T tf = t0 + h; + Context* context = this->get_mutable_context(); + context->SetTimeAndContinuousState(tf, *xtplus); + + // Initialize the "last" state update norm; this will be used to detect + // convergence. + T last_dx_norm = std::numeric_limits::infinity(); + + // Calculate Jacobian and iteration matrices (and factorizations), as needed, + // around (t0, xt0). 
We do not do this calculation if full Newton is in use; + // the calculation will be performed at the beginning of the loop instead. + // TODO(edrumwri) Consider computing the Jacobian matrix around tf and/or + // xtplus. This would give a better Jacobian, but would + // complicate the logic, since the Jacobian would no longer + // (necessarily) be fresh upon fallback to a smaller step size. + if (!this->get_use_full_newton() && + !this->MaybeFreshenMatrices(t0, xt0, h, trial, + compute_and_factor_iteration_matrix, + iteration_matrix)) { + return false; + } + + // Do the Newton-Raphson iterations. + for (int i = 0; i < this->max_newton_raphson_iterations(); ++i) { + this->FreshenMatricesIfFullNewton(tf, *xtplus, h, + compute_and_factor_iteration_matrix, + iteration_matrix); + + // Evaluate the residual error using: + // g(x(t0+h)) = x(t0+h) - x(t0) - h f(t0+h,x(t0+h)). + VectorX goutput = g(); + + // Update the number of Newton-Raphson iterations. + num_nr_iterations_++; + + // Compute the state update using the equation A*x = -g(), where A is the + // iteration matrix. + // TODO(edrumwri): Allow caller to provide their own solver. + VectorX dx = iteration_matrix->Solve(-goutput); + + // Get the infinity norm of the weighted update vector. + dx_state_->get_mutable_vector().SetFromVector(dx); + T dx_norm = this->CalcStateChangeNorm(*dx_state_); + + // Update the state vector. + *xtplus += dx; + context->SetTimeAndContinuousState(tf, *xtplus); + + // Check for Newton-Raphson convergence. + typename ImplicitIntegrator::ConvergenceStatus status = + this->CheckNewtonConvergence(i, *xtplus, dx, dx_norm, last_dx_norm); + // If it converged, we're done. + if (status == ImplicitIntegrator::ConvergenceStatus::kConverged) + return true; + // If it diverged, we have to abort and try again. + if (status == ImplicitIntegrator::ConvergenceStatus::kDiverged) + break; + // Otherwise, continue to the next Newton-Raphson iteration. + DRAKE_DEMAND(status == + ImplicitIntegrator::ConvergenceStatus::kNotConverged); + + // Update the norm of the state update. + last_dx_norm = dx_norm; + } + + DRAKE_LOGGER_DEBUG("StepAbstract() convergence failed"); + + // If Jacobian and iteration matrix factorizations are not reused, there + // is nothing else we can try. Note that get_reuse() returns false if + // "full Newton-Raphson" mode is activated (see + // ImplicitIntegrator::get_use_full_newton()). + if (!this->get_reuse()) + return false; + + // Try StepAbstract again. That method will freshen Jacobians and iteration + // matrix factorizations as necessary. + return StepAbstract(t0, h, xt0, g, compute_and_factor_iteration_matrix, + xtplus_guess, iteration_matrix, xtplus, trial + 1); +} + +// Steps the system forward by a single step of at most h using the implicit +// Euler method. +// @param t0 the time at the left end of the integration interval. +// @param h the maximum time increment to step forward. +// @param xt0 the continuous state at t0. +// @param[out] the computed value for `x(t0+h)` on successful return. +// @returns `true` if the step of size `h` was successful, `false` otherwise. +// @note The time and continuous state in the context are indeterminate upon +// exit. +template +bool ImplicitEulerIntegrator::StepImplicitEuler(const T& t0, const T& h, + const VectorX& xt0, VectorX* xtplus) { + using std::abs; + + DRAKE_LOGGER_DEBUG("StepImplicitEuler(h={}) t={}", h, t0); + + // Set g for the implicit Euler method. 
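// ---------------------------------------------------------------------------
// Illustrative sketch (editorial aside, not applied by this patch): the same
// residual g(x) = x - x0 - h * f(t1, x) and Newton-Raphson loop, reduced to a
// scalar ODE with a user-supplied derivative dfdx. The helper name
// ScalarImplicitEulerStep and the tolerance/iteration defaults are
// assumptions made only for this sketch.
#include <cmath>
#include <functional>

// Solves x1 = x0 + h * f(t1, x1) for x1, where g'(x) = 1 - h * dfdx(t1, x).
double ScalarImplicitEulerStep(
    const std::function<double(double, double)>& f,
    const std::function<double(double, double)>& dfdx,
    double t0, double x0, double h,
    double tolerance = 1e-10, int max_iterations = 50) {
  const double t1 = t0 + h;
  double x = x0;  // Same starting guess as the code below: x(t0).
  for (int i = 0; i < max_iterations; ++i) {
    const double g = x - x0 - h * f(t1, x);
    const double dx = -g / (1.0 - h * dfdx(t1, x));
    x += dx;
    if (std::abs(dx) <= tolerance) break;
  }
  return x;
}
// ---------------------------------------------------------------------------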
+ Context* context = this->get_mutable_context(); + std::function()> g = + [&xt0, h, context, this]() { + return (context->get_continuous_state().CopyToVector() - xt0 - + h * this->EvalTimeDerivatives(*context).CopyToVector()).eval(); + }; + + // Use the current state as the candidate value for the next state. + // [Hairer 1996] validates this choice (p. 120). + const VectorX& xtplus_guess = xt0; + + // Attempt the step. + return StepAbstract(t0, h, xt0, g, + ComputeAndFactorImplicitEulerIterationMatrix, + xtplus_guess, &ie_iteration_matrix_, &*xtplus); +} + +// Steps forward by a single step of `h` using the implicit trapezoid +// method, if possible. +// @param t0 the time at the left end of the integration interval. +// @param h the maximum time increment to step forward. +// @param dx0 the time derivatives computed at time and state (t0, xt0). +// @param xtplus_ie x(t0+h) computed by the implicit Euler method. +// @param[out] xtplus x(t0+h) computed by the implicit trapezoid method on +// successful return. +// @returns `true` if the step was successful and `false` otherwise. +// @note The time and continuous state in the context are indeterminate upon +// exit. +template +bool ImplicitEulerIntegrator::StepImplicitTrapezoid( + const T& t0, const T& h, const VectorX& xt0, const VectorX& dx0, + const VectorX& xtplus_ie, VectorX* xtplus) { + using std::abs; + + DRAKE_LOGGER_DEBUG("StepImplicitTrapezoid(h={}) t={}", h, t0); + + // Set g for the implicit trapezoid method. + // Define g(x(t+h)) ≡ x(t0+h) - x(t0) - h/2 (f(t0,x(t0)) + f(t0+h,x(t0+h)) and + // evaluate it at the current x(t+h). + Context* context = this->get_mutable_context(); + std::function()> g = + [&xt0, h, &dx0, context, this]() { + return (context->get_continuous_state().CopyToVector() - xt0 - h/2 * + (dx0 + this->EvalTimeDerivatives( + *context).CopyToVector().eval())).eval(); + }; + + // Store statistics before calling StepAbstract(). The difference between + // the modified statistics and the stored statistics will be used to compute + // the trapezoid method-specific statistics. + int stored_num_jacobian_evaluations = this->get_num_jacobian_evaluations(); + int stored_num_iter_factorizations = + this->get_num_iteration_matrix_factorizations(); + int64_t stored_num_function_evaluations = + this->get_num_derivative_evaluations(); + int64_t stored_num_jacobian_function_evaluations = + this->get_num_derivative_evaluations_for_jacobian(); + int stored_num_nr_iterations = this->get_num_newton_raphson_iterations(); + + // Attempt to step. + bool success = StepAbstract(t0, h, xt0, g, + ComputeAndFactorImplicitTrapezoidIterationMatrix, + xtplus_ie, &itr_iteration_matrix_, xtplus); + + // Move statistics to implicit trapezoid-specific. + num_err_est_jacobian_reforms_ += + this->get_num_jacobian_evaluations() - stored_num_jacobian_evaluations; + num_err_est_iter_factorizations_ += + this->get_num_iteration_matrix_factorizations() - + stored_num_iter_factorizations; + num_err_est_function_evaluations_ += + this->get_num_derivative_evaluations() - stored_num_function_evaluations; + num_err_est_jacobian_function_evaluations_ += + this->get_num_derivative_evaluations_for_jacobian() - + stored_num_jacobian_function_evaluations; + num_err_est_nr_iterations_ += this->get_num_newton_raphson_iterations() - + stored_num_nr_iterations; + + return success; +} + +// Steps both implicit Euler and implicit trapezoid forward by h, if possible. +// @param t0 the time at the left end of the integration interval. 
+// @param h the integration step size to attempt. +// @param [out] xtplus_ie contains the Euler integrator solution (i.e., +// `x(t0+h)`) on return. +// @param [out] xtplus_itr contains the implicit trapezoid solution (i.e., +// `x(t0+h)`) on return. +// @returns `true` if the step of size `h` was successful, `false` otherwise. +template +bool ImplicitEulerIntegrator::AttemptStepPaired(const T& t0, const T& h, + const VectorX& xt0, VectorX* xtplus_ie, VectorX* xtplus_itr) { + using std::abs; + DRAKE_ASSERT(xtplus_ie); + DRAKE_ASSERT(xtplus_itr); + + // Compute the derivative at time and state (t0, x(t0)). NOTE: the derivative + // is calculated at this point (early on in the integration process) in order + // to reuse the derivative evaluation, via the cache, from the last + // integration step (if possible). + const VectorX dx0 = this->EvalTimeDerivatives( + this->get_context()).CopyToVector(); + + // Do the Euler step. + if (!StepImplicitEuler(t0, h, xt0, xtplus_ie)) { + DRAKE_LOGGER_DEBUG("Implicit Euler approach did not converge for " + "step size {}", h); + return false; + } + + // The error estimation process uses the implicit trapezoid method, which + // is defined as: + // x(t0+h) = x(t0) + h/2 (f(t0, x(t0) + f(t0+h, x(t0+h)) + // x(t0+h) from the implicit Euler method is presumably a good starting point. + + // The error estimate is derived as follows (thanks to Michael Sherman): + // x*(t0+h) = xᵢₑ(t0+h) + O(h²) [implicit Euler] + // = xₜᵣ(t0+h) + O(h³) [implicit trapezoid] + // where x*(t0+h) is the true (generally unknown) answer that we seek. + // This implies: + // xᵢₑ(t0+h) + O(h²) = xₜᵣ(t0+h) + O(h³). + // Given that the second order term subsumes the third order one, we have: + // xᵢₑ(t0+h) - xₜᵣ(t0+h) = O(h²). + + // Attempt to compute the implicit trapezoid solution. + if (StepImplicitTrapezoid(t0, h, xt0, dx0, *xtplus_ie, xtplus_itr)) { + // Reset the state to that computed by implicit Euler. + // TODO(edrumwri): Explore using the implicit trapezoid method solution + // instead as *the* solution, rather than the implicit + // Euler. Refer to [Lambert, 1991], Ch 6. + Context* context = this->get_mutable_context(); + context->SetTimeAndContinuousState(t0 + h, *xtplus_ie); + return true; + } else { + DRAKE_LOGGER_DEBUG("Implicit trapezoid approach FAILED with a step" + "size that succeeded on implicit Euler."); + return false; + } +} + +// Takes a given step of the requested size, if possible. +// @returns `true` if successful and `false` otherwise; on `true`, the time +// and continuous state will be advanced in the context (e.g., from +// t0 to t0 + h). On `false` return, the time and continuous state in +// the context will be restored to its original value (at t0). +template +bool ImplicitEulerIntegrator::DoImplicitIntegratorStep(const T& h) { + // Save the current time and state. + Context* context = this->get_mutable_context(); + const T t0 = context->get_time(); + DRAKE_LOGGER_DEBUG("IE DoStep(h={}) t={}", h, t0); + + xt0_ = context->get_continuous_state().CopyToVector(); + xtplus_ie_.resize(xt0_.size()); + xtplus_tr_.resize(xt0_.size()); + + // If the requested h is less than the minimum step size, we'll advance time + // using an explicit Euler step. + if (h < this->get_working_minimum_step_size()) { + DRAKE_LOGGER_DEBUG("-- requested step too small, taking explicit " + "step instead"); + + // TODO(edrumwri): Investigate replacing this with an explicit trapezoid + // step, which would be expected to give better accuracy. 
+ // The mitigating factor is that h is already small, so a + // test of, e.g., a square wave function, should quantify + // the improvement (if any). + + // The error estimation process for explicit Euler uses an explicit second + // order Runge-Kutta method so that the order of the asymptotic term + // matches that used for estimating the error of the implicit Euler + // integrator. + + // Compute the Euler step. + xdot_ = this->EvalTimeDerivatives(*context).CopyToVector(); + xtplus_ie_ = xt0_ + h * xdot_; + + // Compute the RK2 step. + const int evals_before_rk2 = rk2_->get_num_derivative_evaluations(); + if (!rk2_->IntegrateWithSingleFixedStepToTime(t0 + h)) { + throw std::runtime_error("Embedded RK2 integrator failed to take a single" + "fixed step to the requested time."); + } + + const int evals_after_rk2 = rk2_->get_num_derivative_evaluations(); + xtplus_tr_ = context->get_continuous_state().CopyToVector(); + + // Update the error estimation ODE counts. + num_err_est_function_evaluations_ += (evals_after_rk2 - evals_before_rk2); + + // Revert the state to that computed by explicit Euler. + context->SetTimeAndContinuousState(t0 + h, xtplus_ie_); + } else { + // Try taking the requested step. + bool success = AttemptStepPaired(t0, h, xt0_, &xtplus_ie_, &xtplus_tr_); + + // If the step was not successful, reset the time and state. + if (!success) { + context->SetTimeAndContinuousState(t0, xt0_); + return false; + } + } + + // Compute and update the error estimate. + err_est_vec_ = xtplus_ie_ - xtplus_tr_; + + // Update the caller-accessible error estimate. + this->get_mutable_error_estimate()->get_mutable_vector(). + SetFromVector(err_est_vec_); + + return true; +} + } // namespace systems } // namespace drake +DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS( + class drake::systems::ImplicitEulerIntegrator) diff --git a/systems/analysis/implicit_euler_integrator.h b/systems/analysis/implicit_euler_integrator.h index 76e1a0aaec2a..f869f7b4ba10 100644 --- a/systems/analysis/implicit_euler_integrator.h +++ b/systems/analysis/implicit_euler_integrator.h @@ -1,13 +1,9 @@ #pragma once -#include #include -#include - -#include +#include "drake/common/default_scalars.h" #include "drake/common/drake_copyable.h" -#include "drake/math/autodiff_gradient.h" #include "drake/systems/analysis/implicit_integrator.h" #include "drake/systems/analysis/runge_kutta2_integrator.h" @@ -175,5 +171,9 @@ class ImplicitEulerIntegrator final : public ImplicitIntegrator { int64_t num_err_est_jacobian_function_evaluations_{0}; int64_t num_err_est_nr_iterations_{0}; }; + } // namespace systems } // namespace drake + +DRAKE_DECLARE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS( + class drake::systems::ImplicitEulerIntegrator) diff --git a/systems/analysis/implicit_integrator.cc b/systems/analysis/implicit_integrator.cc index 4e62b7d671af..5a722e4cf500 100644 --- a/systems/analysis/implicit_integrator.cc +++ b/systems/analysis/implicit_integrator.cc @@ -1,7 +1,438 @@ #include "drake/systems/analysis/implicit_integrator.h" +#include +#include + #include "drake/common/autodiff.h" -#include "drake/common/default_scalars.h" +#include "drake/common/drake_assert.h" +#include "drake/common/text_logging.h" +#include "drake/math/autodiff_gradient.h" + +namespace drake { +namespace systems { + +template +void ImplicitIntegrator::DoResetStatistics() { + num_iter_factorizations_ = 0; + num_jacobian_function_evaluations_ = 0; + num_jacobian_evaluations_ = 0; + 
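At this point in the hunk both halves of the new pattern are visible: the DRAKE_DEFINE_... macro at the bottom of the new .cc file and the DRAKE_DECLARE_... macro at the bottom of the corresponding header. The sketch below shows the underlying C++ mechanism with a hypothetical MyIntegrator class; the Drake macros wrap roughly this pattern for the default nonsymbolic scalars (double and AutoDiffXd).

// ---- my_integrator.h ----
template <class T>
class MyIntegrator {
 public:
  T Step(const T& h);  // Declared here; the body lives in the .cc file.
};
// Suppress implicit instantiation in every translation unit that includes
// this header (the analogue of the DRAKE_DECLARE_... macro).
extern template class MyIntegrator<double>;

// ---- my_integrator.cc ----
// #include "my_integrator.h"
template <class T>
T MyIntegrator<T>::Step(const T& h) {
  return h;  // Placeholder body, formerly living in an -inl.h header.
}
// Emit the instantiation exactly once, in this translation unit (the
// analogue of the DRAKE_DEFINE_... macro).
template class MyIntegrator<double>;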
DoResetImplicitIntegratorStatistics(); +} + +// Computes the Jacobian of the ordinary differential equations around time +// and continuous state `(t, xt)` using automatic differentiation. +// @param system The dynamical system. +// @param t the time around which to compute the Jacobian matrix. +// @param xt the continuous state around which to compute the Jacobian matrix. +// @param context the Context of the system, at time and continuous state +// unknown. +// @param [out] the Jacobian matrix around time and state `(t, xt)`. +// @note The continuous state will be indeterminate on return. +template +void ImplicitIntegrator::ComputeAutoDiffJacobian( + const System& system, const T& t, const VectorX& xt, + const Context& context, MatrixX* J) { + DRAKE_LOGGER_DEBUG(" ImplicitIntegrator Compute Autodiff Jacobian t={}", t); + // Create AutoDiff versions of the state vector. + VectorX a_xt = xt; + + // Set the size of the derivatives and prepare for Jacobian calculation. + const int n_state_dim = a_xt.size(); + for (int i = 0; i < n_state_dim; ++i) + a_xt[i].derivatives() = VectorX::Unit(n_state_dim, i); + + // Get the system and the context in AutoDiffable format. Inputs must also + // be copied to the context used by the AutoDiff'd system (which is + // accomplished using FixInputPortsFrom()). + // TODO(edrumwri): Investigate means for moving as many of the operations + // below offline (or with lower frequency than once-per- + // Jacobian calculation) as is possible. These operations + // are likely to be expensive. + const auto adiff_system = system.ToAutoDiffXd(); + std::unique_ptr> adiff_context = adiff_system-> + AllocateContext(); + adiff_context->SetTimeStateAndParametersFrom(context); + adiff_system->FixInputPortsFrom(system, context, adiff_context.get()); + adiff_context->SetTime(t); + + // Set the continuous state in the context. + adiff_context->SetContinuousState(a_xt); + + // Evaluate the derivatives at that state. + const VectorX result = + this->EvalTimeDerivatives(*adiff_system, *adiff_context).CopyToVector(); + + *J = math::autoDiffToGradientMatrix(result); +} + +// Computes the Jacobian of the ordinary differential equations around time +// and continuous state `(t, xt)` using a first-order forward difference (i.e., +// numerical differentiation). +// @param system The dynamical system. +// @param t the time around which to compute the Jacobian matrix. +// @param xt the continuous state around which to compute the Jacobian matrix. +// @param context the Context of the system, at time and continuous state +// unknown. +// @param [out] the Jacobian matrix around time and state `(t, xt)`. +// @note The continuous state will be indeterminate on return. +template +void ImplicitIntegrator::ComputeForwardDiffJacobian( + const System&, const T& t, const VectorX& xt, Context* context, + MatrixX* J) { + using std::abs; + + // Set epsilon to the square root of machine precision. + const double eps = std::sqrt(std::numeric_limits::epsilon()); + + // Get the number of continuous state variables xt. + const int n = context->num_continuous_states(); + + DRAKE_LOGGER_DEBUG( + " ImplicitIntegrator Compute Forwarddiff {}-Jacobian t={}", n, t); + DRAKE_LOGGER_DEBUG( + " computing from state {}", xt.transpose()); + + // Initialize the Jacobian. + J->resize(n, n); + + // Evaluate f(t,xt). + context->SetTimeAndContinuousState(t, xt); + const VectorX f = this->EvalTimeDerivatives(*context).CopyToVector(); + + // Compute the Jacobian. 
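The AutoDiff Jacobian routine above relies on seeding each state entry with a unit derivative vector before a single evaluation of the derivatives. The toy forward-mode example below (hand-rolled dual numbers, not Drake's AutoDiffXd; all names illustrative) shows the same seeding-and-readout idea on a two-dimensional function.

#include <array>
#include <cstdio>

// A two-variable forward-mode dual number: a value plus its partials.
struct Dual {
  double v;                 // Value.
  std::array<double, 2> d;  // Partial derivatives w.r.t. x0 and x1.
};

Dual operator*(double a, const Dual& x) {
  return {a * x.v, {a * x.d[0], a * x.d[1]}};
}
Dual operator+(const Dual& a, const Dual& b) {
  return {a.v + b.v, {a.d[0] + b.d[0], a.d[1] + b.d[1]}};
}

int main() {
  // f(x) = [x0 + 2 x1, 3 x0], so the exact Jacobian is [[1, 2], [3, 0]].
  // Seeding: x0 carries unit derivative e0 and x1 carries e1, mirroring the
  // VectorX<T>::Unit(n_state_dim, i) assignment above.
  const Dual x0{1.0, {1.0, 0.0}};
  const Dual x1{2.0, {0.0, 1.0}};
  const Dual f0 = x0 + 2.0 * x1;  // Row 0 of J appears in f0.d.
  const Dual f1 = 3.0 * x0;       // Row 1 of J appears in f1.d.
  std::printf("J = [[%g, %g], [%g, %g]]\n", f0.d[0], f0.d[1], f1.d[0], f1.d[1]);
  return 0;
}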
+ VectorX xt_prime = xt; + for (int i = 0; i < n; ++i) { + // Compute a good increment to the dimension using approximately 1/eps + // digits of precision. Note that if |xt| is large, the increment will + // be large as well. If |xt| is small, the increment will be no smaller + // than eps. + const T abs_xi = abs(xt(i)); + T dxi(abs_xi); + if (dxi <= 1) { + // When |xt[i]| is small, increment will be eps. + dxi = eps; + } else { + // |xt[i]| not small; make increment a fraction of |xt[i]|. + dxi = eps * abs_xi; + } + + // Update xt', minimizing the effect of roundoff error by ensuring that + // x and dx differ by an exactly representable number. See p. 192 of + // Press, W., Teukolsky, S., Vetterling, W., and Flannery, P. Numerical + // Recipes in C++, 2nd Ed., Cambridge University Press, 2002. + xt_prime(i) = xt(i) + dxi; + dxi = xt_prime(i) - xt(i); + + // TODO(sherm1) This is invalidating q, v, and z but we only changed one. + // Switch to a method that invalides just the relevant + // partition, and ideally modify only the one changed element. + // Compute f' and set the relevant column of the Jacobian matrix. + context->SetTimeAndContinuousState(t, xt_prime); + J->col(i) = (this->EvalTimeDerivatives(*context).CopyToVector() - f) / dxi; + + // Reset xt' to xt. + xt_prime(i) = xt(i); + } +} + +// Computes the Jacobian of the ordinary differential equations around time +// and continuous state `(t, xt)` using a second-order central difference (i.e., +// numerical differentiation). +// @param system The dynamical system. +// @param t the time around which to compute the Jacobian matrix. +// @param xt the continuous state around which to compute the Jacobian matrix. +// @param context the Context of the system, at time and continuous state +// unknown. +// @param [out] the Jacobian matrix around time and state `(t, xt)`. +// @note The continuous state will be indeterminate on return. +template +void ImplicitIntegrator::ComputeCentralDiffJacobian( + const System&, const T& t, const VectorX& xt, Context* context, + MatrixX* J) { + using std::abs; + + // Cube root of machine precision (indicated by theory) seems a bit coarse. + // Pick power of eps halfway between 6/12 (i.e., 1/2) and 4/12 (i.e., 1/3). + const double eps = std::pow(std::numeric_limits::epsilon(), 5.0/12); + + // Get the number of continuous state variables xt. + const int n = context->num_continuous_states(); + + DRAKE_LOGGER_DEBUG( + " ImplicitIntegrator Compute Centraldiff {}-Jacobian t={}", n, t); + + // Initialize the Jacobian. + J->resize(n, n); + + // Evaluate f(t,xt). + context->SetTimeAndContinuousState(t, xt); + const VectorX f = this->EvalTimeDerivatives(*context).CopyToVector(); + + // Compute the Jacobian. + VectorX xt_prime = xt; + for (int i = 0; i < n; ++i) { + // Compute a good increment to the dimension using approximately 1/eps + // digits of precision. Note that if |xt| is large, the increment will + // be large as well. If |xt| is small, the increment will be no smaller + // than eps. + const T abs_xi = abs(xt(i)); + T dxi(abs_xi); + if (dxi <= 1) { + // When |xt[i]| is small, increment will be eps. + dxi = eps; + } else { + // |xt[i]| not small; make increment a fraction of |xt[i]|. + dxi = eps * abs_xi; + } + + // Update xt', minimizing the effect of roundoff error, by ensuring that + // x and dx differ by an exactly representable number. See p. 192 of + // Press, W., Teukolsky, S., Vetterling, W., and Flannery, P. Numerical + // Recipes in C++, 2nd Ed., Cambridge University Press, 2002. 
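For reference, the forward-difference scheme above can be written as a small standalone helper. This sketch (hypothetical names, plain Eigen, double only) reproduces the increment-selection rule and the exactly-representable rounding trick; it is an illustration, not the code this patch moves.

#include <cmath>
#include <functional>
#include <limits>

#include <Eigen/Dense>

// One-sided difference approximation of df/dx at x, column by column.
Eigen::MatrixXd ForwardDiffJacobian(
    const std::function<Eigen::VectorXd(const Eigen::VectorXd&)>& f,
    const Eigen::VectorXd& x) {
  const double eps = std::sqrt(std::numeric_limits<double>::epsilon());
  const int n = static_cast<int>(x.size());
  const Eigen::VectorXd f0 = f(x);
  Eigen::MatrixXd J(f0.size(), n);
  Eigen::VectorXd x_prime = x;
  for (int i = 0; i < n; ++i) {
    // Increment: eps when |x(i)| is small, a relative fraction otherwise.
    double dxi = (std::abs(x(i)) <= 1.0) ? eps : eps * std::abs(x(i));
    // Make x(i) and x(i) + dxi differ by an exactly representable amount.
    x_prime(i) = x(i) + dxi;
    dxi = x_prime(i) - x(i);
    // The i-th Jacobian column from a single extra evaluation of f.
    J.col(i) = (f(x_prime) - f0) / dxi;
    x_prime(i) = x(i);  // Restore before perturbing the next entry.
  }
  return J;
}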
+ xt_prime(i) = xt(i) + dxi; + const T dxi_plus = xt_prime(i) - xt(i); + + // TODO(sherm1) This is invalidating q, v, and z but we only changed one. + // Switch to a method that invalides just the relevant + // partition, and ideally modify only the one changed element. + // Compute f(x+dx). + context->SetContinuousState(xt_prime); + VectorX fprime_plus = this->EvalTimeDerivatives(*context).CopyToVector(); + + // Update xt' again, minimizing the effect of roundoff error. + xt_prime(i) = xt(i) - dxi; + const T dxi_minus = xt(i) - xt_prime(i); + + // Compute f(x-dx). + context->SetContinuousState(xt_prime); + VectorX fprime_minus = this->EvalTimeDerivatives( + *context).CopyToVector(); + + // Set the Jacobian column. + J->col(i) = (fprime_plus - fprime_minus) / (dxi_plus + dxi_minus); + + // Reset xt' to xt. + xt_prime(i) = xt(i); + } +} + +// Factors a dense matrix (the iteration matrix) using LU factorization, +// which should be faster than the QR factorization used in the specialized +// template method immediately below. +template +void ImplicitIntegrator::IterationMatrix::SetAndFactorIterationMatrix( + const MatrixX& iteration_matrix) { + LU_.compute(iteration_matrix); + matrix_factored_ = true; +} + +// Solves a linear system Ax = b for x using the iteration matrix (A) +// factored using LU decomposition. +// @see Factor() +template +VectorX ImplicitIntegrator::IterationMatrix::Solve( + const VectorX& b) const { + return LU_.solve(b); +} + +template +typename ImplicitIntegrator::ConvergenceStatus +ImplicitIntegrator::CheckNewtonConvergence( + int iteration, const VectorX& xtplus, const VectorX& dx, + const T& dx_norm, const T& last_dx_norm) const { + // The check below looks for convergence by identifying cases where the + // update to the state results in no change. + // Note: Since we are performing this check at the end of the iteration, + // after xtplus has been updated, we also know that there is at least some + // change to the state, no matter how small, on a non-stationary system. + // Future maintainers should make sure this check only occurs after a change + // has been made to the state. + if (this->IsUpdateZero(xtplus, dx)) { + DRAKE_LOGGER_DEBUG("magnitude of state update indicates convergence"); + return ConvergenceStatus::kConverged; + } + + // Compute the convergence rate and check convergence. + // [Hairer, 1996] notes that this convergence strategy should only be applied + // after *at least* two iterations (p. 121). In practice, we find that it + // needs to run at least three iterations otherwise some error-controlled runs + // may choke, hence we check if iteration > 1. + if (iteration > 1) { + // TODO(edrumwri) Hairer's RADAU5 implementation (allegedly) uses + // theta = sqrt(dx[k] / dx[k-2]) while DASSL uses + // theta = pow(dx[k] / dx[0], 1/k), so investigate setting + // theta to these alternative values for minimizing convergence failures. + const T theta = dx_norm / last_dx_norm; + const T eta = theta / (1 - theta); + DRAKE_LOGGER_DEBUG("Newton-Raphson loop {} theta: {}, eta: {}", + iteration, theta, eta); + + // Look for divergence. + if (theta > 1) { + DRAKE_LOGGER_DEBUG("Newton-Raphson divergence detected"); + return ConvergenceStatus::kDiverged; + } + + // Look for convergence using Equation IV.8.10 from [Hairer, 1996]. + // [Hairer, 1996] determined values of kappa in [0.01, 0.1] work most + // efficiently on a number of test problems with *Radau5* (a fifth order + // implicit integrator), p. 121. We select a value halfway in-between. 
+ const double kappa = 0.05; + const double k_dot_tol = kappa * this->get_accuracy_in_use(); + if (eta * dx_norm < k_dot_tol) { + DRAKE_LOGGER_DEBUG("Newton-Raphson converged; η = {}", eta); + return ConvergenceStatus::kConverged; + } + } + + return ConvergenceStatus::kNotConverged; +} + + +template +bool ImplicitIntegrator::IsBadJacobian(const MatrixX& J) const { + return !J.allFinite(); +} + +// Compute the partial derivative of the ordinary differential equations with +// respect to the state variables for a given x(t). +// @post the context's time and continuous state will be temporarily set during +// this call (and then reset to their original values) on return. +template +const MatrixX& ImplicitIntegrator::CalcJacobian(const T& t, + const VectorX& x) { + // We change the context but will change it back. + Context* context = this->get_mutable_context(); + + // Get the current time and state. + const T t_current = context->get_time(); + const VectorX x_current = context->get_continuous_state_vector(). + CopyToVector(); + + // Update the time and state. + context->SetTimeAndContinuousState(t, x); + num_jacobian_evaluations_++; + + // Get the current number of ODE evaluations. + int64_t current_ODE_evals = this->get_num_derivative_evaluations(); + + // Get a the system. + const System& system = this->get_system(); + + // TODO(edrumwri): Give the caller the option to provide their own Jacobian. + [this, context, &system, &t, &x]() { + switch (jacobian_scheme_) { + case JacobianComputationScheme::kForwardDifference: + ComputeForwardDiffJacobian(system, t, x, &*context, &J_); + break; + + case JacobianComputationScheme::kCentralDifference: + ComputeCentralDiffJacobian(system, t, x, &*context, &J_); + break; + + case JacobianComputationScheme::kAutomatic: + ComputeAutoDiffJacobian(system, t, x, *context, &J_); + break; + } + }(); + + // Use the new number of ODE evaluations to determine the number of Jacobian + // evaluations. + num_jacobian_function_evaluations_ += this->get_num_derivative_evaluations() + - current_ODE_evals; + + // Reset the time and state. + context->SetTimeAndContinuousState(t_current, x_current); + + return J_; +} + +template +void ImplicitIntegrator::FreshenMatricesIfFullNewton( + const T& t, const VectorX& xt, const T& h, + const std::function&, const T&, + typename ImplicitIntegrator::IterationMatrix*)>& + compute_and_factor_iteration_matrix, + typename ImplicitIntegrator::IterationMatrix* iteration_matrix) { + DRAKE_DEMAND(iteration_matrix); + + // Return immediately if full-Newton is not in use. + if (!get_use_full_newton()) return; + + // Compute the initial Jacobian and iteration matrices and factor them. + MatrixX& J = get_mutable_jacobian(); + J = CalcJacobian(t, xt); + ++num_iter_factorizations_; + compute_and_factor_iteration_matrix(J, h, iteration_matrix); +} + +template +bool ImplicitIntegrator::MaybeFreshenMatrices( + const T& t, const VectorX& xt, const T& h, int trial, + const std::function&, const T&, + typename ImplicitIntegrator::IterationMatrix*)>& + compute_and_factor_iteration_matrix, + typename ImplicitIntegrator::IterationMatrix* iteration_matrix) { + // Compute the initial Jacobian and iteration matrices and factor them, if + // necessary. + MatrixX& J = get_mutable_jacobian(); + if (!get_reuse() || J.rows() == 0 || IsBadJacobian(J)) { + J = CalcJacobian(t, xt); + ++num_iter_factorizations_; + compute_and_factor_iteration_matrix(J, h, iteration_matrix); + return true; // Indicate success. 
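The convergence test above reduces to a few lines of scalar arithmetic. The sketch below (made-up inputs, illustrative names only) evaluates the same theta/eta criterion against kappa times the accuracy target, as in Equation IV.8.10 of [Hairer, 1996].

#include <cstdio>

enum class Status { kConverged, kDiverged, kNotConverged };

// dx_norm and last_dx_norm are the sizes of the two most recent Newton
// updates; accuracy plays the role of get_accuracy_in_use() above.
Status CheckConvergence(double dx_norm, double last_dx_norm, double accuracy) {
  const double kappa = 0.05;
  const double theta = dx_norm / last_dx_norm;  // Estimated contraction rate.
  if (theta > 1.0) return Status::kDiverged;    // Updates are growing.
  const double eta = theta / (1.0 - theta);     // Geometric tail factor.
  return (eta * dx_norm < kappa * accuracy) ? Status::kConverged
                                            : Status::kNotConverged;
}

int main() {
  // Updates shrinking roughly tenfold per iteration, 1e-3 accuracy target:
  // eta * dx_norm ≈ 1.1e-7 < 5e-5, so the iteration is declared converged.
  const Status s = CheckConvergence(1e-6, 1e-5, 1e-3);
  std::printf("converged? %s\n", s == Status::kConverged ? "yes" : "no");
  return 0;
}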
+ } + + // Reuse is activated, Jacobian is fully sized, and Jacobian is not "bad". + // If the iteration matrix has not been set and factored, do only that. + if (!iteration_matrix->matrix_factored()) { + ++num_iter_factorizations_; + compute_and_factor_iteration_matrix(J, h, iteration_matrix); + return true; // Indicate success. + } + + switch (trial) { + case 1: + // For the first trial, we do nothing: this will cause the Newton-Raphson + // process to use the last computed (and already factored) iteration + // matrix. + return true; // Indicate success. + + case 2: { + // For the second trial, we perform the (likely) next least expensive + // operation, re-constructing and factoring the iteration matrix. + ++num_iter_factorizations_; + compute_and_factor_iteration_matrix(J, h, iteration_matrix); + return true; + } + + case 3: { + // For the third trial, the Jacobian matrix may already be "fresh", + // meaning that there is nothing more that can be tried (Jacobian and + // iteration matrix are both fresh) and we need to indicate failure. + if (jacobian_is_fresh_) + return false; + + // Reform the Jacobian matrix and refactor the iteration matrix. + J = CalcJacobian(t, xt); + ++num_iter_factorizations_; + compute_and_factor_iteration_matrix(J, h, iteration_matrix); + return true; + + case 4: { + // Trial #4 indicates failure. + return false; + } + + default: + throw std::domain_error("Unexpected trial number."); + } + } +} + +} // namespace systems +} // namespace drake DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS( - class ::drake::systems::ImplicitIntegrator) + class drake::systems::ImplicitIntegrator) + diff --git a/systems/analysis/implicit_integrator.h b/systems/analysis/implicit_integrator.h index 6c7aae30434b..60b74798b1af 100644 --- a/systems/analysis/implicit_integrator.h +++ b/systems/analysis/implicit_integrator.h @@ -7,9 +7,9 @@ #include +#include "drake/common/autodiff.h" #include "drake/common/default_scalars.h" #include "drake/common/drake_copyable.h" -#include "drake/math/autodiff_gradient.h" #include "drake/systems/analysis/integrator_base.h" namespace drake { @@ -446,14 +446,6 @@ class ImplicitIntegrator : public IntegratorBase { int64_t num_jacobian_function_evaluations_{0}; }; -template -void ImplicitIntegrator::DoResetStatistics() { - num_iter_factorizations_ = 0; - num_jacobian_function_evaluations_ = 0; - num_jacobian_evaluations_ = 0; - DoResetImplicitIntegratorStatistics(); -} - // We do not support computing the Jacobian matrix using automatic // differentiation when the scalar is already an AutoDiff type. // Note: must be declared inline because it's specialized and located in the @@ -467,213 +459,6 @@ inline void ImplicitIntegrator:: "AutoDiff'd ImplicitIntegrator"); } -// Computes the Jacobian of the ordinary differential equations around time -// and continuous state `(t, xt)` using automatic differentiation. -// @param system The dynamical system. -// @param t the time around which to compute the Jacobian matrix. -// @param xt the continuous state around which to compute the Jacobian matrix. -// @param context the Context of the system, at time and continuous state -// unknown. -// @param [out] the Jacobian matrix around time and state `(t, xt)`. -// @note The continuous state will be indeterminate on return. 
-template -void ImplicitIntegrator::ComputeAutoDiffJacobian( - const System& system, const T& t, const VectorX& xt, - const Context& context, MatrixX* J) { - DRAKE_LOGGER_DEBUG(" ImplicitIntegrator Compute Autodiff Jacobian t={}", t); - // Create AutoDiff versions of the state vector. - VectorX a_xt = xt; - - // Set the size of the derivatives and prepare for Jacobian calculation. - const int n_state_dim = a_xt.size(); - for (int i = 0; i < n_state_dim; ++i) - a_xt[i].derivatives() = VectorX::Unit(n_state_dim, i); - - // Get the system and the context in AutoDiffable format. Inputs must also - // be copied to the context used by the AutoDiff'd system (which is - // accomplished using FixInputPortsFrom()). - // TODO(edrumwri): Investigate means for moving as many of the operations - // below offline (or with lower frequency than once-per- - // Jacobian calculation) as is possible. These operations - // are likely to be expensive. - const auto adiff_system = system.ToAutoDiffXd(); - std::unique_ptr> adiff_context = adiff_system-> - AllocateContext(); - adiff_context->SetTimeStateAndParametersFrom(context); - adiff_system->FixInputPortsFrom(system, context, adiff_context.get()); - adiff_context->SetTime(t); - - // Set the continuous state in the context. - adiff_context->SetContinuousState(a_xt); - - // Evaluate the derivatives at that state. - const VectorX result = - this->EvalTimeDerivatives(*adiff_system, *adiff_context).CopyToVector(); - - *J = math::autoDiffToGradientMatrix(result); -} - -// Computes the Jacobian of the ordinary differential equations around time -// and continuous state `(t, xt)` using a first-order forward difference (i.e., -// numerical differentiation). -// @param system The dynamical system. -// @param t the time around which to compute the Jacobian matrix. -// @param xt the continuous state around which to compute the Jacobian matrix. -// @param context the Context of the system, at time and continuous state -// unknown. -// @param [out] the Jacobian matrix around time and state `(t, xt)`. -// @note The continuous state will be indeterminate on return. -template -void ImplicitIntegrator::ComputeForwardDiffJacobian( - const System&, const T& t, const VectorX& xt, Context* context, - MatrixX* J) { - using std::abs; - - // Set epsilon to the square root of machine precision. - const double eps = std::sqrt(std::numeric_limits::epsilon()); - - // Get the number of continuous state variables xt. - const int n = context->num_continuous_states(); - - DRAKE_LOGGER_DEBUG( - " ImplicitIntegrator Compute Forwarddiff {}-Jacobian t={}", n, t); - DRAKE_LOGGER_DEBUG( - " computing from state {}", xt.transpose()); - - // Initialize the Jacobian. - J->resize(n, n); - - // Evaluate f(t,xt). - context->SetTimeAndContinuousState(t, xt); - const VectorX f = this->EvalTimeDerivatives(*context).CopyToVector(); - - // Compute the Jacobian. - VectorX xt_prime = xt; - for (int i = 0; i < n; ++i) { - // Compute a good increment to the dimension using approximately 1/eps - // digits of precision. Note that if |xt| is large, the increment will - // be large as well. If |xt| is small, the increment will be no smaller - // than eps. - const T abs_xi = abs(xt(i)); - T dxi(abs_xi); - if (dxi <= 1) { - // When |xt[i]| is small, increment will be eps. - dxi = eps; - } else { - // |xt[i]| not small; make increment a fraction of |xt[i]|. - dxi = eps * abs_xi; - } - - // Update xt', minimizing the effect of roundoff error by ensuring that - // x and dx differ by an exactly representable number. See p. 
192 of - // Press, W., Teukolsky, S., Vetterling, W., and Flannery, P. Numerical - // Recipes in C++, 2nd Ed., Cambridge University Press, 2002. - xt_prime(i) = xt(i) + dxi; - dxi = xt_prime(i) - xt(i); - - // TODO(sherm1) This is invalidating q, v, and z but we only changed one. - // Switch to a method that invalides just the relevant - // partition, and ideally modify only the one changed element. - // Compute f' and set the relevant column of the Jacobian matrix. - context->SetTimeAndContinuousState(t, xt_prime); - J->col(i) = (this->EvalTimeDerivatives(*context).CopyToVector() - f) / dxi; - - // Reset xt' to xt. - xt_prime(i) = xt(i); - } -} - -// Computes the Jacobian of the ordinary differential equations around time -// and continuous state `(t, xt)` using a second-order central difference (i.e., -// numerical differentiation). -// @param system The dynamical system. -// @param t the time around which to compute the Jacobian matrix. -// @param xt the continuous state around which to compute the Jacobian matrix. -// @param context the Context of the system, at time and continuous state -// unknown. -// @param [out] the Jacobian matrix around time and state `(t, xt)`. -// @note The continuous state will be indeterminate on return. -template -void ImplicitIntegrator::ComputeCentralDiffJacobian( - const System&, const T& t, const VectorX& xt, Context* context, - MatrixX* J) { - using std::abs; - - // Cube root of machine precision (indicated by theory) seems a bit coarse. - // Pick power of eps halfway between 6/12 (i.e., 1/2) and 4/12 (i.e., 1/3). - const double eps = std::pow(std::numeric_limits::epsilon(), 5.0/12); - - // Get the number of continuous state variables xt. - const int n = context->num_continuous_states(); - - DRAKE_LOGGER_DEBUG( - " ImplicitIntegrator Compute Centraldiff {}-Jacobian t={}", n, t); - - // Initialize the Jacobian. - J->resize(n, n); - - // Evaluate f(t,xt). - context->SetTimeAndContinuousState(t, xt); - const VectorX f = this->EvalTimeDerivatives(*context).CopyToVector(); - - // Compute the Jacobian. - VectorX xt_prime = xt; - for (int i = 0; i < n; ++i) { - // Compute a good increment to the dimension using approximately 1/eps - // digits of precision. Note that if |xt| is large, the increment will - // be large as well. If |xt| is small, the increment will be no smaller - // than eps. - const T abs_xi = abs(xt(i)); - T dxi(abs_xi); - if (dxi <= 1) { - // When |xt[i]| is small, increment will be eps. - dxi = eps; - } else { - // |xt[i]| not small; make increment a fraction of |xt[i]|. - dxi = eps * abs_xi; - } - - // Update xt', minimizing the effect of roundoff error, by ensuring that - // x and dx differ by an exactly representable number. See p. 192 of - // Press, W., Teukolsky, S., Vetterling, W., and Flannery, P. Numerical - // Recipes in C++, 2nd Ed., Cambridge University Press, 2002. - xt_prime(i) = xt(i) + dxi; - const T dxi_plus = xt_prime(i) - xt(i); - - // TODO(sherm1) This is invalidating q, v, and z but we only changed one. - // Switch to a method that invalides just the relevant - // partition, and ideally modify only the one changed element. - // Compute f(x+dx). - context->SetContinuousState(xt_prime); - VectorX fprime_plus = this->EvalTimeDerivatives(*context).CopyToVector(); - - // Update xt' again, minimizing the effect of roundoff error. - xt_prime(i) = xt(i) - dxi; - const T dxi_minus = xt(i) - xt_prime(i); - - // Compute f(x-dx). 
- context->SetContinuousState(xt_prime); - VectorX fprime_minus = this->EvalTimeDerivatives( - *context).CopyToVector(); - - // Set the Jacobian column. - J->col(i) = (fprime_plus - fprime_minus) / (dxi_plus + dxi_minus); - - // Reset xt' to xt. - xt_prime(i) = xt(i); - } -} - -// Factors a dense matrix (the iteration matrix) using LU factorization, -// which should be faster than the QR factorization used in the specialized -// template method immediately below. -template -void ImplicitIntegrator::IterationMatrix::SetAndFactorIterationMatrix( - const MatrixX& iteration_matrix) { - LU_.compute(iteration_matrix); - matrix_factored_ = true; -} - // Factors a dense matrix (the iteration matrix). This // AutoDiff-specialized method is necessary because Eigen's LU factorization, // which should be faster than the QR factorization used here, is not currently @@ -687,15 +472,6 @@ inline void ImplicitIntegrator::IterationMatrix:: matrix_factored_ = true; } -// Solves a linear system Ax = b for x using the iteration matrix (A) -// factored using LU decomposition. -// @see Factor() -template -VectorX ImplicitIntegrator::IterationMatrix::Solve( - const VectorX& b) const { - return LU_.solve(b); -} - // Solves the linear system Ax = b for x using the iteration matrix (A) // factored using QR decomposition. // @see Factor() @@ -708,203 +484,8 @@ ImplicitIntegrator::IterationMatrix::Solve( return QR_.solve(b); } -template -typename ImplicitIntegrator::ConvergenceStatus -ImplicitIntegrator::CheckNewtonConvergence( - int iteration, const VectorX& xtplus, const VectorX& dx, - const T& dx_norm, const T& last_dx_norm) const { - // The check below looks for convergence by identifying cases where the - // update to the state results in no change. - // Note: Since we are performing this check at the end of the iteration, - // after xtplus has been updated, we also know that there is at least some - // change to the state, no matter how small, on a non-stationary system. - // Future maintainers should make sure this check only occurs after a change - // has been made to the state. - if (this->IsUpdateZero(xtplus, dx)) { - DRAKE_LOGGER_DEBUG("magnitude of state update indicates convergence"); - return ConvergenceStatus::kConverged; - } - - // Compute the convergence rate and check convergence. - // [Hairer, 1996] notes that this convergence strategy should only be applied - // after *at least* two iterations (p. 121). In practice, we find that it - // needs to run at least three iterations otherwise some error-controlled runs - // may choke, hence we check if iteration > 1. - if (iteration > 1) { - // TODO(edrumwri) Hairer's RADAU5 implementation (allegedly) uses - // theta = sqrt(dx[k] / dx[k-2]) while DASSL uses - // theta = pow(dx[k] / dx[0], 1/k), so investigate setting - // theta to these alternative values for minimizing convergence failures. - const T theta = dx_norm / last_dx_norm; - const T eta = theta / (1 - theta); - DRAKE_LOGGER_DEBUG("Newton-Raphson loop {} theta: {}, eta: {}", - iteration, theta, eta); - - // Look for divergence. - if (theta > 1) { - DRAKE_LOGGER_DEBUG("Newton-Raphson divergence detected"); - return ConvergenceStatus::kDiverged; - } - - // Look for convergence using Equation IV.8.10 from [Hairer, 1996]. - // [Hairer, 1996] determined values of kappa in [0.01, 0.1] work most - // efficiently on a number of test problems with *Radau5* (a fifth order - // implicit integrator), p. 121. We select a value halfway in-between. 
- const double kappa = 0.05; - const double k_dot_tol = kappa * this->get_accuracy_in_use(); - if (eta * dx_norm < k_dot_tol) { - DRAKE_LOGGER_DEBUG("Newton-Raphson converged; η = {}", eta); - return ConvergenceStatus::kConverged; - } - } - - return ConvergenceStatus::kNotConverged; -} - - -template -bool ImplicitIntegrator::IsBadJacobian(const MatrixX& J) const { - return !J.allFinite(); -} - -// Compute the partial derivative of the ordinary differential equations with -// respect to the state variables for a given x(t). -// @post the context's time and continuous state will be temporarily set during -// this call (and then reset to their original values) on return. -template -const MatrixX& ImplicitIntegrator::CalcJacobian(const T& t, - const VectorX& x) { - // We change the context but will change it back. - Context* context = this->get_mutable_context(); - - // Get the current time and state. - const T t_current = context->get_time(); - const VectorX x_current = context->get_continuous_state_vector(). - CopyToVector(); - - // Update the time and state. - context->SetTimeAndContinuousState(t, x); - num_jacobian_evaluations_++; - - // Get the current number of ODE evaluations. - int64_t current_ODE_evals = this->get_num_derivative_evaluations(); - - // Get a the system. - const System& system = this->get_system(); - - // TODO(edrumwri): Give the caller the option to provide their own Jacobian. - [this, context, &system, &t, &x]() { - switch (jacobian_scheme_) { - case JacobianComputationScheme::kForwardDifference: - ComputeForwardDiffJacobian(system, t, x, &*context, &J_); - break; - - case JacobianComputationScheme::kCentralDifference: - ComputeCentralDiffJacobian(system, t, x, &*context, &J_); - break; - - case JacobianComputationScheme::kAutomatic: - ComputeAutoDiffJacobian(system, t, x, *context, &J_); - break; - } - }(); - - // Use the new number of ODE evaluations to determine the number of Jacobian - // evaluations. - num_jacobian_function_evaluations_ += this->get_num_derivative_evaluations() - - current_ODE_evals; - - // Reset the time and state. - context->SetTimeAndContinuousState(t_current, x_current); - - return J_; -} - -template -void ImplicitIntegrator::FreshenMatricesIfFullNewton( - const T& t, const VectorX& xt, const T& h, - const std::function&, const T&, - typename ImplicitIntegrator::IterationMatrix*)>& - compute_and_factor_iteration_matrix, - typename ImplicitIntegrator::IterationMatrix* iteration_matrix) { - DRAKE_DEMAND(iteration_matrix); - - // Return immediately if full-Newton is not in use. - if (!get_use_full_newton()) return; - - // Compute the initial Jacobian and iteration matrices and factor them. - MatrixX& J = get_mutable_jacobian(); - J = CalcJacobian(t, xt); - ++num_iter_factorizations_; - compute_and_factor_iteration_matrix(J, h, iteration_matrix); -} - -template -bool ImplicitIntegrator::MaybeFreshenMatrices( - const T& t, const VectorX& xt, const T& h, int trial, - const std::function&, const T&, - typename ImplicitIntegrator::IterationMatrix*)>& - compute_and_factor_iteration_matrix, - typename ImplicitIntegrator::IterationMatrix* iteration_matrix) { - // Compute the initial Jacobian and iteration matrices and factor them, if - // necessary. - MatrixX& J = get_mutable_jacobian(); - if (!get_reuse() || J.rows() == 0 || IsBadJacobian(J)) { - J = CalcJacobian(t, xt); - ++num_iter_factorizations_; - compute_and_factor_iteration_matrix(J, h, iteration_matrix); - return true; // Indicate success. 
- } - - // Reuse is activated, Jacobian is fully sized, and Jacobian is not "bad". - // If the iteration matrix has not been set and factored, do only that. - if (!iteration_matrix->matrix_factored()) { - ++num_iter_factorizations_; - compute_and_factor_iteration_matrix(J, h, iteration_matrix); - return true; // Indicate success. - } - - switch (trial) { - case 1: - // For the first trial, we do nothing: this will cause the Newton-Raphson - // process to use the last computed (and already factored) iteration - // matrix. - return true; // Indicate success. - - case 2: { - // For the second trial, we perform the (likely) next least expensive - // operation, re-constructing and factoring the iteration matrix. - ++num_iter_factorizations_; - compute_and_factor_iteration_matrix(J, h, iteration_matrix); - return true; - } - - case 3: { - // For the third trial, the Jacobian matrix may already be "fresh", - // meaning that there is nothing more that can be tried (Jacobian and - // iteration matrix are both fresh) and we need to indicate failure. - if (jacobian_is_fresh_) - return false; - - // Reform the Jacobian matrix and refactor the iteration matrix. - J = CalcJacobian(t, xt); - ++num_iter_factorizations_; - compute_and_factor_iteration_matrix(J, h, iteration_matrix); - return true; - - case 4: { - // Trial #4 indicates failure. - return false; - } - - default: - throw std::domain_error("Unexpected trial number."); - } - } -} - } // namespace systems } // namespace drake DRAKE_DECLARE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS( - class ::drake::systems::ImplicitIntegrator) + class drake::systems::ImplicitIntegrator) diff --git a/systems/analysis/initial_value_problem-inl.h b/systems/analysis/initial_value_problem-inl.h deleted file mode 100644 index c8232a4e709c..000000000000 --- a/systems/analysis/initial_value_problem-inl.h +++ /dev/null @@ -1,277 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include - -#include "drake/systems/analysis/initial_value_problem.h" -#include "drake/systems/analysis/runge_kutta3_integrator-inl.h" -#include "drake/systems/framework/basic_vector.h" -#include "drake/systems/framework/continuous_state.h" -#include "drake/systems/framework/leaf_system.h" -#include "drake/systems/framework/vector_base.h" - -namespace drake { -namespace systems { - -namespace internal { - -/// A LeafSystem subclass used to describe parameterized ODE systems -/// i.e. d𝐱/dt = f(t, 𝐱; 𝐤) where f : t ⨯ 𝐱 → ℝⁿ, t ∈ ℝ , 𝐱 ∈ ℝⁿ, 𝐤 ∈ ℝᵐ. -/// The vector variable 𝐱 corresponds to the system state that is evolved -/// through time t by the function f, which is in turn parameterized by a -/// vector 𝐤. -/// -/// @tparam T The ℝ domain scalar type, which must be a valid Eigen scalar. -template -class ODESystem : public LeafSystem { - public: - DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(ODESystem); - - typedef typename InitialValueProblem::ODEFunction SystemFunction; - - /// Constructs a system that will use the given @p system_function, - /// parameterized as described by the @p param_model, to compute the - /// derivatives and advance the @p state_model. - /// - /// @remarks Here, the 'model' term has been borrowed from LeafSystem - /// terminology, where these vectors are used both to provide initial - /// values and to convey information about the dimensionality of the - /// variables involved. - /// - /// @param system_function The system function f(t, 𝐱; 𝐤). - /// @param state_model The state model vector 𝐱₀, with initial values. 
- /// @param param_model The parameter model vector 𝐤₀, with default values. - ODESystem(const SystemFunction& system_function, - const VectorX& state_model, - const VectorX& param_model); - - protected: - void DoCalcTimeDerivatives( - const Context& context, - ContinuousState* derivatives) const override; - - private: - // General ODE system d𝐱/dt = f(t, 𝐱; 𝐤) function. - const SystemFunction system_function_; -}; - - -template -ODESystem::ODESystem( - const typename ODESystem::SystemFunction& system_function, - const VectorX& state_model, const VectorX& param_model) - : system_function_(system_function) { - // Models system state after the given state model. - this->DeclareContinuousState(BasicVector(state_model)); - // Models system parameters after the given parameter model. - this->DeclareNumericParameter(BasicVector(param_model)); -} - -template -void ODESystem::DoCalcTimeDerivatives( - const Context& context, ContinuousState* derivatives) const { - // Retrieves the state vector. This cast is safe because the - // ContinuousState of a LeafSystem is flat i.e. it is just - // a BasicVector, and the implementation deals with LeafSystem - // instances only by design. - const BasicVector& state_vector = dynamic_cast&>( - context.get_continuous_state_vector()); - // Retrieves the parameter vector. - const BasicVector& parameter_vector = - context.get_numeric_parameter(0); - - // Retrieves the derivatives vector. This cast is safe because the - // ContinuousState of a LeafSystem is flat i.e. it is just - // a BasicVector, and the implementation deals with LeafSystem - // instances only by design. - BasicVector& derivatives_vector = - dynamic_cast&>(derivatives->get_mutable_vector()); - // Computes the derivatives vector using the given system function - // for the given time and state and with the given parameterization. - derivatives_vector.set_value(system_function_( - context.get_time(), state_vector.get_value(), - parameter_vector.get_value())); -} - -} // namespace internal - -template -const double InitialValueProblem::kDefaultAccuracy = 1e-4; - -template -const T InitialValueProblem::kInitialStepSize = static_cast(1e-4); - -template -const T InitialValueProblem::kMaxStepSize = static_cast(1e-1); - -template -InitialValueProblem::InitialValueProblem( - const ODEFunction& ode_function, - const SpecifiedValues& default_values) - : default_values_(default_values), - current_values_(default_values) { - // Checks that preconditions are met. - if (!default_values_.t0) { - throw std::logic_error("No default initial time t0 was given."); - } - if (!default_values_.x0) { - throw std::logic_error("No default initial state x0 was given."); - } - if (!default_values_.k) { - throw std::logic_error("No default parameters vector k was given."); - } - // Instantiates the system using the given defaults as models. - system_ = std::make_unique>( - ode_function, default_values_.x0.value(), default_values_.k.value()); - - // Allocates a new default integration context with the - // given default initial time. - context_ = system_->CreateDefaultContext(); - context_->SetTime(default_values_.t0.value()); - - // Instantiates an explicit RK3 integrator by default. - integrator_ = std::make_unique>( - *system_, context_.get()); - - // Sets step size and accuracy defaults. 
- integrator_->request_initial_step_size_target( - InitialValueProblem::kInitialStepSize); - integrator_->set_maximum_step_size( - InitialValueProblem::kMaxStepSize); - integrator_->set_target_accuracy( - InitialValueProblem::kDefaultAccuracy); -} - -template -VectorX InitialValueProblem::Solve( - const T& tf, const SpecifiedValues& values) const { - // Gets all values to solve with, either given or default, while - // checking that all preconditions hold. - const SpecifiedValues safe_values = SanitizeValuesOrThrow(tf, values); - - // Potentially invalidates the cache. - ResetCachedStateIfNecessary(tf, safe_values); - - // Initializes integrator if necessary. - if (!integrator_->is_initialized()) { - integrator_->Initialize(); - } - - // Integrates up to the requested time. - integrator_->IntegrateWithMultipleStepsToTime(tf); - - // Retrieves the state vector. This cast is safe because the - // ContinuousState of a LeafSystem is flat i.e. it is just - // a BasicVector, and the implementation deals with LeafSystem - // instances only by design. - const BasicVector& state_vector = dynamic_cast&>( - context_->get_continuous_state_vector()); - return state_vector.get_value(); -} - -template -void InitialValueProblem::ResetCachedState( - const SpecifiedValues& values) const { - // Sets context (initial) time. - context_->SetTime(values.t0.value()); - - // Sets context (initial) state. This cast is safe because the - // ContinuousState of a LeafSystem is flat i.e. it is just - // a BasicVector, and the implementation deals with LeafSystem - // instances only by design. - BasicVector& state_vector = dynamic_cast&>( - context_->get_mutable_continuous_state_vector()); - state_vector.set_value(values.x0.value()); - - // Sets context parameters. - BasicVector& parameter_vector = - context_->get_mutable_numeric_parameter(0); - parameter_vector.set_value(values.k.value()); - - // Keeps track of current step size and accuracy settings (regardless - // of whether these are actually used by the integrator instance or not). - const T max_step_size = integrator_->get_maximum_step_size(); - const T initial_step_size = integrator_->get_initial_step_size_target(); - const double target_accuracy = integrator_->get_target_accuracy(); - - // Resets the integrator internal state. - integrator_->Reset(); - - // Sets integrator settings again. - integrator_->set_maximum_step_size(max_step_size); - if (integrator_->supports_error_estimation()) { - // Specifies initial step and accuracy setting only if necessary. - integrator_->request_initial_step_size_target(initial_step_size); - integrator_->set_target_accuracy(target_accuracy); - } - // Keeps track of the current initial conditions and parameters - // for future cache invalidation. - current_values_ = values; -} - -template -void InitialValueProblem::ResetCachedStateIfNecessary( - const T& tf, const SpecifiedValues& values) const { - // Only resets cache if necessary, i.e. if either initial - // conditions or parameters have changed or if the time - // the IVP is to be solved for is in the past with respect - // to the integration context time. - if (values != current_values_ || tf < context_->get_time()) { - ResetCachedState(values); - } -} - -template -typename InitialValueProblem::SpecifiedValues -InitialValueProblem::SanitizeValuesOrThrow( - const T& tf, const SpecifiedValues& values) const { - SpecifiedValues safe_values; - safe_values.t0 = values.t0.has_value() ? 
values.t0 : default_values_.t0; - if (tf < safe_values.t0.value()) { - throw std::logic_error("Cannot solve IVP for a time" - " before the initial condition."); - } - safe_values.x0 = values.x0.has_value() ? values.x0 : default_values_.x0; - if (safe_values.x0.value().size() != default_values_.x0.value().size()) { - throw std::logic_error("IVP initial state vector x0 is" - " of the wrong dimension."); - } - safe_values.k = values.k.has_value() ? values.k : default_values_.k; - if (safe_values.k.value().size() != default_values_.k.value().size()) { - throw std::logic_error("IVP parameters vector k is " - " of the wrong dimension"); - } - return safe_values; -} - -template -std::unique_ptr> InitialValueProblem::DenseSolve( - const T& tf, const SpecifiedValues& values) const { - // Gets all values to solve with, either given or default, while - // checking that all preconditions hold. - const SpecifiedValues safe_values = SanitizeValuesOrThrow(tf, values); - - // Unconditionally invalidates the cache. All integration steps that - // take the IVP forward in time up to tf are necessary to build a - // DenseOutput. - ResetCachedState(safe_values); - - // Re-initialize integrator after cache invalidation. - integrator_->Initialize(); - - // Starts dense integration to build a dense output. - integrator_->StartDenseIntegration(); - - // Steps the integrator through the entire interval. - integrator_->IntegrateWithMultipleStepsToTime(tf); - - // Stops dense integration to prevent future updates to - // the dense output just built and yields it to the caller. - return integrator_->StopDenseIntegration(); -} - -} // namespace systems -} // namespace drake diff --git a/systems/analysis/initial_value_problem.cc b/systems/analysis/initial_value_problem.cc index dcc479d497d5..2f79ebaf0079 100644 --- a/systems/analysis/initial_value_problem.cc +++ b/systems/analysis/initial_value_problem.cc @@ -1,13 +1,272 @@ #include "drake/systems/analysis/initial_value_problem.h" -#include "drake/systems/analysis/initial_value_problem-inl.h" -#include "drake/common/default_scalars.h" +#include + +#include "drake/systems/analysis/runge_kutta3_integrator.h" +#include "drake/systems/framework/continuous_state.h" +#include "drake/systems/framework/leaf_system.h" namespace drake { namespace systems { +namespace { -DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS( - class InitialValueProblem); +// A LeafSystem subclass used to describe parameterized ODE systems +// i.e. d𝐱/dt = f(t, 𝐱; 𝐤) where f : t ⨯ 𝐱 → ℝⁿ, t ∈ ℝ , 𝐱 ∈ ℝⁿ, 𝐤 ∈ ℝᵐ. +// The vector variable 𝐱 corresponds to the system state that is evolved +// through time t by the function f, which is in turn parameterized by a +// vector 𝐤. +// +// @tparam T The ℝ domain scalar type, which must be a valid Eigen scalar. +template +class ODESystem : public LeafSystem { + public: + DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(ODESystem); + + typedef typename InitialValueProblem::ODEFunction SystemFunction; + + // Constructs a system that will use the given @p system_function, + // parameterized as described by the @p param_model, to compute the + // derivatives and advance the @p state_model. + // + // @remarks Here, the 'model' term has been borrowed from LeafSystem + // terminology, where these vectors are used both to provide initial + // values and to convey information about the dimensionality of the + // variables involved. + // + // @param system_function The system function f(t, 𝐱; 𝐤). + // @param state_model The state model vector 𝐱₀, with initial values. 
+ // @param param_model The parameter model vector 𝐤₀, with default values. + ODESystem(const SystemFunction& system_function, + const VectorX& state_model, + const VectorX& param_model); + + protected: + void DoCalcTimeDerivatives( + const Context& context, + ContinuousState* derivatives) const override; + + private: + // General ODE system d𝐱/dt = f(t, 𝐱; 𝐤) function. + const SystemFunction system_function_; +}; + + +template +ODESystem::ODESystem( + const typename ODESystem::SystemFunction& system_function, + const VectorX& state_model, const VectorX& param_model) + : system_function_(system_function) { + // Models system state after the given state model. + this->DeclareContinuousState(BasicVector(state_model)); + // Models system parameters after the given parameter model. + this->DeclareNumericParameter(BasicVector(param_model)); +} + +template +void ODESystem::DoCalcTimeDerivatives( + const Context& context, ContinuousState* derivatives) const { + // Retrieves the state vector. This cast is safe because the + // ContinuousState of a LeafSystem is flat i.e. it is just + // a BasicVector, and the implementation deals with LeafSystem + // instances only by design. + const BasicVector& state_vector = dynamic_cast&>( + context.get_continuous_state_vector()); + // Retrieves the parameter vector. + const BasicVector& parameter_vector = + context.get_numeric_parameter(0); + + // Retrieves the derivatives vector. This cast is safe because the + // ContinuousState of a LeafSystem is flat i.e. it is just + // a BasicVector, and the implementation deals with LeafSystem + // instances only by design. + BasicVector& derivatives_vector = + dynamic_cast&>(derivatives->get_mutable_vector()); + // Computes the derivatives vector using the given system function + // for the given time and state and with the given parameterization. + derivatives_vector.set_value(system_function_( + context.get_time(), state_vector.get_value(), + parameter_vector.get_value())); +} + +} // namespace + +template +const double InitialValueProblem::kDefaultAccuracy = 1e-4; + +template +const T InitialValueProblem::kInitialStepSize = static_cast(1e-4); + +template +const T InitialValueProblem::kMaxStepSize = static_cast(1e-1); + +template +InitialValueProblem::InitialValueProblem( + const ODEFunction& ode_function, + const SpecifiedValues& default_values) + : default_values_(default_values), + current_values_(default_values) { + // Checks that preconditions are met. + if (!default_values_.t0) { + throw std::logic_error("No default initial time t0 was given."); + } + if (!default_values_.x0) { + throw std::logic_error("No default initial state x0 was given."); + } + if (!default_values_.k) { + throw std::logic_error("No default parameters vector k was given."); + } + // Instantiates the system using the given defaults as models. + system_ = std::make_unique>( + ode_function, default_values_.x0.value(), default_values_.k.value()); + + // Allocates a new default integration context with the + // given default initial time. + context_ = system_->CreateDefaultContext(); + context_->SetTime(default_values_.t0.value()); + + // Instantiates an explicit RK3 integrator by default. + integrator_ = std::make_unique>( + *system_, context_.get()); + + // Sets step size and accuracy defaults. 
+ integrator_->request_initial_step_size_target( + InitialValueProblem::kInitialStepSize); + integrator_->set_maximum_step_size( + InitialValueProblem::kMaxStepSize); + integrator_->set_target_accuracy( + InitialValueProblem::kDefaultAccuracy); +} + +template +VectorX InitialValueProblem::Solve( + const T& tf, const SpecifiedValues& values) const { + // Gets all values to solve with, either given or default, while + // checking that all preconditions hold. + const SpecifiedValues safe_values = SanitizeValuesOrThrow(tf, values); + + // Potentially invalidates the cache. + ResetCachedStateIfNecessary(tf, safe_values); + + // Initializes integrator if necessary. + if (!integrator_->is_initialized()) { + integrator_->Initialize(); + } + + // Integrates up to the requested time. + integrator_->IntegrateWithMultipleStepsToTime(tf); + + // Retrieves the state vector. This cast is safe because the + // ContinuousState of a LeafSystem is flat i.e. it is just + // a BasicVector, and the implementation deals with LeafSystem + // instances only by design. + const BasicVector& state_vector = dynamic_cast&>( + context_->get_continuous_state_vector()); + return state_vector.get_value(); +} + +template +void InitialValueProblem::ResetCachedState( + const SpecifiedValues& values) const { + // Sets context (initial) time. + context_->SetTime(values.t0.value()); + + // Sets context (initial) state. This cast is safe because the + // ContinuousState of a LeafSystem is flat i.e. it is just + // a BasicVector, and the implementation deals with LeafSystem + // instances only by design. + BasicVector& state_vector = dynamic_cast&>( + context_->get_mutable_continuous_state_vector()); + state_vector.set_value(values.x0.value()); + + // Sets context parameters. + BasicVector& parameter_vector = + context_->get_mutable_numeric_parameter(0); + parameter_vector.set_value(values.k.value()); + + // Keeps track of current step size and accuracy settings (regardless + // of whether these are actually used by the integrator instance or not). + const T max_step_size = integrator_->get_maximum_step_size(); + const T initial_step_size = integrator_->get_initial_step_size_target(); + const double target_accuracy = integrator_->get_target_accuracy(); + + // Resets the integrator internal state. + integrator_->Reset(); + + // Sets integrator settings again. + integrator_->set_maximum_step_size(max_step_size); + if (integrator_->supports_error_estimation()) { + // Specifies initial step and accuracy setting only if necessary. + integrator_->request_initial_step_size_target(initial_step_size); + integrator_->set_target_accuracy(target_accuracy); + } + // Keeps track of the current initial conditions and parameters + // for future cache invalidation. + current_values_ = values; +} + +template +void InitialValueProblem::ResetCachedStateIfNecessary( + const T& tf, const SpecifiedValues& values) const { + // Only resets cache if necessary, i.e. if either initial + // conditions or parameters have changed or if the time + // the IVP is to be solved for is in the past with respect + // to the integration context time. + if (values != current_values_ || tf < context_->get_time()) { + ResetCachedState(values); + } +} + +template +typename InitialValueProblem::SpecifiedValues +InitialValueProblem::SanitizeValuesOrThrow( + const T& tf, const SpecifiedValues& values) const { + SpecifiedValues safe_values; + safe_values.t0 = values.t0.has_value() ? 
values.t0 : default_values_.t0; + if (tf < safe_values.t0.value()) { + throw std::logic_error("Cannot solve IVP for a time" + " before the initial condition."); + } + safe_values.x0 = values.x0.has_value() ? values.x0 : default_values_.x0; + if (safe_values.x0.value().size() != default_values_.x0.value().size()) { + throw std::logic_error("IVP initial state vector x0 is" + " of the wrong dimension."); + } + safe_values.k = values.k.has_value() ? values.k : default_values_.k; + if (safe_values.k.value().size() != default_values_.k.value().size()) { + throw std::logic_error("IVP parameters vector k is " + " of the wrong dimension"); + } + return safe_values; +} + +template +std::unique_ptr> InitialValueProblem::DenseSolve( + const T& tf, const SpecifiedValues& values) const { + // Gets all values to solve with, either given or default, while + // checking that all preconditions hold. + const SpecifiedValues safe_values = SanitizeValuesOrThrow(tf, values); + + // Unconditionally invalidates the cache. All integration steps that + // take the IVP forward in time up to tf are necessary to build a + // DenseOutput. + ResetCachedState(safe_values); + + // Re-initialize integrator after cache invalidation. + integrator_->Initialize(); + + // Starts dense integration to build a dense output. + integrator_->StartDenseIntegration(); + + // Steps the integrator through the entire interval. + integrator_->IntegrateWithMultipleStepsToTime(tf); + + // Stops dense integration to prevent future updates to + // the dense output just built and yields it to the caller. + return integrator_->StopDenseIntegration(); +} } // namespace systems } // namespace drake + +DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS( + class drake::systems::InitialValueProblem) diff --git a/systems/analysis/initial_value_problem.h b/systems/analysis/initial_value_problem.h index 50a7433f8d82..47df41322e7e 100644 --- a/systems/analysis/initial_value_problem.h +++ b/systems/analysis/initial_value_problem.h @@ -3,15 +3,14 @@ #include #include #include -#include +#include "drake/common/default_scalars.h" +#include "drake/common/drake_assert.h" #include "drake/common/drake_copyable.h" #include "drake/common/eigen_types.h" #include "drake/systems/analysis/dense_output.h" #include "drake/systems/analysis/integrator_base.h" #include "drake/systems/framework/context.h" -#include "drake/systems/framework/parameters.h" -#include "drake/systems/framework/vector_base.h" namespace drake { namespace systems { @@ -264,3 +263,6 @@ class InitialValueProblem { } // namespace systems } // namespace drake + +DRAKE_DECLARE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS( + class drake::systems::InitialValueProblem) diff --git a/systems/analysis/integrator_base.cc b/systems/analysis/integrator_base.cc new file mode 100644 index 000000000000..52d057afaff4 --- /dev/null +++ b/systems/analysis/integrator_base.cc @@ -0,0 +1,468 @@ +#include "drake/systems/analysis/integrator_base.h" + +namespace drake { +namespace systems { + +template +bool IntegratorBase::StepOnceErrorControlledAtMost(const T& h_max) { + using std::isnan; + using std::min; + + // Verify that the integrator supports error estimates. + if (!supports_error_estimation()) { + throw std::logic_error("StepOnceErrorControlledAtMost() requires error " + "estimation."); + } + + // Save time, continuous variables, and time derivative because we'll possibly + // revert time and state. 
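With the InitialValueProblem implementation now living entirely in this .cc file, a short usage sketch may help. It is hypothetical: it assumes the SpecifiedValues fields assigned above are publicly writable, that a default-constructed SpecifiedValues means "no overrides", and that ODEFunction has the f(t, x, k) shape implied by the call sites above. It solves dx/dt = -k[0] x from x(0) = 1.

#include <cstdio>

#include "drake/common/eigen_types.h"
#include "drake/systems/analysis/initial_value_problem.h"

int main() {
  using drake::VectorX;
  using drake::systems::InitialValueProblem;

  // f(t, x; k) = -k[0] * x.
  const InitialValueProblem<double>::ODEFunction f =
      [](const double& /* t */, const VectorX<double>& x,
         const VectorX<double>& k) -> VectorX<double> {
        return -k[0] * x;
      };

  // Default initial time, initial state, and parameters.
  InitialValueProblem<double>::SpecifiedValues defaults;
  defaults.t0 = 0.0;
  defaults.x0 = VectorX<double>::Constant(1, 1.0);
  defaults.k = VectorX<double>::Constant(1, 2.0);

  const InitialValueProblem<double> ivp(f, defaults);

  // Integrate to tf = 1 with no overrides; expect roughly exp(-2).
  const InitialValueProblem<double>::SpecifiedValues no_overrides;
  const VectorX<double> xf = ivp.Solve(1.0, no_overrides);
  std::printf("x(1) = %g\n", xf[0]);
  return 0;
}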
+ const Context& context = get_context(); + const T current_time = context.get_time(); + VectorBase& xc = + get_mutable_context()->get_mutable_continuous_state_vector(); + xc0_save_ = xc.CopyToVector(); + + // Set the step size to attempt. + T step_size_to_attempt = get_ideal_next_step_size(); + if (isnan(step_size_to_attempt)) { + // Integrator has not taken a step. Set the current step size to the + // initial step size. + step_size_to_attempt = get_initial_step_size_target(); + DRAKE_DEMAND(!isnan(step_size_to_attempt)); + } + + // This variable indicates when the integrator has been pushed to its minimum + // step limit. It can only be "true" if minimum step exceptions have been + // suppressed by the user via set_throw_on_minimum_step_size_violation(false), + // and the error control mechanism determines that the step is as low as it + // can go. + bool at_minimum_step_size = false; + + bool step_succeeded = false; + do { + // Constants used to determine whether modifications to the step size are + // close enough to the attempted step size to use the unadjusted originals, + // or (1) whether the step size to be attempted is so small that we should + // consider it to be artificially limited or (2) whether the step size to + // be attempted is sufficiently close to that requested such that the step + // size should be stretched slightly. + const double near_enough_smaller = 0.95; + const double near_enough_larger = 1.001; + + // If we lose more than a small fraction of the step size we wanted + // to take due to a need to stop at h_max, make a note of that so the + // step size adjuster won't try to grow from the current step. + bool h_was_artificially_limited = false; + if (h_max < near_enough_smaller * step_size_to_attempt) { + // h_max much smaller than current step size. + h_was_artificially_limited = true; + step_size_to_attempt = h_max; + } else { + if (h_max < near_enough_larger * step_size_to_attempt) { + // h_max is roughly current step. Make it the step size to prevent + // creating a small sliver (the remaining step). + step_size_to_attempt = h_max; + } + } + + // Limit the current step size. + step_size_to_attempt = min(step_size_to_attempt, get_maximum_step_size()); + + // Keep adjusting the integration step size until any integrator + // convergence failures disappear. Note: this loop's correctness is + // predicated on the assumption that an integrator will always converge for + // a sufficiently small, yet nonzero step size. + T adjusted_step_size = step_size_to_attempt; + while (!Step(adjusted_step_size)) { + DRAKE_LOGGER_DEBUG("Sub-step failed at {}", adjusted_step_size); + adjusted_step_size *= subdivision_factor_; + + // Note: we could give the user more rope to hang themselves by looking + // for zero rather than machine epsilon, which might be advantageous if + // the user were modeling systems over extremely small time scales. + // However, that issue could be addressed instead by scaling units, and + // using machine epsilon allows failure to be detected much more rapidly. 
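+ // In other words, repeated convergence failures shrink the attempted step by
+ // successive factors of subdivision_factor_ until Step() succeeds or the
+ // attempt falls below machine epsilon and the exception below is thrown.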
+ if (adjusted_step_size < std::numeric_limits::epsilon()) { + throw std::runtime_error("Integrator has been directed to a near zero-" + "length step in order to obtain convergence."); + } + ValidateSmallerStepSize(step_size_to_attempt, adjusted_step_size); + ++num_shrinkages_from_substep_failures_; + ++num_substep_failures_; + } + step_size_to_attempt = adjusted_step_size; + + //-------------------------------------------------------------------- + T err_norm = CalcStateChangeNorm(*get_error_estimate()); + T next_step_size; + std::tie(step_succeeded, next_step_size) = CalcAdjustedStepSize( + err_norm, step_size_to_attempt, &at_minimum_step_size); + DRAKE_LOGGER_DEBUG("Succeeded? {}, Next step size: {}", + step_succeeded, next_step_size); + + if (step_succeeded) { + // Only update the next step size (retain the previous one) if the + // step size was not artificially limited. + if (!h_was_artificially_limited) + ideal_next_step_size_ = next_step_size; + + if (isnan(get_actual_initial_step_size_taken())) + set_actual_initial_step_size_taken(step_size_to_attempt); + + // Record the adapted step size taken. + if (isnan(get_smallest_adapted_step_size_taken()) || + (step_size_to_attempt < get_smallest_adapted_step_size_taken() && + step_size_to_attempt < h_max)) + set_smallest_adapted_step_size_taken(step_size_to_attempt); + } else { + ++num_shrinkages_from_error_control_; + + // Set the next step size to attempt. + step_size_to_attempt = next_step_size; + + // Reset the time, state, and time derivative at t0. + get_mutable_context()->SetTime(current_time); + xc.SetFromVector(xc0_save_); + if (get_dense_output()) { + // Take dense output one step back to undo + // the last integration step. + get_mutable_dense_output()->Rollback(); + } + } + } while (!step_succeeded); + return (step_size_to_attempt == h_max); +} + +template +T IntegratorBase::CalcStateChangeNorm( + const ContinuousState& dx_state) const { + using std::max; + const Context& context = get_context(); + const System& system = get_system(); + + // Get weighting matrices. + const auto& qbar_v_weight = this->get_generalized_state_weight_vector(); + const auto& z_weight = this->get_misc_state_weight_vector(); + + // Get the differences in the generalized position, velocity, and + // miscellaneous continuous state vectors. + const VectorBase& dgq = dx_state.get_generalized_position(); + const VectorBase& dgv = dx_state.get_generalized_velocity(); + const VectorBase& dgz = dx_state.get_misc_continuous_state(); + + // (re-)Initialize pinvN_dq_change_ and weighted_q_change_, if necessary. + // Reinitialization might be required if the system state variables can + // change during the course of the simulation. + if (pinvN_dq_change_ == nullptr) { + pinvN_dq_change_ = std::make_unique>(dgv.size()); + weighted_q_change_ = std::make_unique>(dgq.size()); + } + DRAKE_DEMAND(pinvN_dq_change_->size() == dgv.size()); + DRAKE_DEMAND(weighted_q_change_->size() == dgq.size()); + + // TODO(edrumwri): Acquire characteristic time properly from the system + // (i.e., modify the System to provide this value). + const double characteristic_time = 1.0; + + // Computes the infinity norm of the weighted velocity variables. + unweighted_substate_change_ = dgv.CopyToVector(); + T v_nrm = qbar_v_weight.cwiseProduct(unweighted_substate_change_). + template lpNorm() * characteristic_time; + + // Compute the infinity norm of the weighted auxiliary variables. 
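+ // As with the velocity norm above, this is the infinity norm of the weighted
+ // change (z_weight ⊙ dz); note that no characteristic-time scaling is applied
+ // to the z variables.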
+ unweighted_substate_change_ = dgz.CopyToVector(); + T z_nrm = (z_weight.cwiseProduct(unweighted_substate_change_)) + .template lpNorm(); + + // Compute N * Wq * dq = N * Wꝗ * N+ * dq. + unweighted_substate_change_ = dgq.CopyToVector(); + system.MapQDotToVelocity(context, unweighted_substate_change_, + pinvN_dq_change_.get()); + system.MapVelocityToQDot( + context, qbar_v_weight.cwiseProduct(pinvN_dq_change_->CopyToVector()), + weighted_q_change_.get()); + T q_nrm = weighted_q_change_->CopyToVector(). + template lpNorm(); + DRAKE_LOGGER_DEBUG("dq norm: {}, dv norm: {}, dz norm: {}", + q_nrm, v_nrm, z_nrm); + + // Return NaN if one of the values is NaN (whether std::max does this is + // dependent upon ordering!) + using std::isnan; + if (isnan(q_nrm) || isnan(v_nrm) || isnan(z_nrm)) + return std::numeric_limits::quiet_NaN(); + + // TODO(edrumwri): Record the worst offender (which of the norms resulted + // in the largest value). + // Infinity norm of the concatenation of multiple vectors is equal to the + // maximum of the infinity norms of the individual vectors. + return max(z_nrm, max(q_nrm, v_nrm)); +} + +template +std::pair IntegratorBase::CalcAdjustedStepSize( + const T& err, + const T& step_taken, + bool* at_minimum_step_size) const { + using std::pow; + using std::min; + using std::max; + using std::isnan; + using std::isinf; + + // Magic numbers come from Simbody. + const double kSafety = 0.9; + const double kMinShrink = 0.1; + const double kMaxGrow = 5.0; + const double kHysteresisLow = 0.9; + const double kHysteresisHigh = 1.2; + + // Get the order for the integrator's error estimate. + const int err_order = get_error_estimate_order(); + + // Set value for new step size to invalid value initially. + T new_step_size(-1); + + // First, make a guess at the next step size to use based on + // the supplied error norm. Watch out for NaN. Further adjustments will be + // made in blocks of code that follow. + if (isnan(err) || isinf(err)) { // e.g., integrand returned NaN. + new_step_size = kMinShrink * step_taken; + return std::make_pair(false, new_step_size); + } else { + if (err == 0) { // A "perfect" step; can happen if no dofs for example. + new_step_size = kMaxGrow * step_taken; + } else { // Choose best step for skating just below the desired accuracy. + new_step_size = kSafety * step_taken * + pow(get_accuracy_in_use() / err, 1.0 / err_order); + } + } + + // Error indicates that the step size can be increased. + if (new_step_size > step_taken) { + // If the integrator has been directed down to the minimum step size, but + // now error indicates that the step size can be increased, de-activate + // at_minimum_step_size. + *at_minimum_step_size = false; + + // If the new step is bigger than the old, don't make the change if the + // old one was small for some unimportant reason (like reached a publishing + // interval). Also, don't grow the step size if the change would be very + // small; better to keep the step size stable in that case (maybe just + // for aesthetic reasons). + if (new_step_size < kHysteresisHigh * step_taken) + new_step_size = step_taken; + } + + // If error indicates that we should shrink the step size but are not allowed + // to, quit and indicate that the step was successful. + if (new_step_size < step_taken && *at_minimum_step_size) { + return std::make_pair(true, step_taken); + } + + // If we're supposed to shrink the step size but the one we have actually + // achieved the desired accuracy last time, we won't change the step now. 
+ // Otherwise, if we are going to shrink the step, let's not be shy -- we'll
+ // shrink it by at least a factor of kHysteresisLow.
+ if (new_step_size < step_taken) {
+ if (err <= get_accuracy_in_use()) {
+ new_step_size = step_taken; // not this time
+ } else {
+ T test_value = kHysteresisLow * step_taken;
+ new_step_size = min(new_step_size, test_value);
+ }
+ }
+
+ // Keep the size change within the allowable bounds.
+ T max_grow_step = kMaxGrow * step_taken;
+ T min_shrink_step = kMinShrink * step_taken;
+ new_step_size = min(new_step_size, max_grow_step);
+ new_step_size = max(new_step_size, min_shrink_step);
+
+ // Apply user-requested limits on min and max step size.
+ // TODO(edrumwri): Introduce some feedback to the user when integrator wants
+ // to take a smaller step than user has selected as the minimum. Options for
+ // this feedback could include throwing a special exception, logging, setting
+ // a flag in the integrator that allows throwing an exception, or returning
+ // a special status from IntegrateNoFurtherThanTime().
+ if (!isnan(get_maximum_step_size()))
+ new_step_size = min(new_step_size, get_maximum_step_size());
+ ValidateSmallerStepSize(step_taken, new_step_size);
+
+ // Raise the next step size to the working minimum, if necessary.
+ new_step_size = max(new_step_size, get_working_minimum_step_size());
+ if (new_step_size == get_working_minimum_step_size()) {
+ // Indicate that the integrator is now trying the minimum step size.
+ *at_minimum_step_size = true;
+
+ // If the integrator wants to shrink the step size below the
+ // minimum allowed and exceptions are suppressed, indicate that status.
+ if (new_step_size < step_taken)
+ return std::make_pair(false, new_step_size);
+ }
+
+ return std::make_pair(new_step_size >= step_taken, new_step_size);
+}
+
+template
+typename IntegratorBase::StepResult
+ IntegratorBase::IntegrateNoFurtherThanTime(
+ const T& publish_time, const T& update_time, const T& boundary_time) {
+ if (!IntegratorBase::is_initialized())
+ throw std::logic_error("Integrator not initialized.");
+
+ // Now that integrator has been checked for initialization, get the current
+ // time.
+ const T t0 = context_->get_time();
+
+ // Verify that h's are non-negative.
+ const T publish_dt = publish_time - t0;
+ const T update_dt = update_time - t0;
+ const T boundary_dt = boundary_time - t0;
+ if (publish_dt < 0.0)
+ throw std::logic_error("Publish h is negative.");
+ if (update_dt < 0.0)
+ throw std::logic_error("Update h is negative.");
+ if (boundary_dt < 0.0)
+ throw std::logic_error("Boundary h is negative.");
+
+ // The size of the integration step is the minimum of the time until the next
+ // update event, the time until the next publish event, the boundary time
+ // (i.e., the maximum time that the user wished to step to), and the maximum
+ // step size (which may stretch slightly to hit a discrete event).
+
+ // We report to the caller which event ultimately constrained the step size.
+ // If multiple events constrained it equally, we prefer to report update
+ // events over publish events, publish events over boundary step limits,
+ // and boundary limits over maximum step size limits. The caller must
+ // determine event simultaneity by inspecting the time.
+
+ // The maintainer of this code is advised to consider that, while updates
+ // and boundary times may both conceptually be deemed events, the distinction
+ // is made for a reason. If both an update and a boundary time occur
+ // simultaneously, the following behavior should result:
+ // (1) kReachedUpdateTime is returned, (2) Simulator::AdvanceTo() performs the
+ // necessary update, (3) IntegrateNoFurtherThanTime() is called with
+ // boundary_time equal to the current time in the context and returns
+ // kReachedBoundaryTime, and (4) the simulation terminates. This sequence of
+ // operations will ensure that the simulation state is valid if
+ // Simulator::AdvanceTo() is called again to advance time further.
+
+ // We now analyze the following simultaneous cases with respect to Simulator:
+ //
+ // { publish, update }
+ // kReachedUpdateTime will be returned, an update will be followed by a
+ // publish.
+ //
+ // { publish, update, max step }
+ // kReachedUpdateTime will be returned, an update will be followed by a
+ // publish.
+ //
+ // { publish, boundary time, max step }
+ // kReachedPublishTime will be returned, a publish will be performed followed
+ // by another call to this function, which should return kReachedBoundaryTime
+ // (followed in rapid succession by AdvanceTo(.) return).
+ //
+ // { publish, update, boundary time, maximum step size }
+ // kReachedUpdateTime will be returned, an update followed by a publish
+ // will then be performed followed by another call to this function, which
+ // should return kReachedBoundaryTime (followed in rapid succession by
+ // AdvanceTo(.) return).
+
+ // By default, the target time is that of the next discrete update event.
+ StepResult candidate_result = IntegratorBase::kReachedUpdateTime;
+ T target_time = update_time;
+
+ // If the next discrete publish event is sooner than the next discrete update
+ // event, the time of the publish event becomes the target time.
+ if (publish_time < update_time) {
+ candidate_result = IntegratorBase::kReachedPublishTime;
+ target_time = publish_time;
+ }
+
+ // If the stop time (boundary time) is sooner than the candidate, use it
+ // instead.
+ if (boundary_time < target_time) {
+ candidate_result = IntegratorBase::kReachedBoundaryTime;
+ target_time = boundary_time;
+ }
+
+ // If there is no continuous state, there will be no need to limit the
+ // integration step size.
+ if (get_context().num_continuous_states() == 0) {
+ Context* context = get_mutable_context();
+ context->SetTime(target_time);
+ return candidate_result;
+ }
+
+ // If all events are further into the future than the maximum step
+ // size times a stretch factor of 1.01, the maximum time becomes the
+ // target time. Put another way, if the maximum step occurs right before
+ // an update or a publish, the update or publish is done instead. In contrast,
+ // we never step past boundary_time, even if doing so would allow hitting a
+ // publish or an update.
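+ // For example, with t0 = 0, a maximum step of 1, and the next event being a
+ // publish at t = 1.005, the publish time is reached directly (it lies within
+ // the 1.01 stretch); a publish at t = 1.02 instead yields a plain step to
+ // t = 1 and a return value of kTimeHasAdvanced.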
+ const bool reached_boundary = + (candidate_result == IntegratorBase::kReachedBoundaryTime); + const T& max_h = this->get_maximum_step_size(); + const T max_integrator_time = t0 + max_h; + if ((reached_boundary && max_integrator_time < target_time) || + (!reached_boundary && t0 + max_h * get_stretch_factor() < target_time)) { + candidate_result = IntegratorBase::kTimeHasAdvanced; + target_time = max_integrator_time; + } + + T h = target_time - t0; + if (h < 0.0) throw std::logic_error("Negative h."); + + // If error control is disabled, call the generic stepper. Otherwise, use + // the error controlled method. + bool full_step = true; + if (this->get_fixed_step_mode()) { + T adjusted_h = h; + while (!Step(adjusted_h)) { + ++num_shrinkages_from_substep_failures_; + ++num_substep_failures_; + adjusted_h *= subdivision_factor_; + ValidateSmallerStepSize(h, adjusted_h); + full_step = false; + } + } else { + full_step = StepOnceErrorControlledAtMost(h); + } + if (get_dense_output()) { + // Consolidates current dense output, merging the step + // taken into its internal representation. + get_mutable_dense_output()->Consolidate(); + } + + // Update generic statistics. + const T actual_h = context_->get_time() - t0; + UpdateStepStatistics(actual_h); + + if (full_step || context_->get_time() >= target_time) { + // Correct any rounding error that may have caused the time to overrun + // the target time. + context_->SetTime(target_time); + + // If the integrator took the entire maximum step size we allowed above, + // we report to the caller that a step constraint was hit, which may + // indicate a discrete event has arrived. + return candidate_result; + } else { + // Otherwise, we expect that time has advanced, but no event has arrived. + return IntegratorBase::kTimeHasAdvanced; + } +} + +} // namespace systems +} // namespace drake + +DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS( + class drake::systems::IntegratorBase) + diff --git a/systems/analysis/integrator_base.h b/systems/analysis/integrator_base.h index f557ad64f2a8..81e3cfe1ac90 100644 --- a/systems/analysis/integrator_base.h +++ b/systems/analysis/integrator_base.h @@ -1,10 +1,12 @@ #pragma once #include +#include #include #include #include +#include "drake/common/default_scalars.h" #include "drake/common/drake_assert.h" #include "drake/common/drake_copyable.h" #include "drake/common/text_logging.h" @@ -1717,462 +1719,8 @@ class IntegratorBase { T req_initial_step_size_{nan()}; // means "unspecified, use default" }; -template -bool IntegratorBase::StepOnceErrorControlledAtMost(const T& h_max) { - using std::isnan; - using std::min; - - // Verify that the integrator supports error estimates. - if (!supports_error_estimation()) { - throw std::logic_error("StepOnceErrorControlledAtMost() requires error " - "estimation."); - } - - // Save time, continuous variables, and time derivative because we'll possibly - // revert time and state. - const Context& context = get_context(); - const T current_time = context.get_time(); - VectorBase& xc = - get_mutable_context()->get_mutable_continuous_state_vector(); - xc0_save_ = xc.CopyToVector(); - - // Set the step size to attempt. - T step_size_to_attempt = get_ideal_next_step_size(); - if (isnan(step_size_to_attempt)) { - // Integrator has not taken a step. Set the current step size to the - // initial step size. 
- step_size_to_attempt = get_initial_step_size_target(); - DRAKE_DEMAND(!isnan(step_size_to_attempt)); - } - - // This variable indicates when the integrator has been pushed to its minimum - // step limit. It can only be "true" if minimum step exceptions have been - // suppressed by the user via set_throw_on_minimum_step_size_violation(false), - // and the error control mechanism determines that the step is as low as it - // can go. - bool at_minimum_step_size = false; - - bool step_succeeded = false; - do { - // Constants used to determine whether modifications to the step size are - // close enough to the attempted step size to use the unadjusted originals, - // or (1) whether the step size to be attempted is so small that we should - // consider it to be artificially limited or (2) whether the step size to - // be attempted is sufficiently close to that requested such that the step - // size should be stretched slightly. - const double near_enough_smaller = 0.95; - const double near_enough_larger = 1.001; - - // If we lose more than a small fraction of the step size we wanted - // to take due to a need to stop at h_max, make a note of that so the - // step size adjuster won't try to grow from the current step. - bool h_was_artificially_limited = false; - if (h_max < near_enough_smaller * step_size_to_attempt) { - // h_max much smaller than current step size. - h_was_artificially_limited = true; - step_size_to_attempt = h_max; - } else { - if (h_max < near_enough_larger * step_size_to_attempt) { - // h_max is roughly current step. Make it the step size to prevent - // creating a small sliver (the remaining step). - step_size_to_attempt = h_max; - } - } - - // Limit the current step size. - step_size_to_attempt = min(step_size_to_attempt, get_maximum_step_size()); - - // Keep adjusting the integration step size until any integrator - // convergence failures disappear. Note: this loop's correctness is - // predicated on the assumption that an integrator will always converge for - // a sufficiently small, yet nonzero step size. - T adjusted_step_size = step_size_to_attempt; - while (!Step(adjusted_step_size)) { - DRAKE_LOGGER_DEBUG("Sub-step failed at {}", adjusted_step_size); - adjusted_step_size *= subdivision_factor_; - - // Note: we could give the user more rope to hang themselves by looking - // for zero rather than machine epsilon, which might be advantageous if - // the user were modeling systems over extremely small time scales. - // However, that issue could be addressed instead by scaling units, and - // using machine epsilon allows failure to be detected much more rapidly. - if (adjusted_step_size < std::numeric_limits::epsilon()) { - throw std::runtime_error("Integrator has been directed to a near zero-" - "length step in order to obtain convergence."); - } - ValidateSmallerStepSize(step_size_to_attempt, adjusted_step_size); - ++num_shrinkages_from_substep_failures_; - ++num_substep_failures_; - } - step_size_to_attempt = adjusted_step_size; - - //-------------------------------------------------------------------- - T err_norm = CalcStateChangeNorm(*get_error_estimate()); - T next_step_size; - std::tie(step_succeeded, next_step_size) = CalcAdjustedStepSize( - err_norm, step_size_to_attempt, &at_minimum_step_size); - DRAKE_LOGGER_DEBUG("Succeeded? {}, Next step size: {}", - step_succeeded, next_step_size); - - if (step_succeeded) { - // Only update the next step size (retain the previous one) if the - // step size was not artificially limited. 
- if (!h_was_artificially_limited) - ideal_next_step_size_ = next_step_size; - - if (isnan(get_actual_initial_step_size_taken())) - set_actual_initial_step_size_taken(step_size_to_attempt); - - // Record the adapted step size taken. - if (isnan(get_smallest_adapted_step_size_taken()) || - (step_size_to_attempt < get_smallest_adapted_step_size_taken() && - step_size_to_attempt < h_max)) - set_smallest_adapted_step_size_taken(step_size_to_attempt); - } else { - ++num_shrinkages_from_error_control_; - - // Set the next step size to attempt. - step_size_to_attempt = next_step_size; - - // Reset the time, state, and time derivative at t0. - get_mutable_context()->SetTime(current_time); - xc.SetFromVector(xc0_save_); - if (get_dense_output()) { - // Take dense output one step back to undo - // the last integration step. - get_mutable_dense_output()->Rollback(); - } - } - } while (!step_succeeded); - return (step_size_to_attempt == h_max); -} - -template -T IntegratorBase::CalcStateChangeNorm( - const ContinuousState& dx_state) const { - using std::max; - const Context& context = get_context(); - const System& system = get_system(); - - // Get weighting matrices. - const auto& qbar_v_weight = this->get_generalized_state_weight_vector(); - const auto& z_weight = this->get_misc_state_weight_vector(); - - // Get the differences in the generalized position, velocity, and - // miscellaneous continuous state vectors. - const VectorBase& dgq = dx_state.get_generalized_position(); - const VectorBase& dgv = dx_state.get_generalized_velocity(); - const VectorBase& dgz = dx_state.get_misc_continuous_state(); - - // (re-)Initialize pinvN_dq_change_ and weighted_q_change_, if necessary. - // Reinitialization might be required if the system state variables can - // change during the course of the simulation. - if (pinvN_dq_change_ == nullptr) { - pinvN_dq_change_ = std::make_unique>(dgv.size()); - weighted_q_change_ = std::make_unique>(dgq.size()); - } - DRAKE_DEMAND(pinvN_dq_change_->size() == dgv.size()); - DRAKE_DEMAND(weighted_q_change_->size() == dgq.size()); - - // TODO(edrumwri): Acquire characteristic time properly from the system - // (i.e., modify the System to provide this value). - const double characteristic_time = 1.0; - - // Computes the infinity norm of the weighted velocity variables. - unweighted_substate_change_ = dgv.CopyToVector(); - T v_nrm = qbar_v_weight.cwiseProduct(unweighted_substate_change_). - template lpNorm() * characteristic_time; - - // Compute the infinity norm of the weighted auxiliary variables. - unweighted_substate_change_ = dgz.CopyToVector(); - T z_nrm = (z_weight.cwiseProduct(unweighted_substate_change_)) - .template lpNorm(); - - // Compute N * Wq * dq = N * Wꝗ * N+ * dq. - unweighted_substate_change_ = dgq.CopyToVector(); - system.MapQDotToVelocity(context, unweighted_substate_change_, - pinvN_dq_change_.get()); - system.MapVelocityToQDot( - context, qbar_v_weight.cwiseProduct(pinvN_dq_change_->CopyToVector()), - weighted_q_change_.get()); - T q_nrm = weighted_q_change_->CopyToVector(). - template lpNorm(); - DRAKE_LOGGER_DEBUG("dq norm: {}, dv norm: {}, dz norm: {}", - q_nrm, v_nrm, z_nrm); - - // Return NaN if one of the values is NaN (whether std::max does this is - // dependent upon ordering!) - using std::isnan; - if (isnan(q_nrm) || isnan(v_nrm) || isnan(z_nrm)) - return std::numeric_limits::quiet_NaN(); - - // TODO(edrumwri): Record the worst offender (which of the norms resulted - // in the largest value). 
- // Infinity norm of the concatenation of multiple vectors is equal to the - // maximum of the infinity norms of the individual vectors. - return max(z_nrm, max(q_nrm, v_nrm)); -} - -template -std::pair IntegratorBase::CalcAdjustedStepSize( - const T& err, - const T& step_taken, - bool* at_minimum_step_size) const { - using std::pow; - using std::min; - using std::max; - using std::isnan; - using std::isinf; - - // Magic numbers come from Simbody. - const double kSafety = 0.9; - const double kMinShrink = 0.1; - const double kMaxGrow = 5.0; - const double kHysteresisLow = 0.9; - const double kHysteresisHigh = 1.2; - - // Get the order for the integrator's error estimate. - const int err_order = get_error_estimate_order(); - - // Set value for new step size to invalid value initially. - T new_step_size(-1); - - // First, make a guess at the next step size to use based on - // the supplied error norm. Watch out for NaN. Further adjustments will be - // made in blocks of code that follow. - if (isnan(err) || isinf(err)) { // e.g., integrand returned NaN. - new_step_size = kMinShrink * step_taken; - return std::make_pair(false, new_step_size); - } else { - if (err == 0) { // A "perfect" step; can happen if no dofs for example. - new_step_size = kMaxGrow * step_taken; - } else { // Choose best step for skating just below the desired accuracy. - new_step_size = kSafety * step_taken * - pow(get_accuracy_in_use() / err, 1.0 / err_order); - } - } - - // Error indicates that the step size can be increased. - if (new_step_size > step_taken) { - // If the integrator has been directed down to the minimum step size, but - // now error indicates that the step size can be increased, de-activate - // at_minimum_step_size. - *at_minimum_step_size = false; - - // If the new step is bigger than the old, don't make the change if the - // old one was small for some unimportant reason (like reached a publishing - // interval). Also, don't grow the step size if the change would be very - // small; better to keep the step size stable in that case (maybe just - // for aesthetic reasons). - if (new_step_size < kHysteresisHigh * step_taken) - new_step_size = step_taken; - } - - // If error indicates that we should shrink the step size but are not allowed - // to, quit and indicate that the step was successful. - if (new_step_size < step_taken && *at_minimum_step_size) { - return std::make_pair(true, step_taken); - } - - // If we're supposed to shrink the step size but the one we have actually - // achieved the desired accuracy last time, we won't change the step now. - // Otherwise, if we are going to shrink the step, let's not be shy -- we'll - // shrink it by at least a factor of kHysteresisLow. - if (new_step_size < step_taken) { - if (err <= get_accuracy_in_use()) { - new_step_size = step_taken; // not this time - } else { - T test_value = kHysteresisLow * step_taken; - new_step_size = min(new_step_size, test_value); - } - } - - // Keep the size change within the allowable bounds. - T max_grow_step = kMaxGrow * step_taken; - T min_shrink_step = kMinShrink * step_taken; - new_step_size = min(new_step_size, max_grow_step); - new_step_size = max(new_step_size, min_shrink_step); - - // Apply user-requested limits on min and max step size. - // TODO(edrumwri): Introduce some feedback to the user when integrator wants - // to take a smaller step than user has selected as the minimum. 
Options for - // this feedback could include throwing a special exception, logging, setting - // a flag in the integrator that allows throwing an exception, or returning - // a special status from IntegrateNoFurtherThanTime(). - if (!isnan(get_maximum_step_size())) - new_step_size = min(new_step_size, get_maximum_step_size()); - ValidateSmallerStepSize(step_taken, new_step_size); - - // Increase the next step size, as necessary. - new_step_size = max(new_step_size, get_working_minimum_step_size()); - if (new_step_size == get_working_minimum_step_size()) { - // Indicate that the step is integrator is now trying the minimum step - // size. - *at_minimum_step_size = true; - - // If the integrator wants to shrink the step size below the - // minimum allowed and exceptions are suppressed, indicate that status. - if (new_step_size < step_taken) - return std::make_pair(false, new_step_size); - } - - return std::make_pair(new_step_size >= step_taken, new_step_size); -} - -template -typename IntegratorBase::StepResult - IntegratorBase::IntegrateNoFurtherThanTime( - const T& publish_time, const T& update_time, const T& boundary_time) { - if (!IntegratorBase::is_initialized()) - throw std::logic_error("Integrator not initialized."); - - // Now that integrator has been checked for initialization, get the current - // time. - const T t0 = context_->get_time(); - - // Verify that h's are non-negative. - const T publish_dt = publish_time - t0; - const T update_dt = update_time - t0; - const T boundary_dt = boundary_time - t0; - if (publish_dt < 0.0) - throw std::logic_error("Publish h is negative."); - if (update_dt < 0.0) - throw std::logic_error("Update h is negative."); - if (boundary_dt < 0.0) - throw std::logic_error("Boundary h is negative."); - - // The size of the integration step is the minimum of the time until the next - // update event, the time until the next publish event, the boundary time - // (i.e., the maximum time that the user wished to step to), and the maximum - // step size (which may stretch slightly to hit a discrete event). - - // We report to the caller which event ultimately constrained the step size. - // If multiple events constrained it equally, we prefer to report update - // events over publish events, publish events over boundary step limits, - // and boundary limits over maximum step size limits. The caller must - // determine event simultaneity by inspecting the time. - - // The maintainer of this code is advised to consider that, while updates - // and boundary times, may both conceptually be deemed events, the distinction - // is made for a reason. If both an update and a boundary time occur - // simultaneously, the following behavior should result: - // (1) kReachedUpdateTime is returned, (2) Simulator::AdvanceTo() performs the - // necessary update, (3) IntegrateNoFurtherThanTime() is called with - // boundary_time equal to the current time in the context and returns - // kReachedBoundaryTime, and (4) the simulation terminates. This sequence of - // operations will ensure that the simulation state is valid if - // Simulator::AdvanceTo() is called again to advance time further. - - // We now analyze the following simultaneous cases with respect to Simulator: - // - // { publish, update } - // kReachedUpdateTime will be returned, an update will be followed by a - // publish. - // - // { publish, update, max step } - // kReachedUpdateTime will be returned, an update will be followed by a - // publish. 
- // - // { publish, boundary time, max step } - // kReachedPublishTime will be returned, a publish will be performed followed - // by another call to this function, which should return kReachedBoundaryTime - // (followed in rapid succession by AdvanceTo(.) return). - // - // { publish, boundary time, max step } - // kReachedPublishTime will be returned, a publish will be performed followed - // by another call to this function, which should return kReachedBoundaryTime - // (followed in rapid succession by AdvanceTo(.) return). - // - // { publish, update, boundary time, maximum step size } - // kUpdateTimeReached will be returned, an update followed by a publish - // will then be performed followed by another call to this function, which - // should return kReachedBoundaryTime (followed in rapid succession by - // AdvanceTo(.) return). - - // By default, the target time is that of the the next discrete update event. - StepResult candidate_result = IntegratorBase::kReachedUpdateTime; - T target_time = update_time; - - // If the next discrete publish event is sooner than the next discrete update - // event, the time of the publish event becomes the target time. - if (publish_time < update_time) { - candidate_result = IntegratorBase::kReachedPublishTime; - target_time = publish_time; - } - - // If the stop time (boundary time) is sooner than the candidate, use it - // instead. - if (boundary_time < target_time) { - candidate_result = IntegratorBase::kReachedBoundaryTime; - target_time = boundary_time; - } - - // If there is no continuous state, there will be no need to limit the - // integration step size. - if (get_context().num_continuous_states() == 0) { - Context* context = get_mutable_context(); - context->SetTime(target_time); - return candidate_result; - } - - // If all events are further into the future than the maximum step - // size times a stretch factor of 1.01, the maximum time becomes the - // target time. Put another way, if the maximum step occurs right before - // an update or a publish, the update or publish is done instead. In contrast, - // we never step past boundary_time, even if doing so would allow hitting a - // publish or an update. - const bool reached_boundary = - (candidate_result == IntegratorBase::kReachedBoundaryTime); - const T& max_h = this->get_maximum_step_size(); - const T max_integrator_time = t0 + max_h; - if ((reached_boundary && max_integrator_time < target_time) || - (!reached_boundary && t0 + max_h * get_stretch_factor() < target_time)) { - candidate_result = IntegratorBase::kTimeHasAdvanced; - target_time = max_integrator_time; - } - - T h = target_time - t0; - if (h < 0.0) throw std::logic_error("Negative h."); - - // If error control is disabled, call the generic stepper. Otherwise, use - // the error controlled method. - bool full_step = true; - if (this->get_fixed_step_mode()) { - T adjusted_h = h; - while (!Step(adjusted_h)) { - ++num_shrinkages_from_substep_failures_; - ++num_substep_failures_; - adjusted_h *= subdivision_factor_; - ValidateSmallerStepSize(h, adjusted_h); - full_step = false; - } - } else { - full_step = StepOnceErrorControlledAtMost(h); - } - if (get_dense_output()) { - // Consolidates current dense output, merging the step - // taken into its internal representation. - get_mutable_dense_output()->Consolidate(); - } - - // Update generic statistics. 
- const T actual_h = context_->get_time() - t0; - UpdateStepStatistics(actual_h); - - if (full_step || context_->get_time() >= target_time) { - // Correct any rounding error that may have caused the time to overrun - // the target time. - context_->SetTime(target_time); - - // If the integrator took the entire maximum step size we allowed above, - // we report to the caller that a step constraint was hit, which may - // indicate a discrete event has arrived. - return candidate_result; - } else { - // Otherwise, we expect that time has advanced, but no event has arrived. - return IntegratorBase::kTimeHasAdvanced; - } -} - } // namespace systems } // namespace drake + +DRAKE_DECLARE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS( + class drake::systems::IntegratorBase) diff --git a/systems/analysis/lyapunov.cc b/systems/analysis/lyapunov.cc index 6be47982672c..570c06f41b2f 100644 --- a/systems/analysis/lyapunov.cc +++ b/systems/analysis/lyapunov.cc @@ -2,6 +2,7 @@ #include +#include "drake/common/symbolic.h" #include "drake/common/text_logging.h" #include "drake/math/autodiff.h" #include "drake/math/autodiff_gradient.h" diff --git a/systems/analysis/lyapunov.h b/systems/analysis/lyapunov.h index e3b8542c4170..a355f85bc4e5 100644 --- a/systems/analysis/lyapunov.h +++ b/systems/analysis/lyapunov.h @@ -2,7 +2,8 @@ #include -#include "drake/common/symbolic.h" +#include "drake/common/autodiff.h" +#include "drake/common/eigen_types.h" #include "drake/systems/framework/context.h" #include "drake/systems/framework/system.h" diff --git a/systems/analysis/radau_integrator.cc b/systems/analysis/radau_integrator.cc index f91e78a727b2..6fd45b2717bf 100644 --- a/systems/analysis/radau_integrator.cc +++ b/systems/analysis/radau_integrator.cc @@ -1,8 +1,734 @@ #include "drake/systems/analysis/radau_integrator.h" +#include + #include "drake/common/autodiff.h" -DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS( - class ::drake::systems::RadauIntegrator) +namespace drake { +namespace systems { + +template +void RadauIntegrator::DoResetImplicitIntegratorStatistics() { + num_nr_iterations_ = 0; + num_err_est_jacobian_reforms_ = 0; + num_err_est_jacobian_function_evaluations_ = 0; + num_err_est_iter_factorizations_ = 0; + num_err_est_function_evaluations_ = 0; + num_err_est_nr_iterations_ = 0; +} + +template +RadauIntegrator::RadauIntegrator(const System& system, + Context* context) : ImplicitIntegrator(system, context) { + A_.resize(num_stages, num_stages); + + // TODO(edrumwri) Convert A_, c_, b_, and d_ to fixed-size when Drake supports + // "if constexpr" (C++17 feature). Lack of partial template function + // specialization makes turning those into fixed-sizes painful at the moment. + + if (num_stages == 2) { + // Set the matrix coefficients (from [Hairer, 1996] Table 5.5). + A_(0, 0) = 5.0/12; A_(0, 1) = -1.0/12; + A_(1, 0) = 3.0/4; A_(1, 1) = 1.0/4; + + // Set the time coefficients (from the same table). + c_ = { 1.0/3, 1.0 }; + + // Set the propagation constants (again, from the same table). + b_ = { 3.0/4, 1.0/4 }; + + // Set the scaling constants for the solution using (8.2b) and Table 5.6. + d_ = { 0.0, 1.0 }; + } else { + // For implicit Euler integration. + A_(0, 0) = 1.0; + c_ = { 1.0 }; + b_ = { 1.0 }; + d_ = { 1.0 }; + } +} + +template +void RadauIntegrator::DoInitialize() { + using std::isnan; + + // Compute the tensor product of A with the identity matrix. A is a + // num_stages x num_stages matrix. 
We need the tensor product to be a + // m x m-dimensional matrix, where m = num_stages * state_dim. Thus the + // number of rows/columns of the identity matrix is state_dim. + + const int state_dim = + this->get_context().get_continuous_state_vector().size(); + + // Compute A ⊗ I. + // TODO(edrumwri) The resulting matrix only has s²n non-zeros out of s²n² + // elements (where s is the number of stages)- take advantage of this. + A_tp_eye_ = CalcTensorProduct(A_, MatrixX::Identity(state_dim, state_dim)); + + F_of_Z_.resize(state_dim * num_stages); + + // Allocate storage for changes to state variables during Newton-Raphson. + dx_state_ = this->get_system().AllocateTimeDerivatives(); + + // TODO(edrumwri): Find the best values for the method. + // These values are expected to be good for the two-stage and one-stage + // methods, respectively. + const double kDefaultAccuracy = (num_stages == 2) ? 1e-3 : 1e-1; + const double kLoosestAccuracy = (num_stages == 2) ? 1e-2 : 5e-1; + + // Set an artificial step size target, if not set already. + if (isnan(this->get_initial_step_size_target())) { + // Verify that maximum step size has been set. + if (isnan(this->get_maximum_step_size())) { + throw std::logic_error("Neither initial step size target nor maximum " + "step size has been set!"); + } + + this->request_initial_step_size_target( + this->get_maximum_step_size()); + } + + // If the user asks for accuracy that is looser than the loosest this + // integrator can provide, use the integrator's loosest accuracy setting + // instead. + double working_accuracy = this->get_target_accuracy(); + if (isnan(working_accuracy)) + working_accuracy = kDefaultAccuracy; + else if (working_accuracy > kLoosestAccuracy) + working_accuracy = kLoosestAccuracy; + this->set_accuracy_in_use(working_accuracy); + + // Reset the Jacobian matrix (so that recomputation is forced). + this->get_mutable_jacobian().resize(0, 0); + + // Instantiate the embedded third order Bogacki-Shampine3 integrator. Note + // that we do not worry about setting the initial step size, since that code + // will never be triggered (the integrator will always be used in fixed-step + // mode). + bs3_ = std::make_unique>( + this->get_system(), + this->get_mutable_context()); + + // Instantiate the embedded second-order Runge-Kutta integrator. + rk2_ = std::make_unique>( + this->get_system(), + std::numeric_limits::max() /* no maximum step size */, + this->get_mutable_context()); + + // Maximum step size is not to be a constraint. + bs3_->set_maximum_step_size(std::numeric_limits::max()); + + bs3_->Initialize(); + rk2_->Initialize(); + bs3_->set_fixed_step_mode(true); + // Note: RK2 only operates in fixed step mode. +} + +// Computes F(Z) used in [Hairer, 1996], (IV.8.4). This method evaluates +// the time derivatives of the system given the current iterate Z. +// @param t0 the initial time. +// @param h the integration step size to attempt. +// @param xt0 the continuous state at time t0. +// @param Z the current iterate, of dimension state_dim * num_stages. +// @post the state of the internal context will be set to (t0, xt0) on return. +// @return a (state_dim * num_stages)-dimensional vector. +template +const VectorX& RadauIntegrator::ComputeFofZ( + const T& t0, const T& h, const VectorX& xt0, const VectorX& Z) { + Context* context = this->get_mutable_context(); + const int state_dim = xt0.size(); + + // Evaluate the derivative at each stage. 
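+ // Stage i occupies the contiguous block Z[i·state_dim, (i+1)·state_dim); its
+ // derivative is evaluated at time t0 + c_[i]·h and state xt0 + Zᵢ.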
+ for (int i = 0, j = 0; i < num_stages; ++i, j += state_dim) { + const auto Z_i = Z.segment(j, state_dim); + context->SetTimeAndContinuousState(t0 + c_[i] * h, xt0 + Z_i); + auto F_i = F_of_Z_.segment(j, state_dim); + F_i = this->EvalTimeDerivatives(*context).CopyToVector(); + } + + return F_of_Z_; +} + +// Computes the solution xtplus (i.e., the continuous state at t0 + h) from the +// continuous state at t0 and the current Newton-Raphson iterate (Z). +template +void RadauIntegrator::ComputeSolutionFromIterate( + const VectorX& xt0, const VectorX& Z, VectorX* xtplus) const { + const int state_dim = xt0.size(); + + // Compute the solution using (IV.8.2b) in [Hairer, 1996]. + xtplus->setZero(); + for (int i = 0, j = 0; i < num_stages; ++i, j += state_dim) { + if (d_[i] != 0.0) + *xtplus += d_[i] * Z.segment(j, state_dim); + } + *xtplus += xt0; +} + +// Computes the next continuous state (at t0 + h) using the Radau method, +// assuming that the method is able to converge at that step size. +// @param t0 the initial time. +// @param h the integration step size to attempt. +// @param xt0 the continuous state at time t0. +// @param[out] xtplus the value for x(t+h) on return. +// @param trial the attempt for this approach (1-4). StepRadau() uses more +// computationally expensive methods as the trial numbers increase. +// @post the internal context will be in an indeterminate state on returning +// `false`. +// @returns `true` if the method was successfully able to take an integration +// step of size `h`. +template +bool RadauIntegrator::StepRadau(const T& t0, const T& h, + const VectorX& xt0, VectorX* xtplus, int trial) { + using std::max; + using std::min; + + // Compute the time at the end of the step. + const T tf = t0 + h; + + // Verify the trial number is valid. + DRAKE_ASSERT(1 <= trial && trial <= 4); + + // Set the state. + Context* context = this->get_mutable_context(); + context->SetTimeAndContinuousState(t0, xt0); + + const int state_dim = xt0.size(); + + // Verify xtplus + DRAKE_ASSERT(xtplus && xtplus->size() == state_dim); + + DRAKE_LOGGER_DEBUG("StepRadau() entered for t={}, h={}, trial={}", + t0, h, trial); + + // TODO(edrumwri) Experiment with setting this as recommended in + // [Hairer, 1996], p. 120. + // Initialize the z iterate using (IV.8.5) in [Hairer, 1996], p. 120 (and + // the corresponding xt+). + Z_.setZero(state_dim * num_stages); + *xtplus = xt0; + DRAKE_LOGGER_DEBUG("Starting state: {}", xtplus->transpose()); + + // Set the iteration matrix construction method. + auto construct_iteration_matrix = [this](const MatrixX& J, const T& dt, + typename ImplicitIntegrator::IterationMatrix* iteration_matrix) { + ComputeRadauIterationMatrix(J, dt, this->A_, iteration_matrix); + }; + + // Calculate Jacobian and iteration matrices (and factorizations), as needed, + // around (t0, xt0). We do not do this calculation if full Newton is in use; + // the calculation will be performed at the beginning of the loop + // instead. + + // TODO(edrumwri) Consider computing the Jacobian matrix around tf and/or + // xtplus. This would give a better Jacobian, but would + // complicate the logic, since the Jacobian would no longer + // (necessarily) be fresh upon fallback to a smaller step size. + if (!this->get_use_full_newton() && + !this->MaybeFreshenMatrices(t0, xt0, h, trial, construct_iteration_matrix, + &iteration_matrix_radau_)) { + return false; + } + + // Initialize the "last" norm of dx; this will be used to detect convergence. 
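+ // (Starting from infinity ensures that the very first iteration cannot be
+ // mistaken for divergence relative to a previous iterate.)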
+ T last_dx_norm = std::numeric_limits::infinity();
+
+ // Do the Newton-Raphson iterations.
+ for (int iter = 0; iter < this->max_newton_raphson_iterations(); ++iter) {
+ DRAKE_LOGGER_DEBUG("Newton-Raphson iteration {}", iter);
+
+ this->FreshenMatricesIfFullNewton(
+ tf, *xtplus, h, construct_iteration_matrix, &iteration_matrix_radau_);
+
+ // Update the number of Newton-Raphson iterations.
+ ++num_nr_iterations_;
+
+ // Evaluate the derivatives using the current iterate.
+ const VectorX& F_of_Z = ComputeFofZ(t0, h, xt0, Z_);
+
+ // Compute the state update using (IV.8.4) in [Hairer, 1996], p. 119, i.e.:
+ // Solve (I − hA⊗J) ΔZᵏ = h (A⊗I) F(Zᵏ) - Zᵏ for ΔZᵏ, where:
+ // A_tp_eye ≡ (A⊗I) and (I − hA⊗J) is the iteration matrix.
+ DRAKE_LOGGER_DEBUG("residual: {}",
+ (A_tp_eye_ * (h * F_of_Z) - Z_).transpose());
+ VectorX dZ = iteration_matrix_radau_.Solve(
+ A_tp_eye_ * (h * F_of_Z) - Z_);
+
+ // Update the iterate.
+ Z_ += dZ;
+
+ // Compute the update to the actual continuous state (i.e., x not Z) using
+ // (IV.8.2b) in [Hairer, 1996], which gives the relationship between x(t0+h)
+ // and Z:
+ // x(t0+h) = x(t0) + Σ dᵢ Zᵢ
+ // Therefore, we can get the relationship between dZ and dx as:
+ // x* = x(t0) + Σ dᵢ Zᵢ (1)
+ // x+ = x(t0) + Σ dᵢ (Zᵢ + dZᵢ) (2)
+ // Subtracting (1) from (2) yields
+ // dx = Σ dᵢ dZᵢ
+ // where dx ≡ x+ - x*
+ VectorX dx = VectorX::Zero(state_dim);
+ for (int i = 0, j = 0; i < num_stages; ++i, j += state_dim) {
+ if (d_[i] != 0.0)
+ dx += d_[i] * dZ.segment(j, state_dim);
+ }
+
+ dx_state_->SetFromVector(dx);
+ DRAKE_LOGGER_DEBUG("dx: {}", dx.transpose());
+
+ // Get the infinity norm of the weighted update vector.
+ dx_state_->get_mutable_vector().SetFromVector(dx);
+ T dx_norm = this->CalcStateChangeNorm(*dx_state_);
+
+ // Compute the update.
+ ComputeSolutionFromIterate(xt0, Z_, &(*xtplus));
+
+ // Check for Newton-Raphson convergence.
+ typename ImplicitIntegrator::ConvergenceStatus status =
+ this->CheckNewtonConvergence(iter, *xtplus, dx, dx_norm, last_dx_norm);
+ // If it converged, we're done.
+ if (status == ImplicitIntegrator::ConvergenceStatus::kConverged)
+ return true;
+ // If it diverged, we have to abort and try again.
+ if (status == ImplicitIntegrator::ConvergenceStatus::kDiverged)
+ break;
+ // Otherwise, continue to the next Newton-Raphson iteration.
+ DRAKE_DEMAND(status ==
+ ImplicitIntegrator::ConvergenceStatus::kNotConverged);
+
+ // Update the norm of the state update.
+ last_dx_norm = dx_norm;
+ }
+
+ DRAKE_LOGGER_DEBUG("StepRadau() convergence failed");
+
+ // If Jacobian and iteration matrix factorizations are not reused, there
+ // is nothing else we can try; otherwise, the following code will recurse
+ // into this function again, and freshen computations as helpful. Note that
+ // get_reuse() returns false if "full Newton-Raphson" mode is activated (see
+ // ImplicitIntegrator::get_use_full_newton()).
+ if (!this->get_reuse())
+ return false;
+
+ // Try StepRadau again, freshening Jacobians and iteration matrix
+ // factorizations as helpful.
+ return StepRadau(t0, h, xt0, xtplus, trial+1);
+}
+
+// Computes the next continuous state (at t0 + h) using the implicit trapezoid
+// method, assuming that the method is able to converge at that step size.
+// @param t0 the initial time.
+// @param h the integration step size to attempt.
+// @param xt0 the continuous state at time t0.
+// @param radau_xtplus the Radau solution for x(t+h).
+// @param[out] xtplus the value for x(t+h) on return.
+// @param trial the attempt for this approach (1-4). The method uses more +// computationally expensive methods as the trial numbers increase. +// @returns `true` if the method was successfully able to take an integration +// step of size `h` (or `false` otherwise). +template +bool RadauIntegrator::StepImplicitTrapezoid(const T& t0, + const T& h, const VectorX& xt0, const VectorX& dx0, + const VectorX& radau_xtplus, VectorX* xtplus) { + using std::abs; + + DRAKE_LOGGER_DEBUG("StepImplicitTrapezoid(h={}) t={}", + h, t0); + + // Define g(x(t+h)) ≡ x(t+h) - x(t) - h/2 (f(t,x(t)) + f(t+h,x(t+h)) and + // evaluate it at the current x(t+h). + Context* context = this->get_mutable_context(); + std::function()> g = + [&xt0, h, &dx0, context, this]() { + return (context->get_continuous_state().CopyToVector() - xt0 - h/2 * + (dx0 + this->EvalTimeDerivatives( + this->get_context()).CopyToVector())).eval(); + }; + + // Store statistics before calling StepAbstract(). The difference between + // the modified statistics and the stored statistics will be used to compute + // the trapezoid method-specific statistics. + int stored_num_jacobian_evaluations = this->get_num_jacobian_evaluations(); + int stored_num_iter_factorizations = + this->get_num_iteration_matrix_factorizations(); + int64_t stored_num_function_evaluations = + this->get_num_derivative_evaluations(); + int64_t stored_num_jacobian_function_evaluations = + this->get_num_derivative_evaluations_for_jacobian(); + int stored_num_nr_iterations = this->get_num_newton_raphson_iterations(); + + // Step. + bool success = StepImplicitTrapezoidDetail( + t0, h, xt0, g, radau_xtplus, xtplus); + + // Move statistics to implicit trapezoid-specific. + num_err_est_jacobian_reforms_ += + this->get_num_jacobian_evaluations() - stored_num_jacobian_evaluations; + num_err_est_iter_factorizations_ += + this->get_num_iteration_matrix_factorizations() - + stored_num_iter_factorizations; + num_err_est_function_evaluations_ += + this->get_num_derivative_evaluations() - stored_num_function_evaluations; + num_err_est_jacobian_function_evaluations_ += + this->get_num_derivative_evaluations_for_jacobian() - + stored_num_jacobian_function_evaluations; + num_err_est_nr_iterations_ += this->get_num_newton_raphson_iterations() - + stored_num_nr_iterations; + + return success; +} + +// Does all of the real work for the implicit trapezoid method. +template +bool RadauIntegrator::StepImplicitTrapezoidDetail( + const T& t0, const T& h, + const VectorX& xt0, const std::function()>& g, + const VectorX& radau_xtplus, VectorX* xtplus, int trial) { + using std::max; + using std::min; + + // Verify the trial number is valid. + DRAKE_ASSERT(trial >= 1 && trial <= 4); + + // Verify xtplus. + Context* context = this->get_mutable_context(); + DRAKE_ASSERT(xtplus && + xtplus->size() == context->get_continuous_state_vector().size()); + + // Start from the Radau solution, which is close (either O(h³) accurate or + // O(h) accurate, depending on the number of stages) to the true solution and + // hence should be an excellent starting point. + *xtplus = radau_xtplus; + DRAKE_LOGGER_DEBUG("Starting state: {}", xtplus->transpose()); + + DRAKE_LOGGER_DEBUG("StepImplicitTrapezoidDetail() entered for t={}, " + "h={}, trial={}", t0, h, trial); + + // Advance the context time; this means that all derivatives will be computed + // at t+h. Compare against StepRadau, which uses ComputeFofZ (which + // automatically updates the Context to the correct time and state). 
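+ // Each Newton-Raphson iteration below solves
+ // iteration_matrix ⋅ Δx = -g(x(t+h))
+ // for the update Δx, where the iteration matrix approximates
+ // ∂g/∂x = I - h/2 J for the residual g defined above.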
+ const T tf = t0 + h; + context->SetTimeAndContinuousState(tf, *xtplus); + + // Initialize the "last" state update norm; this will be used to detect + // convergence. + T last_dx_norm = std::numeric_limits::infinity(); + + // TODO(edrumwri) Consider computing the Jacobian matrix around tf. + // Calculate Jacobian and iteration matrices (and factorizations), as needed. + // TODO(edrumwri) Consider computing the Jacobian matrix around xtplus. This + // would give a better Jacobian, but would complicate the + // logic, since the Jacobian would no longer (necessarily) be + // fresh upon fallback to a smaller step size. + if (!this->get_use_full_newton() && + !this->MaybeFreshenMatrices(t0, xt0, h, trial, + ComputeImplicitTrapezoidIterationMatrix, + &iteration_matrix_implicit_trapezoid_)) { + return false; + } + + for (int iter = 0; iter < this->max_newton_raphson_iterations(); ++iter) { + DRAKE_LOGGER_DEBUG("Newton-Raphson iteration {}", iter); + ++num_nr_iterations_; + + this->FreshenMatricesIfFullNewton(tf, *xtplus, h, + ComputeImplicitTrapezoidIterationMatrix, + &iteration_matrix_implicit_trapezoid_); + + + // Evaluate the residual error using the current x(t+h). + VectorX goutput = g(); + + // Compute the state update using the equation A*x = -g(), where A is the + // iteration matrix. + // TODO(edrumwri): Allow caller to provide their own solver. + VectorX dx = iteration_matrix_implicit_trapezoid_.Solve(-goutput); + DRAKE_LOGGER_DEBUG("dx: {}", dx.transpose()); + + // Get the infinity norm of the weighted update vector. + dx_state_->get_mutable_vector().SetFromVector(dx); + T dx_norm = this->CalcStateChangeNorm(*dx_state_); + + // Update the state vector. + *xtplus += dx; + context->SetTimeAndContinuousState(tf, *xtplus); + + // Check for Newton-Raphson convergence. + typename ImplicitIntegrator::ConvergenceStatus status = + this->CheckNewtonConvergence(iter, *xtplus, dx, dx_norm, last_dx_norm); + // If it converged, we're done. + if (status == ImplicitIntegrator::ConvergenceStatus::kConverged) + return true; + // If it diverged, we have to abort and try again. + if (status == ImplicitIntegrator::ConvergenceStatus::kDiverged) + break; + // Otherwise, continue to the next Newton-Raphson iteration. + DRAKE_DEMAND(status == + ImplicitIntegrator::ConvergenceStatus::kNotConverged); + + // Update the norm of the state update. + last_dx_norm = dx_norm; + } + + DRAKE_LOGGER_DEBUG("StepImplicitTrapezoidDetail() convergence " + "failed"); + + // If Jacobian and iteration matrix factorizations are not reused, there + // is nothing else we can try. Note that get_reuse() returns false if + // "full Newton-Raphson" mode is activated (see + // ImplicitIntegrator::get_use_full_newton()). + if (!this->get_reuse()) + return false; + + // Try the step again, freshening Jacobians and iteration matrix + // factorizations as helpful. + return StepImplicitTrapezoidDetail( + t0, h, xt0, g, radau_xtplus, xtplus, trial + 1); +} + +// Steps Radau forward by h, if possible. +// @param t0 the initial time. +// @param h the integration step size to attempt. +// @param xt0 the continuous state at time t0. +// @param[out] xtplus_radau contains the Radau integrator solution on return. +// @param[out] xtplus_itr contains the implicit trapezoid solution on return. +// @returns `true` if the integration was successful at the requested step size. +// @pre The time and state in the system's context (stored by the integrator) +// are set to (t0, xt0) on entry. 
+// @post The time and state of the system's context (stored by the integrator) +// will be set to t0+h and `xtplus_radau` on successful exit (indicated +// by this function returning `true`) and will be indeterminate on +// unsuccessful exit (indicated by this function returning `false`). +template +bool RadauIntegrator::AttemptStepPaired(const T& t0, const T& h, + const VectorX& xt0, VectorX* xtplus_radau, VectorX* xtplus_itr) { + using std::abs; + DRAKE_ASSERT(xtplus_radau); + DRAKE_ASSERT(xtplus_itr); + DRAKE_ASSERT(xtplus_radau->size() == xt0.size()); + DRAKE_ASSERT(xtplus_itr->size() == xt0.size()); + + // Set the time and state in the context. + this->get_mutable_context()->SetTimeAndContinuousState(t0, xt0); + + // Compute the derivative at xt0. NOTE: the derivative is calculated at this + // point (early on in the integration process) in order to reuse the + // derivative evaluation, via the cache, from the last integration step (if + // possible). + const VectorX dx0 = this->EvalTimeDerivatives( + this->get_context()).CopyToVector(); + + // Use the current state as the candidate value for the next state. + // [Hairer 1996] validates this choice (p. 120). + *xtplus_radau = xt0; + + // Do the Radau step. + if (!StepRadau(t0, h, xt0, xtplus_radau)) { + DRAKE_LOGGER_DEBUG("Radau approach did not converge for " + "step size {}", h); + return false; + } + + // The error estimation process uses the implicit trapezoid method, which + // is defined as: + // x(t+h) = x(t) + h/2 (f(t, x(t) + f(t+h, x(t+h)) + // x(t+h) from the Radau method is presumably a good starting point. + + // The error estimate for 2-stage (3rd order) Radau is derived as follows + // (thanks to Michael Sherman): + // x*(t+h) = xᵣ₃(t+h) + O(h⁴) [Radau3] + // = xₜ(t+h) + O(h³) [implicit trapezoid] + // where x*(t+h) is the true (generally unknown) answer that we seek. + // This implies: + // xᵣ₃(t+h) + O(h⁴) = xₜ(t+h) + O(h³) + // Given that the third order term subsumes the fourth order one: + // xᵣ₃(t+h) - xₜ(t+h) = O(h³) + // Therefore the asymptotic term is third order. + + // For 1-stage (1st order) Radau, the error estimate is derived analogously: + // x*(t+h) = xᵣ₁(t+h) + O(h²) [Radau1] + // = xₜ(t+h) + O(h³) [implicit trapezoid] + // By the same reasoning as above, this implies that: + // xᵣ₁(t+h) - xₜ(t+h) = O(h²) + // In this case, the asymptotic term is second order. + + // One subtlety in this analysis is that the first case (with Radau3) gives + // an error estimate for the implicit trapezoid method, while the second case + // gives an error estimate for the Radau1 method. Put another way: the higher + // order result is propagated in the 3rd order method while the lower order + // result is propagated in the 1st order method. + + // Attempt to compute the implicit trapezoid solution. + if (StepImplicitTrapezoid(t0, h, xt0, dx0, *xtplus_radau, xtplus_itr)) { + // Reset the state to that computed by Radau3. + this->get_mutable_context()->SetTimeAndContinuousState( + t0 + h, *xtplus_radau); + return true; + } else { + DRAKE_LOGGER_DEBUG("Implicit trapezoid approach FAILED with a step" + "size that succeeded on Radau3."); + return false; + } + + return true; +} + +// Updates the error estimate from the propagated solution and the embedded +// solution. +template +void RadauIntegrator::ComputeAndSetErrorEstimate( + const VectorX& xtplus_prop, const VectorX& xtplus_embed) { + err_est_vec_ = xtplus_prop - xtplus_embed; + err_est_vec_ = err_est_vec_.cwiseAbs(); + + // Compute and set the error estimate. 
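// The order claims above (e.g. x_r1(t+h) - x_t(t+h) = O(h^2) for the 1-stage
// method) can be checked numerically. A minimal sketch for the 1-stage case,
// using the closed-form single steps of implicit Euler (1-stage Radau) and the
// implicit trapezoid rule on xdot = lambda x: halving h shrinks the difference
// by roughly 4x, confirming second order. All names here are illustrative.

#include <cmath>
#include <cstdio>

double ImplicitEulerStep(double x0, double h, double lambda) {
  return x0 / (1.0 - h * lambda);  // 1-stage Radau reduces to implicit Euler.
}

double ImplicitTrapezoidStep(double x0, double h, double lambda) {
  return x0 * (1.0 + h * lambda / 2) / (1.0 - h * lambda / 2);
}

int main() {
  const double x0 = 1.0, lambda = -2.0;
  double previous = 0.0;
  for (const double h : {0.1, 0.05, 0.025, 0.0125}) {
    const double diff = std::abs(ImplicitEulerStep(x0, h, lambda) -
                                 ImplicitTrapezoidStep(x0, h, lambda));
    // Ratios approach 4 as h is halved, i.e. the difference is O(h^2).
    std::printf("h = %-7g |x_r1 - x_t| = %.3e  ratio = %.2f\n", h, diff,
                previous > 0 ? previous / diff : 0.0);
    previous = diff;
  }
  return 0;
}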
+ DRAKE_LOGGER_DEBUG("Error estimate: {}", err_est_vec_.transpose()); + this->get_mutable_error_estimate()->get_mutable_vector(). + SetFromVector(err_est_vec_); +} + +// Takes a given step of the requested size, if possible. +// @returns `true` if successful. +// @post the time and continuous state will be advanced only if `true` is +// returned (if `false` is returned, the time and state will be reset +// to their values on entry). +template +bool RadauIntegrator::DoImplicitIntegratorStep(const T& h) { + Context* context = this->get_mutable_context(); + + // Save the current time and state. + const T t0 = context->get_time(); + DRAKE_LOGGER_DEBUG("Radau DoStep(h={}) t={}", h, t0); + + xt0_ = context->get_continuous_state().CopyToVector(); + xtplus_prop_.resize(xt0_.size()); + xtplus_embed_.resize(xt0_.size()); + + // If the requested h is less than the minimum step size, we'll advance time + // using an explicit Bogacki-Shampine/explicit Euler step, depending on the + // number of stages in use. + if (h < this->get_working_minimum_step_size()) { + DRAKE_LOGGER_DEBUG("-- requested step too small, taking explicit " + "step instead"); + + // We want to maintain the order of the error estimation process even as we + // take this very small step. + if (num_stages == 2) { + // The BS3 integrator provides exactly the same order as 2-stage + // Radau + embedded implicit trapezoid. + const int evals_before_bs3 = bs3_->get_num_derivative_evaluations(); + DRAKE_DEMAND(bs3_->IntegrateWithSingleFixedStepToTime(t0 + h)); + const int evals_after_bs3 = bs3_->get_num_derivative_evaluations(); + this->get_mutable_error_estimate()->SetFrom(*bs3_->get_error_estimate()); + this->add_derivative_evaluations(evals_after_bs3 - evals_before_bs3); + } else { + // First-order Euler + RK2 provides exactly the same order as 1-stage + // Radau + embedded implicit trapezoid. + DRAKE_DEMAND(num_stages == 1); + + // Compute the Euler step. + xdot_ = this->EvalTimeDerivatives(*context).CopyToVector(); + xtplus_prop_ = xt0_ + h * xdot_; + + // Compute the RK2 step. + const int evals_before_rk2 = rk2_->get_num_derivative_evaluations(); + DRAKE_DEMAND(rk2_->IntegrateWithSingleFixedStepToTime(t0 + h)); + const int evals_after_rk2 = rk2_->get_num_derivative_evaluations(); + + // Update the error estimation ODE counts. + num_err_est_function_evaluations_ += (evals_after_rk2 - evals_before_rk2); + + // Store the embedded solution. + xtplus_embed_ = context->get_continuous_state().CopyToVector(); + + // Reset the state to the propagated solution. + context->SetTimeAndContinuousState(t0 + h, xtplus_prop_); + + // Update the error estimate. + ComputeAndSetErrorEstimate(xtplus_prop_, xtplus_embed_); + } + } else { + // Try taking the requested step. + bool success = AttemptStepPaired( + t0, h, xt0_, &xtplus_prop_, &xtplus_embed_); + + // If the step was not successful, reset the time and state. + if (!success) { + context->SetTimeAndContinuousState(t0, xt0_); + return false; + } + + // Update the error estimate. + ComputeAndSetErrorEstimate(xtplus_prop_, xtplus_embed_); + } + + return true; +} + +// Function for computing the iteration matrix for the Implicit Trapezoid +// method. +template +void RadauIntegrator::ComputeImplicitTrapezoidIterationMatrix( + const MatrixX& J, + const T& h, + typename ImplicitIntegrator::IterationMatrix* iteration_matrix) { + const int n = J.rows(); + // TODO(edrumwri) Investigate how to do the below operation with a move. 
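// A standalone Eigen sketch of the two iteration matrices used by this
// integrator: I - h/2 J for the implicit trapezoid error estimator (formed
// just below) and I - h (A ⊗ J) for Radau (see ComputeRadauIterationMatrix()
// and CalcTensorProduct() further down). A plain PartialPivLU stands in for
// the IterationMatrix helper; the Jacobian and residual values are arbitrary.

#include <iostream>
#include <Eigen/Dense>

// Hand-rolled Kronecker product: the (i, j) block of A ⊗ B is A(i, j) * B.
Eigen::MatrixXd Kron(const Eigen::MatrixXd& A, const Eigen::MatrixXd& B) {
  Eigen::MatrixXd AB(A.rows() * B.rows(), A.cols() * B.cols());
  for (int i = 0; i < A.rows(); ++i)
    for (int j = 0; j < A.cols(); ++j)
      AB.block(i * B.rows(), j * B.cols(), B.rows(), B.cols()) = A(i, j) * B;
  return AB;
}

int main() {
  const double h = 0.01;
  Eigen::Matrix2d J;                        // An arbitrary stiff Jacobian.
  J << -100.0, 1.0,
          0.0, -0.5;

  // Implicit trapezoid iteration matrix: I - h/2 J.
  const Eigen::Matrix2d M_trap = Eigen::Matrix2d::Identity() - h / 2 * J;
  // Factor once, then reuse the factorization for every Newton solve.
  const Eigen::PartialPivLU<Eigen::Matrix2d> lu(M_trap);
  const Eigen::Vector2d g(1e-3, -2e-3);     // A sample residual.
  std::cout << "dx = " << lu.solve(-g).transpose() << "\n";

  // Two-stage Radau iteration matrix: I - h (A ⊗ J), 4x4 for a 2-dim state.
  Eigen::Matrix2d A;
  A << 5.0 / 12, -1.0 / 12,
       3.0 / 4,   1.0 / 4;
  const Eigen::MatrixXd M_radau =
      Eigen::MatrixXd::Identity(4, 4) - h * Kron(A, J);
  std::cout << "I - h A(x)J =\n" << M_radau << "\n";
  return 0;
}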
+  iteration_matrix->SetAndFactorIterationMatrix(J * (-h / 2.0) +
+      MatrixX<T>::Identity(n, n));
+}
+
+// Function for computing the iteration matrix for the Radau method. This
+// is the matrix in [Hairer, 1996] (IV.8.4) on p.119.
+template <typename T, int num_stages>
+void RadauIntegrator<T, num_stages>::ComputeRadauIterationMatrix(
+    const MatrixX<T>& J,
+    const T& h,
+    const MatrixX<double>& A,
+    typename ImplicitIntegrator<T>::IterationMatrix* iteration_matrix) {
+  const int n = J.rows() * num_stages;
+  // TODO(edrumwri) Investigate how to do the below operation with a move.
+  // Computes I - h A ⊗ J.
+  iteration_matrix->SetAndFactorIterationMatrix(
+      CalcTensorProduct(A * -h, J) + MatrixX<T>::Identity(n, n));
+}
+
+// Computes the tensor product between two matrices. Given
+// A = | a11 ... a1m |
+//     | ...     ... |
+//     | an1 ... anm |
+// and some matrix B, the tensor product is:
+// A ⊗ B = | a11B ... a1mB |
+//         | ...      ...  |
+//         | an1B ... anmB |
+template <typename T, int num_stages>
+MatrixX<T> RadauIntegrator<T, num_stages>::CalcTensorProduct(
+    const MatrixX<T>& A, const MatrixX<T>& B) {
+  const int rows_A = A.rows();
+  const int cols_A = A.cols();
+  const int rows_B = B.rows();
+  const int cols_B = B.cols();
+  MatrixX<T> AB(rows_A * rows_B, cols_A * cols_B);
+  for (int i = 0, ii = 0; i < rows_A; ++i, ii += rows_B) {
+    for (int j = 0, jj = 0; j < cols_A; ++j, jj += cols_B) {
+      AB.block(ii, jj, rows_B, cols_B) = A(i, j) * B;
+    }
+  }
+
+  return AB;
+}
+
+}  // namespace systems
+}  // namespace drake
+
+// Define class template instantiations for double and AutoDiffXd.
+// Note: We don't use the macros in drake/common/default_scalars.h because
+// those macros are designed for functions with only one template argument, and
+// we need to instantiate both scalar types for both the Radau1 and Radau3
+// integrators, which have num_stages set to 1 and 2, respectively.
+template class drake::systems::RadauIntegrator<double, 1>;
+template class drake::systems::RadauIntegrator<drake::AutoDiffXd, 1>;
+template class drake::systems::RadauIntegrator<double, 2>;
+template class drake::systems::RadauIntegrator<drake::AutoDiffXd, 2>;
diff --git a/systems/analysis/radau_integrator.h b/systems/analysis/radau_integrator.h
index 8ea5f7671995..a857eeb5da5f 100644
--- a/systems/analysis/radau_integrator.h
+++ b/systems/analysis/radau_integrator.h
@@ -1,14 +1,10 @@
 #pragma once
 
-#include 
 #include 
-#include 
 #include 
-#include 
-
+#include "drake/common/autodiff.h"
 #include "drake/common/drake_copyable.h"
-#include "drake/math/autodiff_gradient.h"
 #include "drake/systems/analysis/bogacki_shampine3_integrator.h"
 #include "drake/systems/analysis/implicit_integrator.h"
 #include "drake/systems/analysis/runge_kutta2_integrator.h"
@@ -218,719 +214,16 @@ class RadauIntegrator final : public ImplicitIntegrator<T> {
   int64_t num_err_est_nr_iterations_{0};
 };
 
-template <typename T, int num_stages>
-void RadauIntegrator<T, num_stages>::DoResetImplicitIntegratorStatistics() {
-  num_nr_iterations_ = 0;
-  num_err_est_jacobian_reforms_ = 0;
-  num_err_est_jacobian_function_evaluations_ = 0;
-  num_err_est_iter_factorizations_ = 0;
-  num_err_est_function_evaluations_ = 0;
-  num_err_est_nr_iterations_ = 0;
-}
-
-template <typename T, int num_stages>
-RadauIntegrator<T, num_stages>::RadauIntegrator(const System<T>& system,
-    Context<T>* context) : ImplicitIntegrator<T>(system, context) {
-  A_.resize(num_stages, num_stages);
-
-  // TODO(edrumwri) Convert A_, c_, b_, and d_ to fixed-size when Drake supports
-  // "if constexpr" (C++17 feature). Lack of partial template function
-  // specialization makes turning those into fixed-sizes painful at the moment.
-
-  if (num_stages == 2) {
-    // Set the matrix coefficients (from [Hairer, 1996] Table 5.5).
- A_(0, 0) = 5.0/12; A_(0, 1) = -1.0/12; - A_(1, 0) = 3.0/4; A_(1, 1) = 1.0/4; - - // Set the time coefficients (from the same table). - c_ = { 1.0/3, 1.0 }; - - // Set the propagation constants (again, from the same table). - b_ = { 3.0/4, 1.0/4 }; - - // Set the scaling constants for the solution using (8.2b) and Table 5.6. - d_ = { 0.0, 1.0 }; - } else { - // For implicit Euler integration. - A_(0, 0) = 1.0; - c_ = { 1.0 }; - b_ = { 1.0 }; - d_ = { 1.0 }; - } -} - -template -void RadauIntegrator::DoInitialize() { - using std::isnan; - - // Compute the tensor product of A with the identity matrix. A is a - // num_stages x num_stages matrix. We need the tensor product to be a - // m x m-dimensional matrix, where m = num_stages * state_dim. Thus the - // number of rows/columns of the identity matrix is state_dim. - - const int state_dim = - this->get_context().get_continuous_state_vector().size(); - - // Compute A ⊗ I. - // TODO(edrumwri) The resulting matrix only has s²n non-zeros out of s²n² - // elements (where s is the number of stages)- take advantage of this. - A_tp_eye_ = CalcTensorProduct(A_, MatrixX::Identity(state_dim, state_dim)); - - F_of_Z_.resize(state_dim * num_stages); - - // Allocate storage for changes to state variables during Newton-Raphson. - dx_state_ = this->get_system().AllocateTimeDerivatives(); - - // TODO(edrumwri): Find the best values for the method. - // These values are expected to be good for the two-stage and one-stage - // methods, respectively. - const double kDefaultAccuracy = (num_stages == 2) ? 1e-3 : 1e-1; - const double kLoosestAccuracy = (num_stages == 2) ? 1e-2 : 5e-1; - - // Set an artificial step size target, if not set already. - if (isnan(this->get_initial_step_size_target())) { - // Verify that maximum step size has been set. - if (isnan(this->get_maximum_step_size())) { - throw std::logic_error("Neither initial step size target nor maximum " - "step size has been set!"); - } - - this->request_initial_step_size_target( - this->get_maximum_step_size()); - } - - // If the user asks for accuracy that is looser than the loosest this - // integrator can provide, use the integrator's loosest accuracy setting - // instead. - double working_accuracy = this->get_target_accuracy(); - if (isnan(working_accuracy)) - working_accuracy = kDefaultAccuracy; - else if (working_accuracy > kLoosestAccuracy) - working_accuracy = kLoosestAccuracy; - this->set_accuracy_in_use(working_accuracy); - - // Reset the Jacobian matrix (so that recomputation is forced). - this->get_mutable_jacobian().resize(0, 0); - - // Instantiate the embedded third order Bogacki-Shampine3 integrator. Note - // that we do not worry about setting the initial step size, since that code - // will never be triggered (the integrator will always be used in fixed-step - // mode). - bs3_ = std::make_unique>( - this->get_system(), - this->get_mutable_context()); - - // Instantiate the embedded second-order Runge-Kutta integrator. - rk2_ = std::make_unique>( - this->get_system(), - std::numeric_limits::max() /* no maximum step size */, - this->get_mutable_context()); - - // Maximum step size is not to be a constraint. - bs3_->set_maximum_step_size(std::numeric_limits::max()); - - bs3_->Initialize(); - rk2_->Initialize(); - bs3_->set_fixed_step_mode(true); - // Note: RK2 only operates in fixed step mode. -} - -// Computes F(Z) used in [Hairer, 1996], (IV.8.4). This method evaluates -// the time derivatives of the system given the current iterate Z. -// @param t0 the initial time. 
-// @param h the integration step size to attempt. -// @param xt0 the continuous state at time t0. -// @param Z the current iterate, of dimension state_dim * num_stages. -// @post the state of the internal context will be set to (t0, xt0) on return. -// @return a (state_dim * num_stages)-dimensional vector. -template -const VectorX& RadauIntegrator::ComputeFofZ( - const T& t0, const T& h, const VectorX& xt0, const VectorX& Z) { - Context* context = this->get_mutable_context(); - const int state_dim = xt0.size(); - - // Evaluate the derivative at each stage. - for (int i = 0, j = 0; i < num_stages; ++i, j += state_dim) { - const auto Z_i = Z.segment(j, state_dim); - context->SetTimeAndContinuousState(t0 + c_[i] * h, xt0 + Z_i); - auto F_i = F_of_Z_.segment(j, state_dim); - F_i = this->EvalTimeDerivatives(*context).CopyToVector(); - } - - return F_of_Z_; -} - -// Computes the solution xtplus (i.e., the continuous state at t0 + h) from the -// continuous state at t0 and the current Newton-Raphson iterate (Z). -template -void RadauIntegrator::ComputeSolutionFromIterate( - const VectorX& xt0, const VectorX& Z, VectorX* xtplus) const { - const int state_dim = xt0.size(); - - // Compute the solution using (IV.8.2b) in [Hairer, 1996]. - xtplus->setZero(); - for (int i = 0, j = 0; i < num_stages; ++i, j += state_dim) { - if (d_[i] != 0.0) - *xtplus += d_[i] * Z.segment(j, state_dim); - } - *xtplus += xt0; -} - -// Computes the next continuous state (at t0 + h) using the Radau method, -// assuming that the method is able to converge at that step size. -// @param t0 the initial time. -// @param h the integration step size to attempt. -// @param xt0 the continuous state at time t0. -// @param[out] xtplus the value for x(t+h) on return. -// @param trial the attempt for this approach (1-4). StepRadau() uses more -// computationally expensive methods as the trial numbers increase. -// @post the internal context will be in an indeterminate state on returning -// `false`. -// @returns `true` if the method was successfully able to take an integration -// step of size `h`. -template -bool RadauIntegrator::StepRadau(const T& t0, const T& h, - const VectorX& xt0, VectorX* xtplus, int trial) { - using std::max; - using std::min; - - // Compute the time at the end of the step. - const T tf = t0 + h; - - // Verify the trial number is valid. - DRAKE_ASSERT(1 <= trial && trial <= 4); - - // Set the state. - Context* context = this->get_mutable_context(); - context->SetTimeAndContinuousState(t0, xt0); - - const int state_dim = xt0.size(); - - // Verify xtplus - DRAKE_ASSERT(xtplus && xtplus->size() == state_dim); - - DRAKE_LOGGER_DEBUG("StepRadau() entered for t={}, h={}, trial={}", - t0, h, trial); - - // TODO(edrumwri) Experiment with setting this as recommended in - // [Hairer, 1996], p. 120. - // Initialize the z iterate using (IV.8.5) in [Hairer, 1996], p. 120 (and - // the corresponding xt+). - Z_.setZero(state_dim * num_stages); - *xtplus = xt0; - DRAKE_LOGGER_DEBUG("Starting state: {}", xtplus->transpose()); - - // Set the iteration matrix construction method. - auto construct_iteration_matrix = [this](const MatrixX& J, const T& dt, - typename ImplicitIntegrator::IterationMatrix* iteration_matrix) { - ComputeRadauIterationMatrix(J, dt, this->A_, iteration_matrix); - }; - - // Calculate Jacobian and iteration matrices (and factorizations), as needed, - // around (t0, xt0). We do not do this calculation if full Newton is in use; - // the calculation will be performed at the beginning of the loop - // instead. 
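// A compact numerical sketch of the update equation used in the Newton-Raphson
// loop that follows, (I - h A⊗J) dZ = h (A⊗I) F(Z) - Z, together with
// ComputeSolutionFromIterate()'s x(t0+h) = x(t0) + sum_i d_i Z_i, specialized
// to the scalar linear ODE xdot = lambda x (so A ⊗ J is simply lambda * A).
// Because the ODE is linear, a single solve from Z = 0 already converges.
// Plain Eigen calls stand in for the Drake machinery; values are arbitrary.

#include <cmath>
#include <iostream>
#include <Eigen/Dense>

int main() {
  const double lambda = -5.0, x0 = 1.0, h = 0.1;

  // Two-stage Radau IIA coefficients ([Hairer, 1996] Table 5.5) and the
  // solution weights d used in x(t0+h) = x(t0) + sum_i d_i Z_i.
  Eigen::Matrix2d A;
  A << 5.0 / 12, -1.0 / 12,
       3.0 / 4,   1.0 / 4;
  const Eigen::Vector2d d(0.0, 1.0);

  // Iteration matrix I - h (A ⊗ J); for a scalar state it is just 2x2.
  const Eigen::Matrix2d iteration_matrix =
      Eigen::Matrix2d::Identity() - h * lambda * A;

  // One Newton-Raphson update from the initial iterate Z = 0, with
  // F(Z)_i = lambda * (x0 + Z_i).
  Eigen::Vector2d Z = Eigen::Vector2d::Zero();
  const Eigen::Vector2d F = lambda * (Eigen::Vector2d::Constant(x0) + Z);
  Z += iteration_matrix.partialPivLu().solve(h * A * F - Z);

  // Recover the end-of-step state from the converged iterate.
  const double x1 = x0 + d.dot(Z);
  std::cout << "Radau3 step: x(t0+h) = " << x1 << "  (exact "
            << x0 * std::exp(lambda * h) << ")" << std::endl;
  return 0;
}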
- - // TODO(edrumwri) Consider computing the Jacobian matrix around tf and/or - // xtplus. This would give a better Jacobian, but would - // complicate the logic, since the Jacobian would no longer - // (necessarily) be fresh upon fallback to a smaller step size. - if (!this->get_use_full_newton() && - !this->MaybeFreshenMatrices(t0, xt0, h, trial, construct_iteration_matrix, - &iteration_matrix_radau_)) { - return false; - } - - // Initialize the "last" norm of dx; this will be used to detect convergence. - T last_dx_norm = std::numeric_limits::infinity(); - - // Do the Newton-Raphson iterations. - for (int iter = 0; iter < this->max_newton_raphson_iterations(); ++iter) { - DRAKE_LOGGER_DEBUG("Newton-Raphson iteration {}", iter); - - this->FreshenMatricesIfFullNewton( - tf, *xtplus, h, construct_iteration_matrix, &iteration_matrix_radau_); - - // Update the number of Newton-Raphson iterations. - ++num_nr_iterations_; - - // Evaluate the derivatives using the current iterate. - const VectorX& F_of_Z = ComputeFofZ(t0, h, xt0, Z_); - - // Compute the state update using (IV.8.4) in [Hairer, 1996], p. 119, i.e.: - // Solve (I − hA⊗J) ΔZᵏ = h (A⊗I) F(Zᵏ) - Zᵏ for ΔZᵏ, where: - // A_tp_eye ≡ (A⊗I) and (I − hA⊗J) is the iteration matrix. - DRAKE_LOGGER_DEBUG("residual: {}", - (A_tp_eye_ * (h * F_of_Z) - Z_).transpose()); - VectorX dZ = iteration_matrix_radau_.Solve( - A_tp_eye_ * (h * F_of_Z) - Z_); - - // Update the iterate. - Z_ += dZ; - - // Compute the update to the actual continuous state (i.e., x not Z) using - // (IV.8.2b) in [Hairer, 1996], which gives the relationship between x(t0+h) - // and Z: - // x(t0+h) = x(t0) + Σ dᵢ Zᵢ - // Therefore, we can get the relationship between dZ and dx as: - // x* = x(t0) + Σ dᵢ Zᵢ (1) - // x+ = x(t0) + Σ dᵢ (Zᵢ + dZᵢ) (2) - // Subtracting (1) from (2) yields - // dx = Σ dᵢ Zᵢ - // where dx ≡ x+ - x* - VectorX dx = VectorX::Zero(state_dim); - for (int i = 0, j = 0; i < num_stages; ++i, j += state_dim) { - if (d_[i] != 0.0) - dx += d_[i] * dZ.segment(j, state_dim); - } - - dx_state_->SetFromVector(dx); - DRAKE_LOGGER_DEBUG("dx: {}", dx.transpose()); - - // Get the infinity norm of the weighted update vector. - dx_state_->get_mutable_vector().SetFromVector(dx); - T dx_norm = this->CalcStateChangeNorm(*dx_state_); - - // Compute the update. - ComputeSolutionFromIterate(xt0, Z_, &(*xtplus)); - - // Check for Newton-Raphson convergence. - typename ImplicitIntegrator::ConvergenceStatus status = - this->CheckNewtonConvergence(iter, *xtplus, dx, dx_norm, last_dx_norm); - // If it converged, we're done. - if (status == ImplicitIntegrator::ConvergenceStatus::kConverged) - return true; - // If it diverged, we have to abort and try again. - if (status == ImplicitIntegrator::ConvergenceStatus::kDiverged) - break; - // Otherwise, continue to the next Newton-Raphson iteration. - DRAKE_DEMAND(status == - ImplicitIntegrator::ConvergenceStatus::kNotConverged); - - // Update the norm of the state update. - last_dx_norm = dx_norm; - } - - DRAKE_LOGGER_DEBUG("StepRadau() convergence failed"); - - // If Jacobian and iteration matrix factorizations are not reused, there - // is nothing else we can try; otherwise, the following code will recurse - // into this function again, and freshen computations as helpful. Note that - // get_reuse() returns false if "full Newton-Raphson" mode is activated (see - // ImplicitIntegrator::get_use_full_newton()). 
- if (!this->get_reuse()) - return false; - - // Try StepRadau again, freshening Jacobians and iteration matrix - // factorizations as helpful. - return StepRadau(t0, h, xt0, xtplus, trial+1); -} - -// Computes the next continuous state (at t0 + h) using the implicit trapezoid -// method, assuming that the method is able to converge at that step size. -// @param t0 the initial time. -// @param h the integration step size to attempt. -// @param xt0 the continuous state at time t0. -// @param radau_xtplus the Radau solution for x(t+h). -// @param[out] xtplus the value for x(t+h) on return. -// @param trial the attempt for this approach (1-4). The method uses more -// computationally expensive methods as the trial numbers increase. -// @returns `true` if the method was successfully able to take an integration -// step of size `h` (or `false` otherwise). -template -bool RadauIntegrator::StepImplicitTrapezoid(const T& t0, - const T& h, const VectorX& xt0, const VectorX& dx0, - const VectorX& radau_xtplus, VectorX* xtplus) { - using std::abs; - - DRAKE_LOGGER_DEBUG("StepImplicitTrapezoid(h={}) t={}", - h, t0); - - // Define g(x(t+h)) ≡ x(t+h) - x(t) - h/2 (f(t,x(t)) + f(t+h,x(t+h)) and - // evaluate it at the current x(t+h). - Context* context = this->get_mutable_context(); - std::function()> g = - [&xt0, h, &dx0, context, this]() { - return (context->get_continuous_state().CopyToVector() - xt0 - h/2 * - (dx0 + this->EvalTimeDerivatives( - this->get_context()).CopyToVector())).eval(); - }; - - // Store statistics before calling StepAbstract(). The difference between - // the modified statistics and the stored statistics will be used to compute - // the trapezoid method-specific statistics. - int stored_num_jacobian_evaluations = this->get_num_jacobian_evaluations(); - int stored_num_iter_factorizations = - this->get_num_iteration_matrix_factorizations(); - int64_t stored_num_function_evaluations = - this->get_num_derivative_evaluations(); - int64_t stored_num_jacobian_function_evaluations = - this->get_num_derivative_evaluations_for_jacobian(); - int stored_num_nr_iterations = this->get_num_newton_raphson_iterations(); - - // Step. - bool success = StepImplicitTrapezoidDetail( - t0, h, xt0, g, radau_xtplus, xtplus); - - // Move statistics to implicit trapezoid-specific. - num_err_est_jacobian_reforms_ += - this->get_num_jacobian_evaluations() - stored_num_jacobian_evaluations; - num_err_est_iter_factorizations_ += - this->get_num_iteration_matrix_factorizations() - - stored_num_iter_factorizations; - num_err_est_function_evaluations_ += - this->get_num_derivative_evaluations() - stored_num_function_evaluations; - num_err_est_jacobian_function_evaluations_ += - this->get_num_derivative_evaluations_for_jacobian() - - stored_num_jacobian_function_evaluations; - num_err_est_nr_iterations_ += this->get_num_newton_raphson_iterations() - - stored_num_nr_iterations; - - return success; -} - -// Does all of the real work for the implicit trapezoid method. -template -bool RadauIntegrator::StepImplicitTrapezoidDetail( - const T& t0, const T& h, - const VectorX& xt0, const std::function()>& g, - const VectorX& radau_xtplus, VectorX* xtplus, int trial) { - using std::max; - using std::min; - - // Verify the trial number is valid. - DRAKE_ASSERT(trial >= 1 && trial <= 4); - - // Verify xtplus. 
- Context* context = this->get_mutable_context(); - DRAKE_ASSERT(xtplus && - xtplus->size() == context->get_continuous_state_vector().size()); - - // Start from the Radau solution, which is close (either O(h³) accurate or - // O(h) accurate, depending on the number of stages) to the true solution and - // hence should be an excellent starting point. - *xtplus = radau_xtplus; - DRAKE_LOGGER_DEBUG("Starting state: {}", xtplus->transpose()); - - DRAKE_LOGGER_DEBUG("StepImplicitTrapezoidDetail() entered for t={}, " - "h={}, trial={}", t0, h, trial); - - // Advance the context time; this means that all derivatives will be computed - // at t+h. Compare against StepRadau, which uses ComputeFofZ (which - // automatically updates the Context to the correct time and state). - const T tf = t0 + h; - context->SetTimeAndContinuousState(tf, *xtplus); - - // Initialize the "last" state update norm; this will be used to detect - // convergence. - T last_dx_norm = std::numeric_limits::infinity(); - - // TODO(edrumwri) Consider computing the Jacobian matrix around tf. - // Calculate Jacobian and iteration matrices (and factorizations), as needed. - // TODO(edrumwri) Consider computing the Jacobian matrix around xtplus. This - // would give a better Jacobian, but would complicate the - // logic, since the Jacobian would no longer (necessarily) be - // fresh upon fallback to a smaller step size. - if (!this->get_use_full_newton() && - !this->MaybeFreshenMatrices(t0, xt0, h, trial, - ComputeImplicitTrapezoidIterationMatrix, - &iteration_matrix_implicit_trapezoid_)) { - return false; - } - - for (int iter = 0; iter < this->max_newton_raphson_iterations(); ++iter) { - DRAKE_LOGGER_DEBUG("Newton-Raphson iteration {}", iter); - ++num_nr_iterations_; - - this->FreshenMatricesIfFullNewton(tf, *xtplus, h, - ComputeImplicitTrapezoidIterationMatrix, - &iteration_matrix_implicit_trapezoid_); - - - // Evaluate the residual error using the current x(t+h). - VectorX goutput = g(); - - // Compute the state update using the equation A*x = -g(), where A is the - // iteration matrix. - // TODO(edrumwri): Allow caller to provide their own solver. - VectorX dx = iteration_matrix_implicit_trapezoid_.Solve(-goutput); - DRAKE_LOGGER_DEBUG("dx: {}", dx.transpose()); - - // Get the infinity norm of the weighted update vector. - dx_state_->get_mutable_vector().SetFromVector(dx); - T dx_norm = this->CalcStateChangeNorm(*dx_state_); - - // Update the state vector. - *xtplus += dx; - context->SetTimeAndContinuousState(tf, *xtplus); - - // Check for Newton-Raphson convergence. - typename ImplicitIntegrator::ConvergenceStatus status = - this->CheckNewtonConvergence(iter, *xtplus, dx, dx_norm, last_dx_norm); - // If it converged, we're done. - if (status == ImplicitIntegrator::ConvergenceStatus::kConverged) - return true; - // If it diverged, we have to abort and try again. - if (status == ImplicitIntegrator::ConvergenceStatus::kDiverged) - break; - // Otherwise, continue to the next Newton-Raphson iteration. - DRAKE_DEMAND(status == - ImplicitIntegrator::ConvergenceStatus::kNotConverged); - - // Update the norm of the state update. - last_dx_norm = dx_norm; - } - - DRAKE_LOGGER_DEBUG("StepImplicitTrapezoidDetail() convergence " - "failed"); - - // If Jacobian and iteration matrix factorizations are not reused, there - // is nothing else we can try. Note that get_reuse() returns false if - // "full Newton-Raphson" mode is activated (see - // ImplicitIntegrator::get_use_full_newton()). 
- if (!this->get_reuse()) - return false; - - // Try the step again, freshening Jacobians and iteration matrix - // factorizations as helpful. - return StepImplicitTrapezoidDetail( - t0, h, xt0, g, radau_xtplus, xtplus, trial + 1); -} - -// Steps Radau forward by h, if possible. -// @param t0 the initial time. -// @param h the integration step size to attempt. -// @param xt0 the continuous state at time t0. -// @param[out] xtplus_radau contains the Radau integrator solution on return. -// @param[out] xtplus_itr contains the implicit trapezoid solution on return. -// @returns `true` if the integration was successful at the requested step size. -// @pre The time and state in the system's context (stored by the integrator) -// are set to (t0, xt0) on entry. -// @post The time and state of the system's context (stored by the integrator) -// will be set to t0+h and `xtplus_radau` on successful exit (indicated -// by this function returning `true`) and will be indeterminate on -// unsuccessful exit (indicated by this function returning `false`). -template -bool RadauIntegrator::AttemptStepPaired(const T& t0, const T& h, - const VectorX& xt0, VectorX* xtplus_radau, VectorX* xtplus_itr) { - using std::abs; - DRAKE_ASSERT(xtplus_radau); - DRAKE_ASSERT(xtplus_itr); - DRAKE_ASSERT(xtplus_radau->size() == xt0.size()); - DRAKE_ASSERT(xtplus_itr->size() == xt0.size()); - - // Set the time and state in the context. - this->get_mutable_context()->SetTimeAndContinuousState(t0, xt0); - - // Compute the derivative at xt0. NOTE: the derivative is calculated at this - // point (early on in the integration process) in order to reuse the - // derivative evaluation, via the cache, from the last integration step (if - // possible). - const VectorX dx0 = this->EvalTimeDerivatives( - this->get_context()).CopyToVector(); - - // Use the current state as the candidate value for the next state. - // [Hairer 1996] validates this choice (p. 120). - *xtplus_radau = xt0; - - // Do the Radau step. - if (!StepRadau(t0, h, xt0, xtplus_radau)) { - DRAKE_LOGGER_DEBUG("Radau approach did not converge for " - "step size {}", h); - return false; - } - - // The error estimation process uses the implicit trapezoid method, which - // is defined as: - // x(t+h) = x(t) + h/2 (f(t, x(t) + f(t+h, x(t+h)) - // x(t+h) from the Radau method is presumably a good starting point. - - // The error estimate for 2-stage (3rd order) Radau is derived as follows - // (thanks to Michael Sherman): - // x*(t+h) = xᵣ₃(t+h) + O(h⁴) [Radau3] - // = xₜ(t+h) + O(h³) [implicit trapezoid] - // where x*(t+h) is the true (generally unknown) answer that we seek. - // This implies: - // xᵣ₃(t+h) + O(h⁴) = xₜ(t+h) + O(h³) - // Given that the third order term subsumes the fourth order one: - // xᵣ₃(t+h) - xₜ(t+h) = O(h³) - // Therefore the asymptotic term is third order. - - // For 1-stage (1st order) Radau, the error estimate is derived analogously: - // x*(t+h) = xᵣ₁(t+h) + O(h²) [Radau1] - // = xₜ(t+h) + O(h³) [implicit trapezoid] - // By the same reasoning as above, this implies that: - // xᵣ₁(t+h) - xₜ(t+h) = O(h²) - // In this case, the asymptotic term is second order. - - // One subtlety in this analysis is that the first case (with Radau3) gives - // an error estimate for the implicit trapezoid method, while the second case - // gives an error estimate for the Radau1 method. Put another way: the higher - // order result is propagated in the 3rd order method while the lower order - // result is propagated in the 1st order method. 
- - // Attempt to compute the implicit trapezoid solution. - if (StepImplicitTrapezoid(t0, h, xt0, dx0, *xtplus_radau, xtplus_itr)) { - // Reset the state to that computed by Radau3. - this->get_mutable_context()->SetTimeAndContinuousState( - t0 + h, *xtplus_radau); - return true; - } else { - DRAKE_LOGGER_DEBUG("Implicit trapezoid approach FAILED with a step" - "size that succeeded on Radau3."); - return false; - } - - return true; -} - -// Updates the error estimate from the propagated solution and the embedded -// solution. -template -void RadauIntegrator::ComputeAndSetErrorEstimate( - const VectorX& xtplus_prop, const VectorX& xtplus_embed) { - err_est_vec_ = xtplus_prop - xtplus_embed; - err_est_vec_ = err_est_vec_.cwiseAbs(); - - // Compute and set the error estimate. - DRAKE_LOGGER_DEBUG("Error estimate: {}", err_est_vec_.transpose()); - this->get_mutable_error_estimate()->get_mutable_vector(). - SetFromVector(err_est_vec_); -} - -/// Takes a given step of the requested size, if possible. -/// @returns `true` if successful. -/// @post the time and continuous state will be advanced only if `true` is -/// returned (if `false` is returned, the time and state will be reset -/// to their values on entry). -template -bool RadauIntegrator::DoImplicitIntegratorStep(const T& h) { - Context* context = this->get_mutable_context(); - - // Save the current time and state. - const T t0 = context->get_time(); - DRAKE_LOGGER_DEBUG("Radau DoStep(h={}) t={}", h, t0); - - xt0_ = context->get_continuous_state().CopyToVector(); - xtplus_prop_.resize(xt0_.size()); - xtplus_embed_.resize(xt0_.size()); - - // If the requested h is less than the minimum step size, we'll advance time - // using an explicit Bogacki-Shampine/explicit Euler step, depending on the - // number of stages in use. - if (h < this->get_working_minimum_step_size()) { - DRAKE_LOGGER_DEBUG("-- requested step too small, taking explicit " - "step instead"); - - // We want to maintain the order of the error estimation process even as we - // take this very small step. - if (num_stages == 2) { - // The BS3 integrator provides exactly the same order as 2-stage - // Radau + embedded implicit trapezoid. - const int evals_before_bs3 = bs3_->get_num_derivative_evaluations(); - DRAKE_DEMAND(bs3_->IntegrateWithSingleFixedStepToTime(t0 + h)); - const int evals_after_bs3 = bs3_->get_num_derivative_evaluations(); - this->get_mutable_error_estimate()->SetFrom(*bs3_->get_error_estimate()); - this->add_derivative_evaluations(evals_after_bs3 - evals_before_bs3); - } else { - // First-order Euler + RK2 provides exactly the same order as 1-stage - // Radau + embedded implicit trapezoid. - DRAKE_DEMAND(num_stages == 1); - - // Compute the Euler step. - xdot_ = this->EvalTimeDerivatives(*context).CopyToVector(); - xtplus_prop_ = xt0_ + h * xdot_; - - // Compute the RK2 step. - const int evals_before_rk2 = rk2_->get_num_derivative_evaluations(); - DRAKE_DEMAND(rk2_->IntegrateWithSingleFixedStepToTime(t0 + h)); - const int evals_after_rk2 = rk2_->get_num_derivative_evaluations(); - - // Update the error estimation ODE counts. - num_err_est_function_evaluations_ += (evals_after_rk2 - evals_before_rk2); - - // Store the embedded solution. - xtplus_embed_ = context->get_continuous_state().CopyToVector(); - - // Reset the state to the propagated solution. - context->SetTimeAndContinuousState(t0 + h, xtplus_prop_); - - // Update the error estimate. - ComputeAndSetErrorEstimate(xtplus_prop_, xtplus_embed_); - } - } else { - // Try taking the requested step. 
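// The small-step fallback above differences a first-order explicit solution
// against a second-order one so that the error estimate keeps its order even
// for a tiny h. A minimal sketch of that pattern, using the midpoint rule as
// an illustrative second-order partner (not necessarily the tableau Drake's
// RungeKutta2Integrator uses); names here are not Drake APIs.

#include <cmath>
#include <cstdio>

double f(double /* t */, double x) { return -3.0 * x; }  // Test ODE.

int main() {
  const double t0 = 0.0, x0 = 1.0, h = 1e-4;  // A "too small" step.

  // Propagated solution: one explicit Euler step.
  const double x_euler = x0 + h * f(t0, x0);

  // Embedded second-order solution: one midpoint-rule step.
  const double k1 = f(t0, x0);
  const double x_rk2 = x0 + h * f(t0 + h / 2, x0 + h / 2 * k1);

  // The error estimate is the (elementwise) difference of the two solutions.
  std::printf("x1 = %.12f, error estimate = %.3e\n", x_euler,
              std::abs(x_rk2 - x_euler));
  return 0;
}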
- bool success = AttemptStepPaired( - t0, h, xt0_, &xtplus_prop_, &xtplus_embed_); - - // If the step was not successful, reset the time and state. - if (!success) { - context->SetTimeAndContinuousState(t0, xt0_); - return false; - } - - // Update the error estimate. - ComputeAndSetErrorEstimate(xtplus_prop_, xtplus_embed_); - } - - return true; -} - -// Function for computing the iteration matrix for the Implicit Trapezoid -// method. -template -void RadauIntegrator::ComputeImplicitTrapezoidIterationMatrix( - const MatrixX& J, - const T& h, - typename ImplicitIntegrator::IterationMatrix* iteration_matrix) { - const int n = J.rows(); - // TODO(edrumwri) Investigate how to do the below operation with a move. - iteration_matrix->SetAndFactorIterationMatrix(J * (-h / 2.0) + - MatrixX::Identity(n, n)); -} - -// Function for computing the iteration matrix for the Radau method. This -// is the matrix in [Hairer, 1996] (IV.8.4) on p.119. -template -void RadauIntegrator::ComputeRadauIterationMatrix( - const MatrixX& J, - const T& h, - const MatrixX& A, - typename ImplicitIntegrator::IterationMatrix* iteration_matrix) { - const int n = J.rows() * num_stages; - // TODO(edrumwri) Investigate how to do the below operation with a move. - // Computes I - h A ⊗ J. - iteration_matrix->SetAndFactorIterationMatrix( - CalcTensorProduct(A * -h, J) + MatrixX::Identity(n , n)); -} - -// Computes the tensor product between two matrices. Given -// A = | a11 ... a1m | -// | ... ... | -// | an1 ... anm | -// and some matrix B, the tensor product is: -// A ⊗ B = | a11B ... a1mB | -// | ... ... | -// | an1B ... anmB | -template -MatrixX RadauIntegrator::CalcTensorProduct( - const MatrixX& A, const MatrixX& B) { - const int rows_A = A.rows(); - const int cols_A = A.cols(); - const int rows_B = B.rows(); - const int cols_B = B.cols(); - MatrixX AB(rows_A * rows_B, cols_A * cols_B); - for (int i = 0, ii = 0; i < rows_A; ++i, ii += rows_B) { - for (int j = 0, jj = 0; j < cols_A; ++j, jj += cols_B) { - AB.block(ii, jj, rows_B, cols_B) = A(i, j) * B; - } - } - - return AB; -} - } // namespace systems } // namespace drake -DRAKE_DECLARE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS( - class ::drake::systems::RadauIntegrator) +// Declare class template initializations for double and AutoDiffXd. +// Note: We don't use the macros in drake/common/default_scalars.h because +// those macros are designed for functions with only one template argument, and +// we need to instantiate both scalar types for both the Radau1 and Radau3 +// integrators, which have num_stages set 1 and 2, respectively. +extern template class drake::systems::RadauIntegrator; +extern template class drake::systems::RadauIntegrator; + +extern template class drake::systems::RadauIntegrator; +extern template class drake::systems::RadauIntegrator; diff --git a/systems/analysis/runge_kutta3_integrator-inl.h b/systems/analysis/runge_kutta3_integrator-inl.h deleted file mode 100644 index 9e8772e61e41..000000000000 --- a/systems/analysis/runge_kutta3_integrator-inl.h +++ /dev/null @@ -1,163 +0,0 @@ -#pragma once - -/// @file -/// Template method implementations for runge_kutta_3_integrator.h. -/// Most users should only include that file, not this one. -/// For background, see https://drake.mit.edu/cxx_inl.html. 
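// For background on what replaces the deleted -inl.h pattern: the hunks above
// keep only declarations in the header, add one extern-template declaration
// per supported scalar/stage combination, and move the definitions plus the
// matching explicit instantiations into the .cc file. A schematic,
// single-translation-unit sketch of that pattern follows; MyIntegrator is a
// made-up name, and the real code uses the DRAKE_DECLARE/DEFINE macros or the
// spelled-out instantiations shown above.

// What would live in the header: the class template, declarations only, plus
// extern-template declarations that suppress implicit instantiation in every
// translation unit that includes it.
template <typename T>
class MyIntegrator {
 public:
  T Step(const T& h);
};

extern template class MyIntegrator<double>;

// What would live in the .cc file: the out-of-line definitions, followed by
// one explicit instantiation per supported scalar type.
template <typename T>
T MyIntegrator<T>::Step(const T& h) {
  return h;  // Placeholder body.
}

template class MyIntegrator<double>;

int main() {
  MyIntegrator<double> integrator;
  return integrator.Step(0.0) == 0.0 ? 0 : 1;
}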
- -/* clang-format off to disable clang-format-includes */ -#include "drake/systems/analysis/runge_kutta3_integrator.h" -/* clang-format on */ - -#include - -namespace drake { -namespace systems { - -/** - * RK3-specific initialization function. - * @throws std::logic_error if *neither* the initial step size target nor the - * maximum step size have been set before calling. - */ -template -void RungeKutta3Integrator::DoInitialize() { - using std::isnan; - const double kDefaultAccuracy = 1e-3; // Good for this particular integrator. - const double kLoosestAccuracy = 1e-1; // Integrator specific. - const double kMaxStepFraction = 0.1; // Fraction of max step size for - // less aggressive first step. - - // Set an artificial step size target, if not set already. - if (isnan(this->get_initial_step_size_target())) { - // Verify that maximum step size has been set. - if (isnan(this->get_maximum_step_size())) - throw std::logic_error("Neither initial step size target nor maximum " - "step size has been set!"); - - this->request_initial_step_size_target( - this->get_maximum_step_size() * kMaxStepFraction); - } - - // Sets the working accuracy to a good value. - double working_accuracy = this->get_target_accuracy(); - - // If the user asks for accuracy that is looser than the loosest this - // integrator can provide, use the integrator's loosest accuracy setting - // instead. - if (working_accuracy > kLoosestAccuracy) - working_accuracy = kLoosestAccuracy; - else if (isnan(working_accuracy)) - working_accuracy = kDefaultAccuracy; - this->set_accuracy_in_use(working_accuracy); -} - -template -bool RungeKutta3Integrator::DoStep(const T& h) { - using std::abs; - Context& context = *this->get_mutable_context(); - const T t0 = context.get_time(); - const T t1 = t0 + h; - - // CAUTION: This is performance-sensitive inner loop code that uses dangerous - // long-lived references into state and cache to avoid unnecessary copying and - // cache invalidation. Be careful not to insert calls to methods that could - // invalidate any of these references before they are used. - - // TODO(sherm1) Consider moving this notation description to IntegratorBase - // when it is more widely adopted. - // Notation: we're using numeric subscripts for times t₀ and t₁, and - // lower-case letter superscripts like t⁽ᵃ⁾ and t⁽ᵇ⁾ to indicate values - // for intermediate stages of which there are two here, a and b. - // State x₀ = {xc₀, xd₀, xa₀}. We modify only t and xc here, but - // derivative calculations depend on everything in the context, including t, - // x and inputs u (which may depend on t and x). - // Define x⁽ᵃ⁾ ≜ {xc⁽ᵃ⁾, xd₀, xa₀} and u⁽ᵃ⁾ ≜ u(t⁽ᵃ⁾, x⁽ᵃ⁾). - - // Evaluate derivative xcdot₀ ← xcdot(t₀, x(t₀), u(t₀)). Copy the result - // into a temporary since we'll be calculating more derivatives below. - derivs0_->get_mutable_vector().SetFrom( - this->EvalTimeDerivatives(context).get_vector()); - const VectorBase& xcdot0 = derivs0_->get_vector(); - - // Cache: xcdot0 references a *copy* of the derivative result so is immune - // to subsequent evaluations. - - // Compute the first intermediate state and derivative - // (at t⁽ᵃ⁾=t₀+h/2, x⁽ᵃ⁾, u⁽ᵃ⁾). - - // This call marks t- and xc-dependent cache entries out of date, including - // the derivative cache entry. Note that xc is a live reference into the - // context -- subsequent changes through that reference are unobservable so - // will require manual out-of-date notifications. 
- VectorBase& xc = context.SetTimeAndGetMutableContinuousStateVector( - t0 + h / 2); // t⁽ᵃ⁾ ← t₀ + h/2 - xc.CopyToPreSizedVector(&save_xc0_); // Save xc₀ while we can. - xc.PlusEqScaled(h / 2, xcdot0); // xc⁽ᵃ⁾ ← xc₀ + h/2 xcdot₀ - - derivs1_->get_mutable_vector().SetFrom( - this->EvalTimeDerivatives(context).get_vector()); - const VectorBase& xcdot_a = derivs1_->get_vector(); // xcdot⁽ᵃ⁾ - - // Cache: xcdot_a references a *copy* of the derivative result so is immune - // to subsequent evaluations. - - // Compute the second intermediate state and derivative - // (at t⁽ᵇ⁾=t₁, x⁽ᵇ⁾, u⁽ᵇ⁾). - - // This call marks t- and xc-dependent cache entries out of date, including - // the derivative cache entry. (We already have the xc reference but must - // issue the out-of-date notification here since we're about to change it.) - context.SetTimeAndNoteContinuousStateChange(t1); - - // xcⱼ ← xc₀ - h xcdot₀ + 2 h xcdot⁽ᵃ⁾ - xc.SetFromVector(save_xc0_); // Restore xc ← xc₀. - xc.PlusEqScaled({{-h, xcdot0}, {2 * h, xcdot_a}}); - - const VectorBase& xcdot_b = // xcdot⁽ᵇ⁾ - this->EvalTimeDerivatives(context).get_vector(); - - // Cache: xcdot_b references the live derivative cache value, currently - // up to date but about to be marked out of date. We do not want to make - // an unnecessary copy of this data. - - // Cache: we're about to write through the xc reference again, so need to - // mark xc-dependent cache entries out of date, including xcdot_b; time - // doesn't change here. - context.NoteContinuousStateChange(); - - // Calculate the final O(h³) state at t₁. - // xc₁ ← xc₀ + h/6 xcdot₀ + 2/3 h xcdot⁽ᵃ⁾ + h/6 xcdot⁽ᵇ⁾ - xc.SetFromVector(save_xc0_); // Restore xc ← xc₀. - const T h6 = h / 6.0; - - // Cache: xcdot_b still references the derivative cache value, which is - // unchanged, although it is marked out of date. xcdot0 and xcdot_a are - // unaffected. - xc.PlusEqScaled({{h6, xcdot0}, - {4 * h6, xcdot_a}, - {h6, xcdot_b}}); - - // If the size of the system has changed, the error estimate will no longer - // be sized correctly. Verify that the error estimate is the correct size. - DRAKE_DEMAND(this->get_error_estimate()->size() == xc.size()); - - // Calculate the error estimate using an Eigen vector then copy it to the - // continuous state vector, where the various state components can be - // analyzed. - // ε = | xc₁ - (xc₀ + h xcdot⁽ᵃ⁾) | = | xc₀ + h xcdot⁽ᵃ⁾ - xc₁ | - - // TODO(sherm1) Set err_est_vec_ to xc0 at the start and use it above to - // avoid the need for save_xc0_ and this copy altogether. - err_est_vec_ = save_xc0_; // ε ← xc₀ - - xcdot_a.ScaleAndAddToVector(h, &err_est_vec_); // ε += h xcdot⁽ᵃ⁾ - xc.ScaleAndAddToVector(-1.0, &err_est_vec_); // ε -= xc₁ - err_est_vec_ = err_est_vec_.cwiseAbs(); - this->get_mutable_error_estimate()->SetFromVector(err_est_vec_); - - // RK3 always succeeds in taking its desired step. - return true; -} - -} // namespace systems -} // namespace drake diff --git a/systems/analysis/runge_kutta3_integrator.cc b/systems/analysis/runge_kutta3_integrator.cc index 144263d43a33..3cb5e66c61a0 100644 --- a/systems/analysis/runge_kutta3_integrator.cc +++ b/systems/analysis/runge_kutta3_integrator.cc @@ -1,11 +1,155 @@ #include "drake/systems/analysis/runge_kutta3_integrator.h" -#include "drake/systems/analysis/runge_kutta3_integrator-inl.h" - -#include "drake/common/autodiff.h" namespace drake { namespace systems { -template class RungeKutta3Integrator; -template class RungeKutta3Integrator; + +/* + * RK3-specific initialization function. 
+ * @throws std::logic_error if *neither* the initial step size target nor the + * maximum step size have been set before calling. + */ +template +void RungeKutta3Integrator::DoInitialize() { + using std::isnan; + const double kDefaultAccuracy = 1e-3; // Good for this particular integrator. + const double kLoosestAccuracy = 1e-1; // Integrator specific. + const double kMaxStepFraction = 0.1; // Fraction of max step size for + // less aggressive first step. + + // Set an artificial step size target, if not set already. + if (isnan(this->get_initial_step_size_target())) { + // Verify that maximum step size has been set. + if (isnan(this->get_maximum_step_size())) + throw std::logic_error("Neither initial step size target nor maximum " + "step size has been set!"); + + this->request_initial_step_size_target( + this->get_maximum_step_size() * kMaxStepFraction); + } + + // Sets the working accuracy to a good value. + double working_accuracy = this->get_target_accuracy(); + + // If the user asks for accuracy that is looser than the loosest this + // integrator can provide, use the integrator's loosest accuracy setting + // instead. + if (working_accuracy > kLoosestAccuracy) + working_accuracy = kLoosestAccuracy; + else if (isnan(working_accuracy)) + working_accuracy = kDefaultAccuracy; + this->set_accuracy_in_use(working_accuracy); +} + +template +bool RungeKutta3Integrator::DoStep(const T& h) { + using std::abs; + Context& context = *this->get_mutable_context(); + const T t0 = context.get_time(); + const T t1 = t0 + h; + + // CAUTION: This is performance-sensitive inner loop code that uses dangerous + // long-lived references into state and cache to avoid unnecessary copying and + // cache invalidation. Be careful not to insert calls to methods that could + // invalidate any of these references before they are used. + + // TODO(sherm1) Consider moving this notation description to IntegratorBase + // when it is more widely adopted. + // Notation: we're using numeric subscripts for times t₀ and t₁, and + // lower-case letter superscripts like t⁽ᵃ⁾ and t⁽ᵇ⁾ to indicate values + // for intermediate stages of which there are two here, a and b. + // State x₀ = {xc₀, xd₀, xa₀}. We modify only t and xc here, but + // derivative calculations depend on everything in the context, including t, + // x and inputs u (which may depend on t and x). + // Define x⁽ᵃ⁾ ≜ {xc⁽ᵃ⁾, xd₀, xa₀} and u⁽ᵃ⁾ ≜ u(t⁽ᵃ⁾, x⁽ᵃ⁾). + + // Evaluate derivative xcdot₀ ← xcdot(t₀, x(t₀), u(t₀)). Copy the result + // into a temporary since we'll be calculating more derivatives below. + derivs0_->get_mutable_vector().SetFrom( + this->EvalTimeDerivatives(context).get_vector()); + const VectorBase& xcdot0 = derivs0_->get_vector(); + + // Cache: xcdot0 references a *copy* of the derivative result so is immune + // to subsequent evaluations. + + // Compute the first intermediate state and derivative + // (at t⁽ᵃ⁾=t₀+h/2, x⁽ᵃ⁾, u⁽ᵃ⁾). + + // This call marks t- and xc-dependent cache entries out of date, including + // the derivative cache entry. Note that xc is a live reference into the + // context -- subsequent changes through that reference are unobservable so + // will require manual out-of-date notifications. + VectorBase& xc = context.SetTimeAndGetMutableContinuousStateVector( + t0 + h / 2); // t⁽ᵃ⁾ ← t₀ + h/2 + xc.CopyToPreSizedVector(&save_xc0_); // Save xc₀ while we can. 
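// The stage arithmetic worked through in the comments above and below can be
// condensed into a few lines for a scalar ODE. A self-contained sketch that
// mirrors the same formulas, including the embedded error estimate
// |xc0 + h*xcdot_a - xc1|; function and variable names are illustrative only.

#include <cmath>
#include <cstdio>

double f(double t, double x) { return -x + std::sin(t); }  // Test ODE.

int main() {
  const double t0 = 0.0, x0 = 1.0, h = 0.1;

  // Stage derivatives, following the same update formulas as DoStep().
  const double k1 = f(t0, x0);                             // xcdot0
  const double k2 = f(t0 + h / 2, x0 + h / 2 * k1);        // xcdot_a
  const double k3 = f(t0 + h, x0 - h * k1 + 2 * h * k2);   // xcdot_b

  // Third-order solution: xc1 = xc0 + h/6 k1 + 2h/3 k2 + h/6 k3.
  const double x1 = x0 + h / 6 * (k1 + 4 * k2 + k3);

  // Error estimate against the embedded second-order (midpoint) solution.
  const double err = std::abs(x0 + h * k2 - x1);

  std::printf("x(t0+h) = %.10f, error estimate = %.3e\n", x1, err);
  return 0;
}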
+ xc.PlusEqScaled(h / 2, xcdot0); // xc⁽ᵃ⁾ ← xc₀ + h/2 xcdot₀ + + derivs1_->get_mutable_vector().SetFrom( + this->EvalTimeDerivatives(context).get_vector()); + const VectorBase& xcdot_a = derivs1_->get_vector(); // xcdot⁽ᵃ⁾ + + // Cache: xcdot_a references a *copy* of the derivative result so is immune + // to subsequent evaluations. + + // Compute the second intermediate state and derivative + // (at t⁽ᵇ⁾=t₁, x⁽ᵇ⁾, u⁽ᵇ⁾). + + // This call marks t- and xc-dependent cache entries out of date, including + // the derivative cache entry. (We already have the xc reference but must + // issue the out-of-date notification here since we're about to change it.) + context.SetTimeAndNoteContinuousStateChange(t1); + + // xcⱼ ← xc₀ - h xcdot₀ + 2 h xcdot⁽ᵃ⁾ + xc.SetFromVector(save_xc0_); // Restore xc ← xc₀. + xc.PlusEqScaled({{-h, xcdot0}, {2 * h, xcdot_a}}); + + const VectorBase& xcdot_b = // xcdot⁽ᵇ⁾ + this->EvalTimeDerivatives(context).get_vector(); + + // Cache: xcdot_b references the live derivative cache value, currently + // up to date but about to be marked out of date. We do not want to make + // an unnecessary copy of this data. + + // Cache: we're about to write through the xc reference again, so need to + // mark xc-dependent cache entries out of date, including xcdot_b; time + // doesn't change here. + context.NoteContinuousStateChange(); + + // Calculate the final O(h³) state at t₁. + // xc₁ ← xc₀ + h/6 xcdot₀ + 2/3 h xcdot⁽ᵃ⁾ + h/6 xcdot⁽ᵇ⁾ + xc.SetFromVector(save_xc0_); // Restore xc ← xc₀. + const T h6 = h / 6.0; + + // Cache: xcdot_b still references the derivative cache value, which is + // unchanged, although it is marked out of date. xcdot0 and xcdot_a are + // unaffected. + xc.PlusEqScaled({{h6, xcdot0}, + {4 * h6, xcdot_a}, + {h6, xcdot_b}}); + + // If the size of the system has changed, the error estimate will no longer + // be sized correctly. Verify that the error estimate is the correct size. + DRAKE_DEMAND(this->get_error_estimate()->size() == xc.size()); + + // Calculate the error estimate using an Eigen vector then copy it to the + // continuous state vector, where the various state components can be + // analyzed. + // ε = | xc₁ - (xc₀ + h xcdot⁽ᵃ⁾) | = | xc₀ + h xcdot⁽ᵃ⁾ - xc₁ | + + // TODO(sherm1) Set err_est_vec_ to xc0 at the start and use it above to + // avoid the need for save_xc0_ and this copy altogether. + err_est_vec_ = save_xc0_; // ε ← xc₀ + + xcdot_a.ScaleAndAddToVector(h, &err_est_vec_); // ε += h xcdot⁽ᵃ⁾ + xc.ScaleAndAddToVector(-1.0, &err_est_vec_); // ε -= xc₁ + err_est_vec_ = err_est_vec_.cwiseAbs(); + this->get_mutable_error_estimate()->SetFromVector(err_est_vec_); + + // RK3 always succeeds in taking its desired step. + return true; +} + } // namespace systems } // namespace drake + +DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS( + class drake::systems::RungeKutta3Integrator) diff --git a/systems/analysis/runge_kutta3_integrator.h b/systems/analysis/runge_kutta3_integrator.h index 0e23e685b6d6..311c900ed0db 100644 --- a/systems/analysis/runge_kutta3_integrator.h +++ b/systems/analysis/runge_kutta3_integrator.h @@ -3,6 +3,7 @@ #include #include +#include "drake/common/default_scalars.h" #include "drake/common/drake_copyable.h" #include "drake/systems/analysis/integrator_base.h" @@ -94,5 +95,9 @@ class RungeKutta3Integrator final : public IntegratorBase { // interval. 
std::unique_ptr> derivs0_, derivs1_; }; + } // namespace systems } // namespace drake + +DRAKE_DECLARE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS( + class drake::systems::RungeKutta3Integrator) diff --git a/systems/analysis/runge_kutta5_integrator.cc b/systems/analysis/runge_kutta5_integrator.cc index 720fe564ed5d..fb6f2e26ce44 100644 --- a/systems/analysis/runge_kutta5_integrator.cc +++ b/systems/analysis/runge_kutta5_integrator.cc @@ -1,12 +1,250 @@ #include "drake/systems/analysis/runge_kutta5_integrator.h" -#include "drake/common/autodiff.h" - namespace drake { namespace systems { -DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS( - class ::drake::systems::RungeKutta5Integrator) +/* + * RK5-specific initialization function. + * @throws std::logic_error if *neither* the initial step size target nor the + * maximum step size has been set before calling. + */ +template +void RungeKutta5Integrator::DoInitialize() { + using std::isnan; + // TODO(drum) Verify the integrator-specific accuracy settings below. + const double kDefaultAccuracy = 1e-5; + const double kLoosestAccuracy = 1e-3; + const double kMaxStepFraction = 0.1; // Fraction of max step size for + // less aggressive first step. + + // Set an artificial step size target, if not set already. + if (isnan(this->get_initial_step_size_target())) { + // Verify that maximum step size has been set. + if (isnan(this->get_maximum_step_size())) + throw std::logic_error( + "Neither initial step size target nor maximum " + "step size has been set!"); + + this->request_initial_step_size_target(this->get_maximum_step_size() * + kMaxStepFraction); + } + + // Sets the working accuracy to a good value. + double working_accuracy = this->get_target_accuracy(); + + // If the user asks for accuracy that is looser than the loosest this + // integrator can provide, use the integrator's loosest accuracy setting + // instead. + if (working_accuracy > kLoosestAccuracy) + working_accuracy = kLoosestAccuracy; + else if (isnan(working_accuracy)) + working_accuracy = kDefaultAccuracy; + this->set_accuracy_in_use(working_accuracy); +} + +template +bool RungeKutta5Integrator::DoStep(const T& h) { + using std::abs; + Context& context = *this->get_mutable_context(); + const T t0 = context.get_time(); + const T t1 = t0 + h; + + // CAUTION: This is performance-sensitive inner loop code that uses dangerous + // long-lived references into state and cache to avoid unnecessary copying and + // cache invalidation. Be careful not to insert calls to methods that could + // invalidate any of these references before they are used. + + // We use Butcher tableau notation with labels for each coefficient: + /* + 0 (c1) | + 1/5 (c2) | 1/5 (a21) + 3/10 (c3) | 3/40 (a31) 9/40 (a32) + 4/5 (c4) | 44/45 (a41) -56/15 (a42) 32/9 (a43) + 8/9 (c5) | 19372/6561 (a51) −25360/2187 (a52) 64448/6561 (a53) −212/729 (a54) + 1 (c6) | 9017/3168 (a61) −355/33 (a62) 46732/5247 (a63) 49/176 (a64) −5103/18656 (a65) + 1 (c7) | 35/384 (a71) 0 (a72) 500/1113 (a73) 125/192 (a74) −2187/6784 (a75) 11/84 (a76) + ------------------------------------------------------------------------------------------------------------------------------------ + 35/384 (b1) 0 (b2) 500/1113 (b3) 125/192 (b4) −2187/6784 (b5) 11/84 (b6) 0 (b7) + 5179/57600 (d1) 0 (d2) 7571/16695 (d3) 393/640 (d4) −92097/339200 (d5) 187/2100 (d6) 1/40 (d7) + */ + + // Save the continuous state at t₀. 
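// The tableau above can be sanity-checked mechanically: each row of the "a"
// coefficients should sum to its "c" value, and both the propagation weights
// "b" and the error-estimation weights "d" should sum to 1. A small standalone
// check, with the values copied from the comment above (unused entries zero):

#include <array>
#include <cstdio>

int main() {
  const std::array<double, 7> c = {0.0, 1.0 / 5, 3.0 / 10, 4.0 / 5, 8.0 / 9,
                                   1.0, 1.0};
  const double a[7][7] = {
      {},
      {1.0 / 5},
      {3.0 / 40, 9.0 / 40},
      {44.0 / 45, -56.0 / 15, 32.0 / 9},
      {19372.0 / 6561, -25360.0 / 2187, 64448.0 / 6561, -212.0 / 729},
      {9017.0 / 3168, -355.0 / 33, 46732.0 / 5247, 49.0 / 176,
       -5103.0 / 18656},
      {35.0 / 384, 0.0, 500.0 / 1113, 125.0 / 192, -2187.0 / 6784, 11.0 / 84}};
  const std::array<double, 7> b = {35.0 / 384, 0.0, 500.0 / 1113, 125.0 / 192,
                                   -2187.0 / 6784, 11.0 / 84, 0.0};
  const std::array<double, 7> d = {5179.0 / 57600, 0.0, 7571.0 / 16695,
                                   393.0 / 640, -92097.0 / 339200,
                                   187.0 / 2100, 1.0 / 40};

  for (int i = 0; i < 7; ++i) {
    double row_sum = 0.0;
    for (int j = 0; j < i; ++j) row_sum += a[i][j];
    std::printf("row %d: sum(a) = %.15f, c = %.15f\n", i + 1, row_sum, c[i]);
  }
  double sum_b = 0.0, sum_d = 0.0;
  for (int i = 0; i < 7; ++i) { sum_b += b[i]; sum_d += d[i]; }
  std::printf("sum(b) = %.15f, sum(d) = %.15f\n", sum_b, sum_d);
  return 0;
}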
+ context.get_continuous_state_vector().CopyToPreSizedVector(&save_xc0_); + + // Evaluate the derivative at t₀, xc₀ and copy the result into a temporary. + derivs1_->get_mutable_vector().SetFrom( + this->EvalTimeDerivatives(context).get_vector()); + const VectorBase& k1 = derivs1_->get_vector(); + + // Cache: k1 references a *copy* of the derivative result so is immune + // to subsequent evaluations. + + // Compute the first intermediate state and derivative (i.e., Stage 2). + // This call marks t- and xc-dependent cache entries out of date, including + // the derivative cache entry. Note that xc is a live reference into the + // context -- subsequent changes through that reference are unobservable so + // will require manual out-of-date notifications. + const double c2 = 1.0 / 5; + const double a21 = 1.0 / 5; + VectorBase& xc = + context.SetTimeAndGetMutableContinuousStateVector(t0 + c2 * h); + xc.PlusEqScaled(a21 * h, k1); + + // Evaluate the derivative (denoted k2) at t₀ + c2 * h, xc₀ + a21 * h * k1. + derivs2_->get_mutable_vector().SetFrom( + this->EvalTimeDerivatives(context).get_vector()); + const VectorBase& k2 = derivs2_->get_vector(); + + // Cache: k2 references a *copy* of the derivative result so is immune + // to subsequent evaluations. + + // Compute the second intermediate state and derivative (i.e., Stage 3). + const double c3 = 3.0 / 10; + const double a31 = 3.0 / 40; + const double a32 = 9.0 / 40; + // This call marks t- and xc-dependent cache entries out of date, including + // the derivative cache entry. (We already have the xc reference but must + // issue the out-of-date notification here since we're about to change it.) + context.SetTimeAndNoteContinuousStateChange(t0 + c3 * h); + + // Evaluate the derivative (denoted k3) at t₀ + c3 * h, + // xc₀ + a31 * h * k1 + a32 * h * k2. + xc.SetFromVector(save_xc0_); // Restore xc ← xc₀. + xc.PlusEqScaled({{a31 * h, k1}, {a32 * h, k2}}); + derivs3_->get_mutable_vector().SetFrom( + this->EvalTimeDerivatives(context).get_vector()); + const VectorBase& k3 = derivs3_->get_vector(); + + // Compute the third intermediate state and derivative (i.e., Stage 4). + const double c4 = 4.0 / 5; + const double a41 = 44.0 / 45; + const double a42 = -56.0 / 15; + const double a43 = 32.0 / 9; + // This call marks t- and xc-dependent cache entries out of date, including + // the derivative cache entry. (We already have the xc reference but must + // issue the out-of-date notification here since we're about to change it.) + context.SetTimeAndNoteContinuousStateChange(t0 + c4 * h); + + // Evaluate the derivative (denoted k4) at t₀ + c4 * h, + // xc₀ + a41 * h * k1 + a42 * h * k2 + a43 * h * k3. + xc.SetFromVector(save_xc0_); + xc.PlusEqScaled({{a41 * h, k1}, {a42 * h, k2}, {a43 * h, k3}}); + derivs4_->get_mutable_vector().SetFrom( + this->EvalTimeDerivatives(context).get_vector()); + const VectorBase& k4 = derivs4_->get_vector(); + + // Compute the fourth intermediate state and derivative (i.e., Stage 5). + const double c5 = 8.0 / 9; + const double a51 = 19372.0 / 6561; + const double a52 = -25360.0 / 2187; + const double a53 = 64448.0 / 6561; + const double a54 = -212.0 / 729; + // This call marks t- and xc-dependent cache entries out of date, including + // the derivative cache entry. (We already have the xc reference but must + // issue the out-of-date notification here since we're about to change it.) 
+  context.SetTimeAndNoteContinuousStateChange(t0 + c5 * h);
+
+  // Evaluate the derivative (denoted k5) at t₀ + c5 * h,
+  // xc₀ + a51 * h * k1 + a52 * h * k2 + a53 * h * k3 + a54 * h * k4.
+  xc.SetFromVector(save_xc0_);  // Restore xc ← xc₀.
+  xc.PlusEqScaled({{a51 * h, k1}, {a52 * h, k2}, {a53 * h, k3}, {a54 * h, k4}});
+  derivs5_->get_mutable_vector().SetFrom(
+      this->EvalTimeDerivatives(context).get_vector());
+  const VectorBase<T>& k5 = derivs5_->get_vector();
+
+  // Compute the fifth intermediate state and derivative (i.e., Stage 6).
+  const double a61 = 9017.0 / 3168;
+  const double a62 = -355.0 / 33;
+  const double a63 = 46732.0 / 5247;
+  const double a64 = 49.0 / 176;
+  const double a65 = -5103.0 / 18656;
+  // This call marks t- and xc-dependent cache entries out of date, including
+  // the derivative cache entry. (We already have the xc reference but must
+  // issue the out-of-date notification here since we're about to change it.)
+  context.SetTimeAndNoteContinuousStateChange(t1);
+
+  // Evaluate the derivative (denoted k6) at t₀ + c6 * h,
+  // xc₀ + a61 * h * k1 + a62 * h * k2 + a63 * h * k3 + a64 * h * k4 +
+  // a65 * h * k5.
+  xc.SetFromVector(save_xc0_);
+  xc.PlusEqScaled({{a61 * h, k1},
+                   {a62 * h, k2},
+                   {a63 * h, k3},
+                   {a64 * h, k4},
+                   {a65 * h, k5}});
+  derivs6_->get_mutable_vector().SetFrom(
+      this->EvalTimeDerivatives(context).get_vector());
+  const VectorBase<T>& k6 = derivs6_->get_vector();
+
+  // Cache: we're about to write through the xc reference again, so need to
+  // mark xc-dependent cache entries out of date; time doesn't change here.
+  context.NoteContinuousStateChange();
+
+  // Compute the propagated solution (we're able to do this because b1 = a71,
+  // b2 = a72, b3 = a73, b4 = a74, b5 = a75, and b6 = a76).
+  // Note that a72 is 0.0, so we leave that term out below.
+  const double a71 = 35.0 / 384;
+  const double a73 = 500.0 / 1113;
+  const double a74 = 125.0 / 192;
+  const double a75 = -2187.0 / 6784;
+  const double a76 = 11.0 / 84;
+  // This call marks t- and xc-dependent cache entries out of date, including
+  // the derivative cache entry. (We already have the xc reference but must
+  // issue the out-of-date notification here since we're about to change it.)
+  // Note that we use the simplification t1 = t0 + h * c7 = t0 + h * 1.
+  context.SetTimeAndNoteContinuousStateChange(t1);
+
+  // Evaluate the derivative (denoted k7) at t₀ + c7 * h,
+  // xc₀ + a71 * h * k1 + a72 * h * k2 + a73 * h * k3 + a74 * h * k4 +
+  // a75 * h * k5 + a76 * h * k6.
+  xc.SetFromVector(save_xc0_);
+  xc.PlusEqScaled({{a71 * h, k1},
+                   {a73 * h, k3},
+                   {a74 * h, k4},
+                   {a75 * h, k5},
+                   {a76 * h, k6}});
+  const ContinuousState<T>& derivs7 = this->EvalTimeDerivatives(context);
+  const VectorBase<T>& k7 = derivs7.get_vector();
+
+  // WARNING: k7 is a live reference into the cache. Be careful of adding
+  // code below that modifies the context until after k7 is used below. In fact,
+  // it is best not to modify the context from here on out, as modifying the
+  // context will effectively destroy the FSAL benefit that this integrator
+  // provides.
+
+  // Calculate the 4th-order solution that will be used for the error
+  // estimate and then the error estimate itself. The part of this
+  // formula that uses the "a" coefficients (re)-computes the fifth order
+  // solution. The part of this formula that uses the "d" coefficients computes
+  // the fourth order solution. The subtraction (and negation) operations
+  // yield the error estimate.
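As a quick, standalone sanity check of those weights (a sketch, not part of the patch; the coefficient values are copied from the tableau comment above), both weight rows sum to 1, and the error weight applied to each kᵢ below is exactly (bᵢ − dᵢ)·h:

```cpp
#include <array>
#include <cstdio>

// Standalone check of the RK5 tableau weights (copied from the Butcher
// tableau comment above); illustrative only, not part of the Drake patch.
int main() {
  const std::array<double, 7> b = {35.0 / 384, 0.0, 500.0 / 1113, 125.0 / 192,
                                   -2187.0 / 6784, 11.0 / 84, 0.0};
  const std::array<double, 7> d = {5179.0 / 57600, 0.0, 7571.0 / 16695,
                                   393.0 / 640, -92097.0 / 339200,
                                   187.0 / 2100, 1.0 / 40};
  double sum_b = 0.0, sum_d = 0.0;
  for (int i = 0; i < 7; ++i) {
    sum_b += b[i];
    sum_d += d[i];
    // The error-estimate weight applied to k[i] in DoStep() is (b[i] - d[i]) * h.
    std::printf("i=%d  b - d = %+.12f\n", i + 1, b[i] - d[i]);
  }
  // Both rows must sum to 1 for a consistent Runge-Kutta method.
  std::printf("sum(b) = %.15f, sum(d) = %.15f\n", sum_b, sum_d);
  return 0;
}
```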
+ // Note: d2 is 0.0; it has been removed from the formula below. + const double d1 = 5179.0 / 57600; + const double d3 = 7571.0 / 16695; + const double d4 = 393.0 / 640; + const double d5 = -92097.0 / 339200; + const double d6 = 187.0 / 2100; + const double d7 = 1.0 / 40; + err_est_vec_->SetZero(); + err_est_vec_->PlusEqScaled({{(a71 - d1) * h, k1}, + {(a73 - d3) * h, k3}, + {(a74 - d4) * h, k4}, + {(a75 - d5) * h, k5}, + {(a76 - d6) * h, k6}, + {(-d7) * h, k7}}); + + // If the size of the system has changed, the error estimate will no longer + // be sized correctly. Verify that the error estimate is the correct size. + DRAKE_DEMAND(this->get_error_estimate()->size() == xc.size()); + this->get_mutable_error_estimate()->SetFromVector( + err_est_vec_->CopyToVector().cwiseAbs()); + + // RK5 always succeeds in taking its desired step. + return true; +} } // namespace systems } // namespace drake + +DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS( + class drake::systems::RungeKutta5Integrator) diff --git a/systems/analysis/runge_kutta5_integrator.h b/systems/analysis/runge_kutta5_integrator.h index 14961f61ad44..9873c98b38cf 100644 --- a/systems/analysis/runge_kutta5_integrator.h +++ b/systems/analysis/runge_kutta5_integrator.h @@ -3,6 +3,7 @@ #include #include +#include "drake/common/default_scalars.h" #include "drake/common/drake_copyable.h" #include "drake/systems/analysis/integrator_base.h" @@ -89,245 +90,8 @@ class RungeKutta5Integrator final : public IntegratorBase { derivs5_, derivs6_; }; -/** - * RK5-specific initialization function. - * @throws std::logic_error if *neither* the initial step size target nor the - * maximum step size has been set before calling. - */ -template -void RungeKutta5Integrator::DoInitialize() { - using std::isnan; - // TODO(drum) Verify the integrator-specific accuracy settings below. - const double kDefaultAccuracy = 1e-5; - const double kLoosestAccuracy = 1e-3; - const double kMaxStepFraction = 0.1; // Fraction of max step size for - // less aggressive first step. - - // Set an artificial step size target, if not set already. - if (isnan(this->get_initial_step_size_target())) { - // Verify that maximum step size has been set. - if (isnan(this->get_maximum_step_size())) - throw std::logic_error( - "Neither initial step size target nor maximum " - "step size has been set!"); - - this->request_initial_step_size_target(this->get_maximum_step_size() * - kMaxStepFraction); - } - - // Sets the working accuracy to a good value. - double working_accuracy = this->get_target_accuracy(); - - // If the user asks for accuracy that is looser than the loosest this - // integrator can provide, use the integrator's loosest accuracy setting - // instead. - if (working_accuracy > kLoosestAccuracy) - working_accuracy = kLoosestAccuracy; - else if (isnan(working_accuracy)) - working_accuracy = kDefaultAccuracy; - this->set_accuracy_in_use(working_accuracy); -} - -template -bool RungeKutta5Integrator::DoStep(const T& h) { - using std::abs; - Context& context = *this->get_mutable_context(); - const T t0 = context.get_time(); - const T t1 = t0 + h; - - // CAUTION: This is performance-sensitive inner loop code that uses dangerous - // long-lived references into state and cache to avoid unnecessary copying and - // cache invalidation. Be careful not to insert calls to methods that could - // invalidate any of these references before they are used. 
- - // We use Butcher tableau notation with labels for each coefficient: - /* - 0 (c1) | - 1/5 (c2) | 1/5 (a21) - 3/10 (c3) | 3/40 (a31) 9/40 (a32) - 4/5 (c4) | 44/45 (a41) -56/15 (a42) 32/9 (a43) - 8/9 (c5) | 19372/6561 (a51) −25360/2187 (a52) 64448/6561 (a53) −212/729 (a54) - 1 (c6) | 9017/3168 (a61) −355/33 (a62) 46732/5247 (a63) 49/176 (a64) −5103/18656 (a65) - 1 (c7) | 35/384 (a71) 0 (a72) 500/1113 (a73) 125/192 (a74) −2187/6784 (a75) 11/84 (a76) - ------------------------------------------------------------------------------------------------------------------------------------ - 35/384 (b1) 0 (b2) 500/1113 (b3) 125/192 (b4) −2187/6784 (b5) 11/84 (b6) 0 (b7) - 5179/57600 (d1) 0 (d2) 7571/16695 (d3) 393/640 (d4) −92097/339200 (d5) 187/2100 (d6) 1/40 (d7) - */ - - // Save the continuous state at t₀. - context.get_continuous_state_vector().CopyToPreSizedVector(&save_xc0_); - - // Evaluate the derivative at t₀, xc₀ and copy the result into a temporary. - derivs1_->get_mutable_vector().SetFrom( - this->EvalTimeDerivatives(context).get_vector()); - const VectorBase& k1 = derivs1_->get_vector(); - - // Cache: k1 references a *copy* of the derivative result so is immune - // to subsequent evaluations. - - // Compute the first intermediate state and derivative (i.e., Stage 2). - // This call marks t- and xc-dependent cache entries out of date, including - // the derivative cache entry. Note that xc is a live reference into the - // context -- subsequent changes through that reference are unobservable so - // will require manual out-of-date notifications. - const double c2 = 1.0 / 5; - const double a21 = 1.0 / 5; - VectorBase& xc = - context.SetTimeAndGetMutableContinuousStateVector(t0 + c2 * h); - xc.PlusEqScaled(a21 * h, k1); - - // Evaluate the derivative (denoted k2) at t₀ + c2 * h, xc₀ + a21 * h * k1. - derivs2_->get_mutable_vector().SetFrom( - this->EvalTimeDerivatives(context).get_vector()); - const VectorBase& k2 = derivs2_->get_vector(); - - // Cache: k2 references a *copy* of the derivative result so is immune - // to subsequent evaluations. - - // Compute the second intermediate state and derivative (i.e., Stage 3). - const double c3 = 3.0 / 10; - const double a31 = 3.0 / 40; - const double a32 = 9.0 / 40; - // This call marks t- and xc-dependent cache entries out of date, including - // the derivative cache entry. (We already have the xc reference but must - // issue the out-of-date notification here since we're about to change it.) - context.SetTimeAndNoteContinuousStateChange(t0 + c3 * h); - - // Evaluate the derivative (denoted k3) at t₀ + c3 * h, - // xc₀ + a31 * h * k1 + a32 * h * k2. - xc.SetFromVector(save_xc0_); // Restore xc ← xc₀. - xc.PlusEqScaled({{a31 * h, k1}, {a32 * h, k2}}); - derivs3_->get_mutable_vector().SetFrom( - this->EvalTimeDerivatives(context).get_vector()); - const VectorBase& k3 = derivs3_->get_vector(); - - // Compute the third intermediate state and derivative (i.e., Stage 4). - const double c4 = 4.0 / 5; - const double a41 = 44.0 / 45; - const double a42 = -56.0 / 15; - const double a43 = 32.0 / 9; - // This call marks t- and xc-dependent cache entries out of date, including - // the derivative cache entry. (We already have the xc reference but must - // issue the out-of-date notification here since we're about to change it.) - context.SetTimeAndNoteContinuousStateChange(t0 + c4 * h); - - // Evaluate the derivative (denoted k4) at t₀ + c4 * h, - // xc₀ + a41 * h * k1 + a42 * h * k2 + a43 * h * k3. 
- xc.SetFromVector(save_xc0_); - xc.PlusEqScaled({{a41 * h, k1}, {a42 * h, k2}, {a43 * h, k3}}); - derivs4_->get_mutable_vector().SetFrom( - this->EvalTimeDerivatives(context).get_vector()); - const VectorBase& k4 = derivs4_->get_vector(); - - // Compute the fourth intermediate state and derivative (i.e., Stage 5). - const double c5 = 8.0 / 9; - const double a51 = 19372.0 / 6561; - const double a52 = -25360.0 / 2187; - const double a53 = 64448.0 / 6561; - const double a54 = -212.0 / 729; - // This call marks t- and xc-dependent cache entries out of date, including - // the derivative cache entry. (We already have the xc reference but must - // issue the out-of-date notification here since we're about to change it.) - context.SetTimeAndNoteContinuousStateChange(t0 + c5 * h); - - // Evaluate the derivative (denoted k5) at t₀ + c5 * h, - // xc₀ + a51 * h * k1 + a52 * h * k2 + a53 * h * k3 + a54 * h * k4. - xc.SetFromVector(save_xc0_); // Restore xc ← xc₀. - xc.PlusEqScaled({{a51 * h, k1}, {a52 * h, k2}, {a53 * h, k3}, {a54 * h, k4}}); - derivs5_->get_mutable_vector().SetFrom( - this->EvalTimeDerivatives(context).get_vector()); - const VectorBase& k5 = derivs5_->get_vector(); - - // Compute the fifth intermediate state and derivative (i.e., Stage 6). - const double a61 = 9017.0 / 3168; - const double a62 = -355.0 / 33; - const double a63 = 46732.0 / 5247; - const double a64 = 49.0 / 176; - const double a65 = -5103.0 / 18656; - // This call marks t- and xc-dependent cache entries out of date, including - // the derivative cache entry. (We already have the xc reference but must - // issue the out-of-date notification here since we're about to change it.) - context.SetTimeAndNoteContinuousStateChange(t1); - - // Evaluate the derivative (denoted k6) at t₀ + c6 * h, - // xc₀ + a61 * h * k1 + a62 * h * k2 + a63 * h * k3 + a64 * h * k4 + - // a65 * h * k5. - xc.SetFromVector(save_xc0_); - xc.PlusEqScaled({{a61 * h, k1}, - {a62 * h, k2}, - {a63 * h, k3}, - {a64 * h, k4}, - {a65 * h, k5}}); - derivs6_->get_mutable_vector().SetFrom( - this->EvalTimeDerivatives(context).get_vector()); - const VectorBase& k6 = derivs6_->get_vector(); - - // Cache: we're about to write through the xc reference again, so need to - // mark xc-dependent cache entries out of date; time doesn't change here. - context.NoteContinuousStateChange(); - - // Compute the propagated solution (we're able to do this because b1 = a71, - // b2 = a72, b3 = a73, b4 = a74, b5 = a75, and b6 = a76). - // Note that a72 is 0.0, so we leave that term out below. - const double a71 = 35.0 / 384; - const double a73 = 500.0 / 1113; - const double a74 = 125.0 / 192; - const double a75 = -2187.0 / 6784; - const double a76 = 11.0 / 84; - // This call marks t- and xc-dependent cache entries out of date, including - // the derivative cache entry. (We already have the xc reference but must - // issue the out-of-date notification here since we're about to change it.) - // Note that we use the simplification t1 = t0 + h * c7 = t0 + h * 1. - context.SetTimeAndNoteContinuousStateChange(t1); - - // Evaluate the derivative (denoted k7) at t₀ + c7 * h, - // xc₀ + a71 * h * k1 + a72 * h * k2 + a73 * h * k3 + a74 * h * k4 + - // a75 * h * k5 + a76 * h * k6. - xc.SetFromVector(save_xc0_); - xc.PlusEqScaled({{a71 * h, k1}, - {a73 * h, k3}, - {a74 * h, k4}, - {a75 * h, k5}, - {a76 * h, k6}}); - const ContinuousState& derivs7 = this->EvalTimeDerivatives(context); - const VectorBase& k7 = derivs7.get_vector(); - - // WARNING: k7 is a live reference into the cache. 
Be careful of adding - // code below that modifies the context until after k7 is used below. In fact, - // it is best not to modify the context from here on out, as modifying the - // context will effectively destroy the FSAL benefit that this integrator - // provides. - - // Calculate the 4th-order solution that will be used for the error - // estimate and then the error estimate itself. The part of this - // formula that uses the "a" coefficients (re)-computes the fifth order - // solution. The part of this formula that uses the "d" coefficients computes - // the fourth order solution. The subtraction (and negation) operations - // yield the error estimate. - // Note: d2 is 0.0; it has been removed from the formula below. - const double d1 = 5179.0 / 57600; - const double d3 = 7571.0 / 16695; - const double d4 = 393.0 / 640; - const double d5 = -92097.0 / 339200; - const double d6 = 187.0 / 2100; - const double d7 = 1.0 / 40; - err_est_vec_->SetZero(); - err_est_vec_->PlusEqScaled({{(a71 - d1) * h, k1}, - {(a73 - d3) * h, k3}, - {(a74 - d4) * h, k4}, - {(a75 - d5) * h, k5}, - {(a76 - d6) * h, k6}, - {(-d7) * h, k7}}); - - // If the size of the system has changed, the error estimate will no longer - // be sized correctly. Verify that the error estimate is the correct size. - DRAKE_DEMAND(this->get_error_estimate()->size() == xc.size()); - this->get_mutable_error_estimate()->SetFromVector( - err_est_vec_->CopyToVector().cwiseAbs()); - - // RK5 always succeeds in taking its desired step. - return true; -} - } // namespace systems } // namespace drake + +DRAKE_DECLARE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS( + class drake::systems::RungeKutta5Integrator) diff --git a/systems/analysis/scalar_initial_value_problem-inl.h b/systems/analysis/scalar_initial_value_problem-inl.h deleted file mode 100644 index c42a14670e30..000000000000 --- a/systems/analysis/scalar_initial_value_problem-inl.h +++ /dev/null @@ -1,5 +0,0 @@ -#pragma once - -// Instantiation of ScalarInitialValueProblem for T other than double -// requires InitialValueProblem definition. 
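Throughout this patch, each deleted -inl.h header is replaced by the DRAKE_DECLARE_/DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS macro pair seen above. Roughly, those macros wrap the standard extern-template idiom for Drake's default nonsymbolic scalar types, which is what lets the member definitions move into the .cc file; a minimal sketch with a hypothetical class `Pair` (not Drake code) looks like:

```cpp
// Sketch of the extern-template idiom the DECLARE/DEFINE macro pair wraps
// (class `Pair` and the file names are hypothetical, not Drake code).

// ---- pair.h ---------------------------------------------------------------
#pragma once

template <typename T>
class Pair {
 public:
  explicit Pair(T x);   // Defined in pair.cc, not in this header.
  T sum() const;
 private:
  T a_{}, b_{};
};

// Approximately what the DECLARE macro contributes: suppress implicit
// instantiation for the supported scalar types (double, AutoDiffXd, ...).
extern template class Pair<double>;

// ---- pair.cc --------------------------------------------------------------
// #include "pair.h"
template <typename T>
Pair<T>::Pair(T x) : a_(x), b_(x) {}

template <typename T>
T Pair<T>::sum() const { return a_ + b_; }

// Approximately what the DEFINE macro contributes: the explicit instantiations.
template class Pair<double>;

// ---- main.cc --------------------------------------------------------------
// #include <cstdio>
// #include "pair.h"
// int main() { std::printf("%g\n", Pair<double>(2.0).sum()); return 0; }
```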
-#include "drake/systems/analysis/initial_value_problem-inl.h" diff --git a/systems/analysis/scalar_initial_value_problem.cc b/systems/analysis/scalar_initial_value_problem.cc index 4cdc8761ab0c..5cbe6f20c128 100644 --- a/systems/analysis/scalar_initial_value_problem.cc +++ b/systems/analysis/scalar_initial_value_problem.cc @@ -1,13 +1,4 @@ #include "drake/systems/analysis/scalar_initial_value_problem.h" -#include "drake/systems/analysis/scalar_initial_value_problem-inl.h" - -#include "drake/common/default_scalars.h" - -namespace drake { -namespace systems { DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS( - class ScalarInitialValueProblem); - -} // namespace systems -} // namespace drake + class drake::systems::ScalarInitialValueProblem) diff --git a/systems/analysis/scalar_initial_value_problem.h b/systems/analysis/scalar_initial_value_problem.h index b374a75ff138..02f639b92cc8 100644 --- a/systems/analysis/scalar_initial_value_problem.h +++ b/systems/analysis/scalar_initial_value_problem.h @@ -1,11 +1,10 @@ #pragma once -#include #include #include #include -#include +#include "drake/common/default_scalars.h" #include "drake/common/drake_copyable.h" #include "drake/common/eigen_types.h" #include "drake/systems/analysis/initial_value_problem.h" @@ -227,3 +226,6 @@ class ScalarInitialValueProblem { } // namespace systems } // namespace drake + +DRAKE_DECLARE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS( + class drake::systems::ScalarInitialValueProblem) diff --git a/systems/analysis/semi_explicit_euler_integrator.h b/systems/analysis/semi_explicit_euler_integrator.h index d6918b2225cc..95a1e5211e1e 100644 --- a/systems/analysis/semi_explicit_euler_integrator.h +++ b/systems/analysis/semi_explicit_euler_integrator.h @@ -1,6 +1,5 @@ #pragma once -#include #include #include "drake/common/drake_copyable.h" diff --git a/systems/analysis/simulator.cc b/systems/analysis/simulator.cc index d67e88826f3f..705f7ab6943d 100644 --- a/systems/analysis/simulator.cc +++ b/systems/analysis/simulator.cc @@ -1,15 +1,672 @@ #include "drake/systems/analysis/simulator.h" -#include -#include #include -#include "drake/common/autodiff.h" #include "drake/common/extract_double.h" +#include "drake/common/text_logging.h" +#include "drake/systems/analysis/runge_kutta3_integrator.h" namespace drake { namespace systems { +template +Simulator::Simulator(const System& system, + std::unique_ptr> context) + : Simulator(&system, nullptr, std::move(context)) {} + +template +Simulator::Simulator(std::unique_ptr> owned_system, + std::unique_ptr> context) : + Simulator(nullptr, std::move(owned_system), std::move(context)) {} + +template +Simulator::Simulator(const System* system, + std::unique_ptr> owned_system, + std::unique_ptr> context) + : owned_system_(std::move(owned_system)), + system_(owned_system_ ? *owned_system_ : *system), + context_(std::move(context)) { + // Setup defaults that should be generally reasonable. + const double max_step_size = 0.1; + const double initial_step_size = 1e-4; + const double default_accuracy = 1e-4; + + // Create a context if necessary. + if (!context_) context_ = system_.CreateDefaultContext(); + + // Create a default integrator and initialize it. + // N.B. Keep this in sync with systems::internal::kDefaultIntegratorName at + // the top of this file. 
+ integrator_ = std::unique_ptr>( + new RungeKutta3Integrator(system_, context_.get())); + integrator_->request_initial_step_size_target(initial_step_size); + integrator_->set_maximum_step_size(max_step_size); + integrator_->set_target_accuracy(default_accuracy); + integrator_->Initialize(); + + // Allocate the necessary temporaries for storing state in update calls + // (which will then be transferred back to system state). + discrete_updates_ = system_.AllocateDiscreteVariables(); + unrestricted_updates_ = context_->CloneState(); + + // Allocate the vector of active witness functions. + witness_functions_ = std::make_unique< + std::vector*>>(); + + // Allocate the necessary temporary for witness-based event handling. + event_handler_xc_ = system_.AllocateTimeDerivatives(); +} + +template +SimulatorStatus Simulator::Initialize() { + // TODO(sherm1) Modify Context to satisfy constraints. + // TODO(sherm1) Invoke System's initial conditions computation. + if (!context_) + throw std::logic_error("Initialize(): Context has not been set."); + + // Record the current time so we can restore it later (see below). + // *Don't* use a reference here! + const T current_time = context_->get_time(); + + // Assumes success. + SimulatorStatus status(ExtractDoubleOrThrow(current_time)); + + // Initialize the integrator. + integrator_->Initialize(); + + // Restore default values. + ResetStatistics(); + + // Process all the initialization events. + auto init_events = system_.AllocateCompositeEventCollection(); + system_.GetInitializationEvents(*context_, init_events.get()); + + // Do unrestricted updates first. + HandleUnrestrictedUpdate(init_events->get_unrestricted_update_events()); + // Do restricted (discrete variable) updates next. + HandleDiscreteUpdate(init_events->get_discrete_update_events()); + + // Gets all per-step events to be handled. + per_step_events_ = system_.AllocateCompositeEventCollection(); + DRAKE_DEMAND(per_step_events_ != nullptr); + system_.GetPerStepEvents(*context_, per_step_events_.get()); + + // Allocate timed events collection. + timed_events_ = system_.AllocateCompositeEventCollection(); + DRAKE_DEMAND(timed_events_ != nullptr); + + // Ensure that CalcNextUpdateTime() can return the current time by perturbing + // current time as slightly toward negative infinity as we can allow. + const T slightly_before_current_time = internal::GetPreviousNormalizedValue( + current_time); + context_->SetTime(slightly_before_current_time); + + // Get the next timed event. + next_timed_event_time_ = + system_.CalcNextUpdateTime(*context_, timed_events_.get()); + + // Reset the context time. + context_->SetTime(current_time); + + // Indicate a timed event is to be handled, if appropriate. + if (next_timed_event_time_ == current_time) { + time_or_witness_triggered_ = kTimeTriggered; + } else { + time_or_witness_triggered_ = kNothingTriggered; + } + + // Allocate the witness function collection. + witnessed_events_ = system_.AllocateCompositeEventCollection(); + + // Do any publishes last. Merge the initialization events with per-step + // events and current_time timed events (if any). We expect all initialization + // events to precede any per-step or timed events in the merged collection. + // Note that per-step and timed discrete/unrestricted update events are *not* + // processed here; just publish events. 
+ init_events->AddToEnd(*per_step_events_); + if (time_or_witness_triggered_ & kTimeTriggered) + init_events->AddToEnd(*timed_events_); + HandlePublish(init_events->get_publish_events()); + + // TODO(siyuan): transfer publish entirely to individual systems. + // Do a force-publish before the simulation starts. + if (publish_at_initialization_) { + system_.Publish(*context_); + ++num_publishes_; + } + + CallMonitorUpdateStatusAndMaybeThrow(&status); + + // Initialize runtime variables. + initialization_done_ = true; + + return status; +} + +// Processes UnrestrictedUpdateEvent events. +template +void Simulator::HandleUnrestrictedUpdate( + const EventCollection>& events) { + if (events.HasEvents()) { + // First, compute the unrestricted updates into a temporary buffer. + system_.CalcUnrestrictedUpdate(*context_, events, + unrestricted_updates_.get()); + // Now write the update back into the context. + system_.ApplyUnrestrictedUpdate(events, unrestricted_updates_.get(), + context_.get()); + ++num_unrestricted_updates_; + + // Mark the witness function vector as needing to be redetermined. + redetermine_active_witnesses_ = true; + } +} + +// Processes DiscreteEvent events. +template +void Simulator::HandleDiscreteUpdate( + const EventCollection>& events) { + if (events.HasEvents()) { + // First, compute the discrete updates into a temporary buffer. + system_.CalcDiscreteVariableUpdates(*context_, events, + discrete_updates_.get()); + // Then, write them back into the context. + system_.ApplyDiscreteVariableUpdate(events, discrete_updates_.get(), + context_.get()); + ++num_discrete_updates_; + } +} + +// Processes Publish events. +template +void Simulator::HandlePublish( + const EventCollection>& events) { + if (events.HasEvents()) { + system_.Publish(*context_, events); + ++num_publishes_; + } +} + +template +SimulatorStatus Simulator::AdvanceTo(const T& boundary_time) { + if (!initialization_done_) { + const SimulatorStatus initialize_status = Initialize(); + if (!initialize_status.succeeded()) + return initialize_status; + } + + DRAKE_THROW_UNLESS(boundary_time >= context_->get_time()); + + // Assume success. + SimulatorStatus status(ExtractDoubleOrThrow(boundary_time)); + + // Integrate until desired interval has completed. + auto merged_events = system_.AllocateCompositeEventCollection(); + DRAKE_DEMAND(timed_events_ != nullptr); + DRAKE_DEMAND(witnessed_events_ != nullptr); + DRAKE_DEMAND(merged_events != nullptr); + + // Clear events for the loop iteration. + merged_events->Clear(); + merged_events->AddToEnd(*per_step_events_); + + // Merge in timed and witnessed events, if necessary. + if (time_or_witness_triggered_ & kTimeTriggered) + merged_events->AddToEnd(*timed_events_); + if (time_or_witness_triggered_ & kWitnessTriggered) + merged_events->AddToEnd(*witnessed_events_); + + while (true) { + // Starting a new step on the trajectory. + const T step_start_time = context_->get_time(); + DRAKE_LOGGER_TRACE("Starting a simulation step at {}", step_start_time); + + // Delay to match target realtime rate if requested and possible. + PauseIfTooFast(); + + // The general policy here is to do actions in decreasing order of + // "violence" to the state, i.e. unrestricted -> discrete -> continuous -> + // publish. The "timed" actions happen before the "per step" ones. + + // Do unrestricted updates first. + HandleUnrestrictedUpdate(merged_events->get_unrestricted_update_events()); + // Do restricted (discrete variable) updates next. 
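For orientation, the Initialize()/AdvanceTo() machinery being assembled in this hunk is driven from user code roughly as follows (a sketch; `MySystem` and its header are placeholders for any concrete System<double>, and get_mutable_integrator() is an assumed accessor not shown in this hunk, while the remaining calls all appear in this patch):

```cpp
#include "drake/systems/analysis/simulator.h"
// Placeholder for any user-defined LeafSystem/Diagram; not part of this patch.
#include "my_system.h"

int main() {
  MySystem system;  // Hypothetical user system.
  drake::systems::Simulator<double> simulator(system);

  // Optionally override the defaults documented in the constructor above
  // (RK3 integrator, max step 0.1 s, initial step 1e-4 s, accuracy 1e-4).
  auto& integrator = simulator.get_mutable_integrator();  // Assumed accessor.
  integrator.set_maximum_step_size(0.01);
  integrator.set_target_accuracy(1e-6);

  // Initialize() dispatches initialization events; AdvanceTo() runs the
  // unrestricted -> discrete -> continuous -> publish loop shown here.
  simulator.Initialize();
  simulator.AdvanceTo(1.0);
  return 0;
}
```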
+ HandleDiscreteUpdate(merged_events->get_discrete_update_events()); + + // How far can we go before we have to handle timed events? + next_timed_event_time_ = + system_.CalcNextUpdateTime(*context_, timed_events_.get()); + DRAKE_DEMAND(next_timed_event_time_ >= step_start_time); + + // Determine whether the set of events requested by the System at + // next_timed_event_time includes an Update action, a Publish action, or + // both. + T next_update_time = std::numeric_limits::infinity(); + T next_publish_time = std::numeric_limits::infinity(); + if (timed_events_->HasDiscreteUpdateEvents() || + timed_events_->HasUnrestrictedUpdateEvents()) { + next_update_time = next_timed_event_time_; + } + if (timed_events_->HasPublishEvents()) { + next_publish_time = next_timed_event_time_; + } + + // Integrate the continuous state forward in time. + time_or_witness_triggered_ = IntegrateContinuousState( + next_publish_time, + next_update_time, + next_timed_event_time_, + boundary_time, + witnessed_events_.get()); + + // Update the number of simulation steps taken. + ++num_steps_taken_; + + // TODO(sherm1) Constraint projection goes here. + + // Clear events for the next loop iteration. + merged_events->Clear(); + + // Merge in per-step events. + merged_events->AddToEnd(*per_step_events_); + + // Only merge timed / witnessed events in if an event was triggered. + if (time_or_witness_triggered_ & kTimeTriggered) + merged_events->AddToEnd(*timed_events_); + if (time_or_witness_triggered_ & kWitnessTriggered) + merged_events->AddToEnd(*witnessed_events_); + + // Handle any publish events at the end of the loop. + HandlePublish(merged_events->get_publish_events()); + + // TODO(siyuan): transfer per step publish entirely to individual systems. + // Allow System a chance to produce some output. + if (get_publish_every_time_step()) { + system_.Publish(*context_); + ++num_publishes_; + } + + CallMonitorUpdateStatusAndMaybeThrow(&status); + if (!status.succeeded()) + break; // Done. + + // Break out of the loop after timed and witnessed events are merged in + // to the event collection and after any publishes. + if (context_->get_time() >= boundary_time) + break; + } + + // TODO(edrumwri): Add test coverage to complete #8490. + redetermine_active_witnesses_ = true; + + return status; +} + +template +std::optional Simulator::GetCurrentWitnessTimeIsolation() const { + using std::max; + + // TODO(edrumwri): Add ability to disable witness time isolation through + // a Simulator setting. + + // The scale factor for witness isolation accuracy, which can allow witness + // function zeros to be isolated more or less tightly, for positive values + // less than one and greater than one, respectively. This number should be a + // reasonable default that allows witness isolation accuracy to be + // commensurate with integrator accuracy for most systems. + const double iso_scale_factor = 0.01; + + // TODO(edrumwri): Acquire characteristic time properly from the system + // (i.e., modify the System to provide this value). + const double characteristic_time = 1.0; + + // Get the accuracy setting. + const std::optional& accuracy = get_context().get_accuracy(); + + // Determine the length of the isolation interval. + if (integrator_->get_fixed_step_mode()) { + // Look for accuracy information. 
+ if (accuracy) { + return max(integrator_->get_working_minimum_step_size(), + T(iso_scale_factor * accuracy.value() * + integrator_->get_maximum_step_size())); + } else { + return std::optional(); + } + } + + // Integration with error control isolation window determination. + if (!accuracy) { + throw std::logic_error("Integrator is not operating in fixed step mode " + "and accuracy is not set in the context."); + } + + // Note: the max computation is used (here and above) because it is + // ineffectual to attempt to isolate intervals smaller than the current time + // in the context can allow. + return max(integrator_->get_working_minimum_step_size(), + iso_scale_factor * accuracy.value() * characteristic_time); +} + +// Determines whether any witnesses trigger over the interval [t0, tw], +// where tw - t0 < ε and ε is the "witness isolation length". If one or more +// witnesses does trigger over this interval, the time (and corresponding state) +// will be advanced to tw and those witnesses will be stored in +// `triggered_witnesses` on return. On the other hand (i.e., if no witnesses) +// trigger over [t0, t0 + ε], time (and corresponding state) will be advanced +// to some tc in the open interval (t0, tf) such that no witnesses trigger +// over [t0, tc]; in other words, we deem it "safe" to integrate to tc. +// @param[in,out] triggered_witnesses on entry, the set of witness functions +// that triggered over [t0, tf]; on exit, the set of witness +// functions that triggered over [t0, tw], where tw is some time +// such that tw - t0 < ε. If no functions trigger over +// [t0, t0 + ε], `triggered_witnesses` will be empty on exit. +// @pre The time and state are at tf and x(tf), respectively, and at least +// one witness function has triggered over [t0, tf]. +// @post If `triggered_witnesses` is empty, the time and state will be +// set to some tc and x(tc), respectively, such that no witnesses trigger +// over [t0, tc]. Otherwise, the time and state will be set to tw and +// x(tw), respectively. +// @note The underlying assumption is that a witness function triggers over a +// interval [a, d] for d ≤ the maximum integrator step size if that +// witness also triggers over interval [a, b] for some b < d. Per +// WitnessFunction documentation, we assume that a witness function +// crosses zero at most once over an interval of size [t0, tf]). +template +void Simulator::IsolateWitnessTriggers( + const std::vector*>& witnesses, + const VectorX& w0, + const T& t0, const VectorX& x0, const T& tf, + std::vector*>* triggered_witnesses) { + + // Verify that the vector of triggered witnesses is non-null. + DRAKE_DEMAND(triggered_witnesses); + + // TODO(edrumwri): Speed this process using interpolation between states, + // more powerful root finding methods, and/or introducing the concept of + // a dead band. + + // Will need to alter the context repeatedly. + Context& context = get_mutable_context(); + + // Get the witness isolation interval length. + const std::optional witness_iso_len = GetCurrentWitnessTimeIsolation(); + + // Check whether witness functions *are* to be isolated. If not, the witnesses + // that were triggered on entry will be the set that is returned. + if (!witness_iso_len) + return; + + // Mini function for integrating the system forward in time from t0. 
+ std::function integrate_forward = + [&t0, &x0, &context, this](const T& t_des) { + const T inf = std::numeric_limits::infinity(); + context.SetTime(t0); + context.get_mutable_continuous_state().SetFromVector(x0); + while (context.get_time() < t_des) + integrator_->IntegrateNoFurtherThanTime(inf, inf, t_des); + }; + + // Starting from c = (t0 + tf)/2, look for a witness function triggering + // over the interval [t0, tc]. Assuming a witness does trigger, c will + // continue moving leftward as a witness function triggers until the length of + // the time interval is small. If a witness fails to trigger as c moves + // leftward, we return, indicating that no witnesses triggered over [t0, c]. + DRAKE_LOGGER_DEBUG( + "Isolating witness functions using isolation window of {} over [{}, {}]", + witness_iso_len.value(), t0, tf); + VectorX wc(witnesses.size()); + T a = t0; + T b = tf; + do { + // Compute the midpoint and evaluate the witness functions at it. + T c = (a + b) / 2; + DRAKE_LOGGER_DEBUG("Integrating forward to time {}", c); + integrate_forward(c); + + // See whether any witness functions trigger. + bool trigger = false; + for (size_t i = 0; i < witnesses.size(); ++i) { + wc[i] = get_system().CalcWitnessValue(context, *witnesses[i]); + if (witnesses[i]->should_trigger(w0[i], wc[i])) + trigger = true; + } + + // If no witness function triggered, we can continue integrating forward. + if (!trigger) { + // NOTE: Since we're always checking that the sign changes over [t0,c], + // it's also feasible to replace the two lines below with "a = c" without + // violating Simulator's contract to only integrate once over the interval + // [a, c], for some c <= b before per-step events are handled (i.e., it's + // unacceptable to take two steps of (c - a)/2 without processing per-step + // events first). That change would avoid handling unnecessary per-step + // events- we know no other events are to be handled between t0 and tf- + // but the current logic appears easier to follow. + DRAKE_LOGGER_DEBUG("No witness functions triggered up to {}", c); + triggered_witnesses->clear(); + return; // Time is c. + } else { + b = c; + } + } while (b - a > witness_iso_len.value()); + + // Determine the set of triggered witnesses. + triggered_witnesses->clear(); + for (size_t i = 0; i < witnesses.size(); ++i) { + if (witnesses[i]->should_trigger(w0[i], wc[i])) + triggered_witnesses->push_back(witnesses[i]); + } +} + +// Evaluates the given vector of witness functions. +template +VectorX Simulator::EvaluateWitnessFunctions( + const std::vector*>& witness_functions, + const Context& context) const { + const System& system = get_system(); + VectorX weval(witness_functions.size()); + for (size_t i = 0; i < witness_functions.size(); ++i) + weval[i] = system.CalcWitnessValue(context, *witness_functions[i]); + return weval; +} + +// Determines whether at least one of a collection of witness functions +// triggered over a time interval [t0, tf] using the values of those functions +// evaluated at the left and right hand sides of that interval. +// @param witness_functions a vector of all witness functions active over +// [t0, tf]. +// @param w0 the values of the witnesses evaluated at t0. +// @param wf the values of the witnesses evaluated at tf. +// @param [out] triggered_witnesses Returns one of the witnesses that triggered, +// if any. +// @returns `true` if a witness triggered or `false` otherwise. 
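The bisection performed by IsolateWitnessTriggers() above can be illustrated with a self-contained scalar analogue (illustrative names only, not Drake API). Note one deliberate difference: the Simulator code returns early, without isolating, as soon as it finds a trigger-free left half-interval, whereas the classic bisection below always shrinks the bracket down to the isolation window:

```cpp
#include <cmath>
#include <cstdio>
#include <functional>

// Toy scalar analogue of Simulator::IsolateWitnessTriggers(): bisect [t0, tf]
// until the sign change of a scalar witness g is bracketed within `window`.
// Precondition: g changes sign exactly once in (t0, tf].
double IsolateSignChange(const std::function<double(double)>& g,
                         double t0, double tf, double window) {
  const double g0 = g(t0);
  double a = t0;
  double b = tf;
  while (b - a > window) {
    const double c = 0.5 * (a + b);  // "Integrate forward" to the midpoint.
    if (g0 * g(c) <= 0.0) {
      b = c;  // The witness triggers over [t0, c]; keep shrinking.
    } else {
      a = c;  // No trigger over [t0, c]; it is safe to advance past c.
    }
  }
  return b;  // Time at which the witness is deemed to have triggered.
}

int main() {
  const double kPi = std::acos(-1.0);
  // Example: the witness g(t) = cos(t) crosses zero once on [0, 3], at pi/2.
  const double t_trigger =
      IsolateSignChange([](double t) { return std::cos(t); }, 0.0, 3.0, 1e-6);
  std::printf("isolated trigger at t = %.8f (pi/2 = %.8f)\n", t_trigger,
              kPi / 2);
  return 0;
}
```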
+template +bool Simulator::DidWitnessTrigger( + const std::vector*>& witness_functions, + const VectorX& w0, + const VectorX& wf, + std::vector*>* triggered_witnesses) { + // See whether a witness function triggered. + triggered_witnesses->clear(); + bool witness_triggered = false; + for (size_t i = 0; i < witness_functions.size() && !witness_triggered; ++i) { + if (witness_functions[i]->should_trigger(w0[i], wf[i])) { + witness_triggered = true; + triggered_witnesses->push_back(witness_functions[i]); + } + } + + return witness_triggered; +} + +// Populates event data for `event` triggered by a witness function (`witness`) +// that was evaluated over the time interval [`t0`, `tf`] and adds it to the +// given event collection (`events`). +template +void Simulator::PopulateEventDataForTriggeredWitness( + const T& t0, const T& tf, const WitnessFunction* witness, + Event* event, CompositeEventCollection* events) const { + // Populate the event data. + auto event_data = static_cast*>( + event->get_mutable_event_data()); + event_data->set_triggered_witness(witness); + event_data->set_t0(t0); + event_data->set_tf(tf); + event_data->set_xc0(event_handler_xc_.get()); + event_data->set_xcf(&context_->get_continuous_state()); + get_system().AddTriggeredWitnessFunctionToCompositeEventCollection( + event, events); +} + +// (Re)determines the set of witness functions active over this interval, +// if necessary. +template +void Simulator::RedetermineActiveWitnessFunctionsIfNecessary() { + const System& system = get_system(); + if (redetermine_active_witnesses_) { + witness_functions_->clear(); + system.GetWitnessFunctions(get_context(), witness_functions_.get()); + redetermine_active_witnesses_ = false; + } +} + +// Integrates the continuous state forward in time while also locating +// the first zero of any triggered witness functions. +// @param next_publish_time the time at which the next publish event occurs. +// @param next_update_time the time at which the next update event occurs. +// @param time_of_next_event the time at which the next timed event occurs. +// @param boundary_time the maximum time to advance to. +// @param events a non-null collection of events, which the method will clear +// on entry. +// @returns the event triggers that terminated integration. +template +typename Simulator::TimeOrWitnessTriggered +Simulator::IntegrateContinuousState( + const T& next_publish_time, const T& next_update_time, + const T&, const T& boundary_time, + CompositeEventCollection* events) { + using std::abs; + + // Clear the composite event collection. + DRAKE_ASSERT(events); + events->Clear(); + + // Save the time and current state. + const Context& context = get_context(); + const T t0 = context.get_time(); + const VectorX x0 = context.get_continuous_state().CopyToVector(); + + // Get the set of witness functions active at the current state. + RedetermineActiveWitnessFunctionsIfNecessary(); + const auto& witness_functions = *witness_functions_; + + // Evaluate the witness functions. + w0_ = EvaluateWitnessFunctions(witness_functions, context); + + // Attempt to integrate. Updates and boundary times are consciously + // distinguished between. See internal documentation for + // IntegratorBase::StepOnceAtMost() for more information. + typename IntegratorBase::StepResult result = + integrator_->IntegrateNoFurtherThanTime( + next_publish_time, next_update_time, boundary_time); + const T tf = context.get_time(); + + // Evaluate the witness functions again. 
+ wf_ = EvaluateWitnessFunctions(witness_functions, context); + + // Triggering requires isolating the witness function time. + if (DidWitnessTrigger(witness_functions, w0_, wf_, &triggered_witnesses_)) { + // Isolate the time that the witness function triggered. If witness triggers + // are detected in the interval [t0, tf], any additional time-triggered + // events are only relevant iff at least one witness function is + // successfully isolated (see IsolateWitnessTriggers() for details). + IsolateWitnessTriggers( + witness_functions, w0_, t0, x0, tf, &triggered_witnesses_); + + // Store the state at x0 in the temporary continuous state. We only do this + // if there are triggered witnesses (even though `witness_triggered` is + // `true`, the witness might not have actually triggered after isolation). + if (!triggered_witnesses_.empty()) + event_handler_xc_->SetFromVector(x0); + + // Store witness function(s) that triggered. + for (const WitnessFunction* fn : triggered_witnesses_) { + DRAKE_LOGGER_DEBUG("Witness function {} crossed zero at time {}", + fn->description(), context.get_time()); + + // Skip witness functions that have no associated event (i.e., skip + // witness functions whose sole purpose is to insert a break in the + // integration of continuous state). + if (!fn->get_event()) + continue; + + // Get the event object that corresponds to this witness function. If + // Simulator has yet to create this object, go ahead and create it. + auto& event = witness_function_events_[fn]; + if (!event) { + event = fn->get_event()->Clone(); + event->set_trigger_type(TriggerType::kWitness); + event->set_event_data(std::make_unique>()); + } + PopulateEventDataForTriggeredWitness(t0, tf, fn, event.get(), events); + } + + // When successful, the isolation process produces a vector of witnesses + // that trigger over every interval [t0, ti], ∀ti in (t0, tf]. If this + // vector (triggered_witnesses_) is empty, then time advanced to the first + // ti such that no witnesses triggered over [t0, ti]. + const T& ti = context_->get_time(); + if (!triggered_witnesses_.empty()) { + // We now know that integration terminated at a witness function crossing. + // Now we need to look for the unusual case in which a timed event should + // also trigger simultaneously. + // IntegratorBase::IntegrateNoFurtherThanTime(.) pledges to step no + // further than min(next_publish_time, next_update_time, boundary_time), + // so we'll verify that assertion. + DRAKE_DEMAND(ti <= next_update_time && tf <= next_publish_time); + if (ti == next_update_time || ti == next_publish_time) { + return kBothTriggered; + } else { + return kWitnessTriggered; + } + } else { + // Integration didn't succeed on the larger interval [t0, tf]; instead, + // the continuous state was integrated to the intermediate time ti, where + // t0 < ti < tf. Since any publishes/updates must occur at tf, there + // should be no triggers. + DRAKE_DEMAND(t0 < ti && ti < tf); + + // The contract for IntegratorBase::IntegrateNoFurtherThanTime() specifies + // that tf must be less than or equal to next_update_time and + // next_publish_time. Since ti must be strictly less than tf, it follows + // that ti must be strictly less than next_update_time and + // next_publish_time. + DRAKE_DEMAND(next_update_time > ti && next_publish_time > ti); + return kNothingTriggered; + } + } + + // No witness function triggered; handle integration as usual. + // Updates and boundary times are consciously distinguished between. 
See + // internal documentation for IntegratorBase::StepOnceAtMost() for more + // information. + switch (result) { + case IntegratorBase::kReachedUpdateTime: + case IntegratorBase::kReachedPublishTime: + return kTimeTriggered; + + // We do nothing for these two cases. + case IntegratorBase::kTimeHasAdvanced: + case IntegratorBase::kReachedBoundaryTime: + return kNothingTriggered; + + case IntegratorBase::kReachedZeroCrossing: + case IntegratorBase::kReachedStepLimit: + throw std::logic_error("Unexpected integrator result"); + } + + DRAKE_UNREACHABLE(); +} + template void Simulator::PauseIfTooFast() const { if (target_realtime_rate_ <= 0) return; // Run at full speed. @@ -44,8 +701,8 @@ void Simulator::ResetStatistics() { initial_realtime_ = Clock::now(); } -template class Simulator; -template class Simulator; - } // namespace systems } // namespace drake + +DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS( + class drake::systems::Simulator) diff --git a/systems/analysis/simulator.h b/systems/analysis/simulator.h index 6a771be69876..45b0111e84ad 100644 --- a/systems/analysis/simulator.h +++ b/systems/analysis/simulator.h @@ -2,22 +2,18 @@ #include #include -#include #include #include #include -#include #include #include #include -#include "drake/common/autodiff.h" +#include "drake/common/default_scalars.h" #include "drake/common/drake_assert.h" #include "drake/common/drake_copyable.h" #include "drake/common/extract_double.h" -#include "drake/common/text_logging.h" #include "drake/systems/analysis/integrator_base.h" -#include "drake/systems/analysis/runge_kutta3_integrator.h" #include "drake/systems/analysis/simulator_status.h" #include "drake/systems/framework/context.h" #include "drake/systems/framework/system.h" @@ -881,54 +877,6 @@ class Simulator { std::function&)> monitor_; }; -template -Simulator::Simulator(const System& system, - std::unique_ptr> context) - : Simulator(&system, nullptr, std::move(context)) {} - -template -Simulator::Simulator(std::unique_ptr> owned_system, - std::unique_ptr> context) : - Simulator(nullptr, std::move(owned_system), std::move(context)) {} - -template -Simulator::Simulator(const System* system, - std::unique_ptr> owned_system, - std::unique_ptr> context) - : owned_system_(std::move(owned_system)), - system_(owned_system_ ? *owned_system_ : *system), - context_(std::move(context)) { - // Setup defaults that should be generally reasonable. - const double max_step_size = 0.1; - const double initial_step_size = 1e-4; - const double default_accuracy = 1e-4; - - // Create a context if necessary. - if (!context_) context_ = system_.CreateDefaultContext(); - - // Create a default integrator and initialize it. - // N.B. Keep this in sync with systems::internal::kDefaultIntegratorName at - // the top of this file. - integrator_ = std::unique_ptr>( - new RungeKutta3Integrator(system_, context_.get())); - integrator_->request_initial_step_size_target(initial_step_size); - integrator_->set_maximum_step_size(max_step_size); - integrator_->set_target_accuracy(default_accuracy); - integrator_->Initialize(); - - // Allocate the necessary temporaries for storing state in update calls - // (which will then be transferred back to system state). - discrete_updates_ = system_.AllocateDiscreteVariables(); - unrestricted_updates_ = context_->CloneState(); - - // Allocate the vector of active witness functions. - witness_functions_ = std::make_unique< - std::vector*>>(); - - // Allocate the necessary temporary for witness-based event handling. 
- event_handler_xc_ = system_.AllocateTimeDerivatives(); -} - #ifndef DRAKE_DOXYGEN_CXX namespace internal { // This function computes the previous (i.e., that which is one step closer to @@ -980,615 +928,8 @@ T GetPreviousNormalizedValue(const T& value) { } // namespace internal #endif -template -SimulatorStatus Simulator::Initialize() { - // TODO(sherm1) Modify Context to satisfy constraints. - // TODO(sherm1) Invoke System's initial conditions computation. - if (!context_) - throw std::logic_error("Initialize(): Context has not been set."); - - // Record the current time so we can restore it later (see below). - // *Don't* use a reference here! - const T current_time = context_->get_time(); - - // Assumes success. - SimulatorStatus status(ExtractDoubleOrThrow(current_time)); - - // Initialize the integrator. - integrator_->Initialize(); - - // Restore default values. - ResetStatistics(); - - // Process all the initialization events. - auto init_events = system_.AllocateCompositeEventCollection(); - system_.GetInitializationEvents(*context_, init_events.get()); - - // Do unrestricted updates first. - HandleUnrestrictedUpdate(init_events->get_unrestricted_update_events()); - // Do restricted (discrete variable) updates next. - HandleDiscreteUpdate(init_events->get_discrete_update_events()); - - // Gets all per-step events to be handled. - per_step_events_ = system_.AllocateCompositeEventCollection(); - DRAKE_DEMAND(per_step_events_ != nullptr); - system_.GetPerStepEvents(*context_, per_step_events_.get()); - - // Allocate timed events collection. - timed_events_ = system_.AllocateCompositeEventCollection(); - DRAKE_DEMAND(timed_events_ != nullptr); - - // Ensure that CalcNextUpdateTime() can return the current time by perturbing - // current time as slightly toward negative infinity as we can allow. - const T slightly_before_current_time = internal::GetPreviousNormalizedValue( - current_time); - context_->SetTime(slightly_before_current_time); - - // Get the next timed event. - next_timed_event_time_ = - system_.CalcNextUpdateTime(*context_, timed_events_.get()); - - // Reset the context time. - context_->SetTime(current_time); - - // Indicate a timed event is to be handled, if appropriate. - if (next_timed_event_time_ == current_time) { - time_or_witness_triggered_ = kTimeTriggered; - } else { - time_or_witness_triggered_ = kNothingTriggered; - } - - // Allocate the witness function collection. - witnessed_events_ = system_.AllocateCompositeEventCollection(); - - // Do any publishes last. Merge the initialization events with per-step - // events and current_time timed events (if any). We expect all initialization - // events to precede any per-step or timed events in the merged collection. - // Note that per-step and timed discrete/unrestricted update events are *not* - // processed here; just publish events. - init_events->AddToEnd(*per_step_events_); - if (time_or_witness_triggered_ & kTimeTriggered) - init_events->AddToEnd(*timed_events_); - HandlePublish(init_events->get_publish_events()); - - // TODO(siyuan): transfer publish entirely to individual systems. - // Do a force-publish before the simulation starts. - if (publish_at_initialization_) { - system_.Publish(*context_); - ++num_publishes_; - } - - CallMonitorUpdateStatusAndMaybeThrow(&status); - - // Initialize runtime variables. - initialization_done_ = true; - - return status; -} - -// Processes UnrestrictedUpdateEvent events. 
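The "perturb time slightly toward negative infinity" trick used by Initialize() relies on internal::GetPreviousNormalizedValue(), whose body is elided by this hunk. The core idea is the previous representable floating-point value, which the standard library exposes directly; a simplified sketch (ignoring the normalization handling that the real helper performs, per its name) is:

```cpp
#include <cmath>
#include <cstdio>
#include <limits>

int main() {
  const double t = 1.0;
  // The closest representable double below t. Simulator::Initialize() uses a
  // similar (normalized) value so that CalcNextUpdateTime() can report an
  // event scheduled at exactly the current time.
  const double t_minus =
      std::nextafter(t, -std::numeric_limits<double>::infinity());
  std::printf("t       = %.17g\n", t);
  std::printf("t_minus = %.17g\n", t_minus);
  std::printf("gap     = %.17g\n", t - t_minus);
  return 0;
}
```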
-template -void Simulator::HandleUnrestrictedUpdate( - const EventCollection>& events) { - if (events.HasEvents()) { - // First, compute the unrestricted updates into a temporary buffer. - system_.CalcUnrestrictedUpdate(*context_, events, - unrestricted_updates_.get()); - // Now write the update back into the context. - system_.ApplyUnrestrictedUpdate(events, unrestricted_updates_.get(), - context_.get()); - ++num_unrestricted_updates_; - - // Mark the witness function vector as needing to be redetermined. - redetermine_active_witnesses_ = true; - } -} - -// Processes DiscreteEvent events. -template -void Simulator::HandleDiscreteUpdate( - const EventCollection>& events) { - if (events.HasEvents()) { - // First, compute the discrete updates into a temporary buffer. - system_.CalcDiscreteVariableUpdates(*context_, events, - discrete_updates_.get()); - // Then, write them back into the context. - system_.ApplyDiscreteVariableUpdate(events, discrete_updates_.get(), - context_.get()); - ++num_discrete_updates_; - } -} - -// Processes Publish events. -template -void Simulator::HandlePublish( - const EventCollection>& events) { - if (events.HasEvents()) { - system_.Publish(*context_, events); - ++num_publishes_; - } -} - -template -SimulatorStatus Simulator::AdvanceTo(const T& boundary_time) { - if (!initialization_done_) { - const SimulatorStatus initialize_status = Initialize(); - if (!initialize_status.succeeded()) - return initialize_status; - } - - DRAKE_THROW_UNLESS(boundary_time >= context_->get_time()); - - // Assume success. - SimulatorStatus status(ExtractDoubleOrThrow(boundary_time)); - - // Integrate until desired interval has completed. - auto merged_events = system_.AllocateCompositeEventCollection(); - DRAKE_DEMAND(timed_events_ != nullptr); - DRAKE_DEMAND(witnessed_events_ != nullptr); - DRAKE_DEMAND(merged_events != nullptr); - - // Clear events for the loop iteration. - merged_events->Clear(); - merged_events->AddToEnd(*per_step_events_); - - // Merge in timed and witnessed events, if necessary. - if (time_or_witness_triggered_ & kTimeTriggered) - merged_events->AddToEnd(*timed_events_); - if (time_or_witness_triggered_ & kWitnessTriggered) - merged_events->AddToEnd(*witnessed_events_); - - while (true) { - // Starting a new step on the trajectory. - const T step_start_time = context_->get_time(); - DRAKE_LOGGER_TRACE("Starting a simulation step at {}", step_start_time); - - // Delay to match target realtime rate if requested and possible. - PauseIfTooFast(); - - // The general policy here is to do actions in decreasing order of - // "violence" to the state, i.e. unrestricted -> discrete -> continuous -> - // publish. The "timed" actions happen before the "per step" ones. - - // Do unrestricted updates first. - HandleUnrestrictedUpdate(merged_events->get_unrestricted_update_events()); - // Do restricted (discrete variable) updates next. - HandleDiscreteUpdate(merged_events->get_discrete_update_events()); - - // How far can we go before we have to handle timed events? - next_timed_event_time_ = - system_.CalcNextUpdateTime(*context_, timed_events_.get()); - DRAKE_DEMAND(next_timed_event_time_ >= step_start_time); - - // Determine whether the set of events requested by the System at - // next_timed_event_time includes an Update action, a Publish action, or - // both. 
- T next_update_time = std::numeric_limits::infinity(); - T next_publish_time = std::numeric_limits::infinity(); - if (timed_events_->HasDiscreteUpdateEvents() || - timed_events_->HasUnrestrictedUpdateEvents()) { - next_update_time = next_timed_event_time_; - } - if (timed_events_->HasPublishEvents()) { - next_publish_time = next_timed_event_time_; - } - - // Integrate the continuous state forward in time. - time_or_witness_triggered_ = IntegrateContinuousState( - next_publish_time, - next_update_time, - next_timed_event_time_, - boundary_time, - witnessed_events_.get()); - - // Update the number of simulation steps taken. - ++num_steps_taken_; - - // TODO(sherm1) Constraint projection goes here. - - // Clear events for the next loop iteration. - merged_events->Clear(); - - // Merge in per-step events. - merged_events->AddToEnd(*per_step_events_); - - // Only merge timed / witnessed events in if an event was triggered. - if (time_or_witness_triggered_ & kTimeTriggered) - merged_events->AddToEnd(*timed_events_); - if (time_or_witness_triggered_ & kWitnessTriggered) - merged_events->AddToEnd(*witnessed_events_); - - // Handle any publish events at the end of the loop. - HandlePublish(merged_events->get_publish_events()); - - // TODO(siyuan): transfer per step publish entirely to individual systems. - // Allow System a chance to produce some output. - if (get_publish_every_time_step()) { - system_.Publish(*context_); - ++num_publishes_; - } - - CallMonitorUpdateStatusAndMaybeThrow(&status); - if (!status.succeeded()) - break; // Done. - - // Break out of the loop after timed and witnessed events are merged in - // to the event collection and after any publishes. - if (context_->get_time() >= boundary_time) - break; - } - - // TODO(edrumwri): Add test coverage to complete #8490. - redetermine_active_witnesses_ = true; - - return status; -} - -template -std::optional Simulator::GetCurrentWitnessTimeIsolation() const { - using std::max; - - // TODO(edrumwri): Add ability to disable witness time isolation through - // a Simulator setting. - - // The scale factor for witness isolation accuracy, which can allow witness - // function zeros to be isolated more or less tightly, for positive values - // less than one and greater than one, respectively. This number should be a - // reasonable default that allows witness isolation accuracy to be - // commensurate with integrator accuracy for most systems. - const double iso_scale_factor = 0.01; - - // TODO(edrumwri): Acquire characteristic time properly from the system - // (i.e., modify the System to provide this value). - const double characteristic_time = 1.0; - - // Get the accuracy setting. - const std::optional& accuracy = get_context().get_accuracy(); - - // Determine the length of the isolation interval. - if (integrator_->get_fixed_step_mode()) { - // Look for accuracy information. - if (accuracy) { - return max(integrator_->get_working_minimum_step_size(), - T(iso_scale_factor * accuracy.value() * - integrator_->get_maximum_step_size())); - } else { - return std::optional(); - } - } - - // Integration with error control isolation window determination. - if (!accuracy) { - throw std::logic_error("Integrator is not operating in fixed step mode " - "and accuracy is not set in the context."); - } - - // Note: the max computation is used (here and above) because it is - // ineffectual to attempt to isolate intervals smaller than the current time - // in the context can allow. 
-  return max(integrator_->get_working_minimum_step_size(),
-             iso_scale_factor * accuracy.value() * characteristic_time);
-}
-
-// Determines whether any witnesses trigger over the interval [t0, tw],
-// where tw - t0 < ε and ε is the "witness isolation length". If one or more
-// witnesses does trigger over this interval, the time (and corresponding
-// state) will be advanced to tw and those witnesses will be stored in
-// `triggered_witnesses` on return. On the other hand (i.e., if no witnesses
-// trigger over [t0, t0 + ε]), time (and corresponding state) will be advanced
-// to some tc in the open interval (t0, tf) such that no witnesses trigger
-// over [t0, tc]; in other words, we deem it "safe" to integrate to tc.
-// @param[in,out] triggered_witnesses on entry, the set of witness functions
-//                that triggered over [t0, tf]; on exit, the set of witness
-//                functions that triggered over [t0, tw], where tw is some
-//                time such that tw - t0 < ε. If no functions trigger over
-//                [t0, t0 + ε], `triggered_witnesses` will be empty on exit.
-// @pre The time and state are at tf and x(tf), respectively, and at least
-//      one witness function has triggered over [t0, tf].
-// @post If `triggered_witnesses` is empty, the time and state will be
-//       set to some tc and x(tc), respectively, such that no witnesses
-//       trigger over [t0, tc]. Otherwise, the time and state will be set to
-//       tw and x(tw), respectively.
-// @note The underlying assumption is that a witness function triggers over
-//       an interval [a, d] for d ≤ the maximum integrator step size if that
-//       witness also triggers over interval [a, b] for some b < d. Per
-//       WitnessFunction documentation, we assume that a witness function
-//       crosses zero at most once over an interval of size [t0, tf].
-template <typename T>
-void Simulator<T>::IsolateWitnessTriggers(
-    const std::vector<const WitnessFunction<T>*>& witnesses,
-    const VectorX<T>& w0,
-    const T& t0, const VectorX<T>& x0, const T& tf,
-    std::vector<const WitnessFunction<T>*>* triggered_witnesses) {
-
-  // Verify that the vector of triggered witnesses is non-null.
-  DRAKE_DEMAND(triggered_witnesses);
-
-  // TODO(edrumwri): Speed this process using interpolation between states,
-  // more powerful root finding methods, and/or introducing the concept of
-  // a dead band.
-
-  // Will need to alter the context repeatedly.
-  Context<T>& context = get_mutable_context();
-
-  // Get the witness isolation interval length.
-  const std::optional<T> witness_iso_len = GetCurrentWitnessTimeIsolation();
-
-  // Check whether witness functions *are* to be isolated. If not, the
-  // witnesses that were triggered on entry will be the set that is returned.
-  if (!witness_iso_len)
-    return;
-
-  // Mini function for integrating the system forward in time from t0.
-  std::function<void(const T&)> integrate_forward =
-      [&t0, &x0, &context, this](const T& t_des) {
-        const T inf = std::numeric_limits<double>::infinity();
-        context.SetTime(t0);
-        context.get_mutable_continuous_state().SetFromVector(x0);
-        while (context.get_time() < t_des)
-          integrator_->IntegrateNoFurtherThanTime(inf, inf, t_des);
-      };
-
-  // Starting from c = (t0 + tf)/2, look for a witness function triggering
-  // over the interval [t0, tc]. Assuming a witness does trigger, c will
-  // continue moving leftward as a witness function triggers until the length
-  // of the time interval is small. If a witness fails to trigger as c moves
-  // leftward, we return, indicating that no witnesses triggered over [t0, c].
-  DRAKE_LOGGER_DEBUG(
-      "Isolating witness functions using isolation window of {} over [{}, {}]",
-      witness_iso_len.value(), t0, tf);
-  VectorX<T> wc(witnesses.size());
-  T a = t0;
-  T b = tf;
-  do {
-    // Compute the midpoint and evaluate the witness functions at it.
-    T c = (a + b) / 2;
-    DRAKE_LOGGER_DEBUG("Integrating forward to time {}", c);
-    integrate_forward(c);
-
-    // See whether any witness functions trigger.
-    bool trigger = false;
-    for (size_t i = 0; i < witnesses.size(); ++i) {
-      wc[i] = get_system().CalcWitnessValue(context, *witnesses[i]);
-      if (witnesses[i]->should_trigger(w0[i], wc[i]))
-        trigger = true;
-    }
-
-    // If no witness function triggered, we can continue integrating forward.
-    if (!trigger) {
-      // NOTE: Since we're always checking that the sign changes over [t0, c],
-      // it's also feasible to replace the two lines below with "a = c"
-      // without violating Simulator's contract to only integrate once over
-      // the interval [a, c], for some c <= b before per-step events are
-      // handled (i.e., it's unacceptable to take two steps of (c - a)/2
-      // without processing per-step events first). That change would avoid
-      // handling unnecessary per-step events- we know no other events are to
-      // be handled between t0 and tf- but the current logic appears easier
-      // to follow.
-      DRAKE_LOGGER_DEBUG("No witness functions triggered up to {}", c);
-      triggered_witnesses->clear();
-      return;  // Time is c.
-    } else {
-      b = c;
-    }
-  } while (b - a > witness_iso_len.value());
-
-  // Determine the set of triggered witnesses.
-  triggered_witnesses->clear();
-  for (size_t i = 0; i < witnesses.size(); ++i) {
-    if (witnesses[i]->should_trigger(w0[i], wc[i]))
-      triggered_witnesses->push_back(witnesses[i]);
-  }
-}
-
-// Evaluates the given vector of witness functions.
-template <typename T>
-VectorX<T> Simulator<T>::EvaluateWitnessFunctions(
-    const std::vector<const WitnessFunction<T>*>& witness_functions,
-    const Context<T>& context) const {
-  const System<T>& system = get_system();
-  VectorX<T> weval(witness_functions.size());
-  for (size_t i = 0; i < witness_functions.size(); ++i)
-    weval[i] = system.CalcWitnessValue(context, *witness_functions[i]);
-  return weval;
-}
-
-// Determines whether at least one of a collection of witness functions
-// triggered over a time interval [t0, tf] using the values of those functions
-// evaluated at the left and right hand sides of that interval.
-// @param witness_functions a vector of all witness functions active over
-//        [t0, tf].
-// @param w0 the values of the witnesses evaluated at t0.
-// @param wf the values of the witnesses evaluated at tf.
-// @param [out] triggered_witnesses Returns one of the witnesses that
-//              triggered, if any.
-// @returns `true` if a witness triggered or `false` otherwise.
-template <typename T>
-bool Simulator<T>::DidWitnessTrigger(
-    const std::vector<const WitnessFunction<T>*>& witness_functions,
-    const VectorX<T>& w0,
-    const VectorX<T>& wf,
-    std::vector<const WitnessFunction<T>*>* triggered_witnesses) {
-  // See whether a witness function triggered.
-  triggered_witnesses->clear();
-  bool witness_triggered = false;
-  for (size_t i = 0; i < witness_functions.size() && !witness_triggered; ++i) {
-    if (witness_functions[i]->should_trigger(w0[i], wf[i])) {
-      witness_triggered = true;
-      triggered_witnesses->push_back(witness_functions[i]);
-    }
-  }
-
-  return witness_triggered;
-}
-
-// Populates event data for `event` triggered by a witness function
-// (`witness`) that was evaluated over the time interval [`t0`, `tf`] and adds
-// it to the given event collection (`events`).
-template <typename T>
-void Simulator<T>::PopulateEventDataForTriggeredWitness(
-    const T& t0, const T& tf, const WitnessFunction<T>* witness,
-    Event<T>* event, CompositeEventCollection<T>* events) const {
-  // Populate the event data.
-  auto event_data = static_cast<WitnessTriggeredEventData<T>*>(
-      event->get_mutable_event_data());
-  event_data->set_triggered_witness(witness);
-  event_data->set_t0(t0);
-  event_data->set_tf(tf);
-  event_data->set_xc0(event_handler_xc_.get());
-  event_data->set_xcf(&context_->get_continuous_state());
-  get_system().AddTriggeredWitnessFunctionToCompositeEventCollection(
-      event, events);
-}
-
-// (Re)determines the set of witness functions active over this interval,
-// if necessary.
-template <typename T>
-void Simulator<T>::RedetermineActiveWitnessFunctionsIfNecessary() {
-  const System<T>& system = get_system();
-  if (redetermine_active_witnesses_) {
-    witness_functions_->clear();
-    system.GetWitnessFunctions(get_context(), witness_functions_.get());
-    redetermine_active_witnesses_ = false;
-  }
-}
-
-// Integrates the continuous state forward in time while also locating
-// the first zero of any triggered witness functions.
-// @param next_publish_time the time at which the next publish event occurs.
-// @param next_update_time the time at which the next update event occurs.
-// @param time_of_next_event the time at which the next timed event occurs.
-// @param boundary_time the maximum time to advance to.
-// @param events a non-null collection of events, which the method will clear
-//        on entry.
-// @returns the event triggers that terminated integration.
-template <typename T>
-typename Simulator<T>::TimeOrWitnessTriggered
-Simulator<T>::IntegrateContinuousState(
-    const T& next_publish_time, const T& next_update_time,
-    const T&, const T& boundary_time,
-    CompositeEventCollection<T>* events) {
-  using std::abs;
-
-  // Clear the composite event collection.
-  DRAKE_ASSERT(events);
-  events->Clear();
-
-  // Save the time and current state.
-  const Context<T>& context = get_context();
-  const T t0 = context.get_time();
-  const VectorX<T> x0 = context.get_continuous_state().CopyToVector();
-
-  // Get the set of witness functions active at the current state.
-  RedetermineActiveWitnessFunctionsIfNecessary();
-  const auto& witness_functions = *witness_functions_;
-
-  // Evaluate the witness functions.
-  w0_ = EvaluateWitnessFunctions(witness_functions, context);
-
-  // Attempt to integrate. Updates and boundary times are consciously
-  // distinguished between. See internal documentation for
-  // IntegratorBase::StepOnceAtMost() for more information.
-  typename IntegratorBase<T>::StepResult result =
-      integrator_->IntegrateNoFurtherThanTime(
-          next_publish_time, next_update_time, boundary_time);
-  const T tf = context.get_time();
-
-  // Evaluate the witness functions again.
-  wf_ = EvaluateWitnessFunctions(witness_functions, context);
-
-  // Triggering requires isolating the witness function time.
-  if (DidWitnessTrigger(witness_functions, w0_, wf_, &triggered_witnesses_)) {
-    // Isolate the time that the witness function triggered. If witness
-    // triggers are detected in the interval [t0, tf], any additional
-    // time-triggered events are only relevant iff at least one witness
-    // function is successfully isolated (see IsolateWitnessTriggers() for
-    // details).
-    IsolateWitnessTriggers(
-        witness_functions, w0_, t0, x0, tf, &triggered_witnesses_);
-
-    // Store the state at x0 in the temporary continuous state.
-    // We only do this if there are triggered witnesses (even though
-    // `witness_triggered` is `true`, the witness might not have actually
-    // triggered after isolation).
-    if (!triggered_witnesses_.empty())
-      event_handler_xc_->SetFromVector(x0);
-
-    // Store witness function(s) that triggered.
-    for (const WitnessFunction<T>* fn : triggered_witnesses_) {
-      DRAKE_LOGGER_DEBUG("Witness function {} crossed zero at time {}",
-                         fn->description(), context.get_time());
-
-      // Skip witness functions that have no associated event (i.e., skip
-      // witness functions whose sole purpose is to insert a break in the
-      // integration of continuous state).
-      if (!fn->get_event())
-        continue;
-
-      // Get the event object that corresponds to this witness function. If
-      // Simulator has yet to create this object, go ahead and create it.
-      auto& event = witness_function_events_[fn];
-      if (!event) {
-        event = fn->get_event()->Clone();
-        event->set_trigger_type(TriggerType::kWitness);
-        event->set_event_data(
-            std::make_unique<WitnessTriggeredEventData<T>>());
-      }
-      PopulateEventDataForTriggeredWitness(t0, tf, fn, event.get(), events);
-    }
-
-    // When successful, the isolation process produces a vector of witnesses
-    // that trigger over every interval [t0, ti], ∀ti in (t0, tf]. If this
-    // vector (triggered_witnesses_) is empty, then time advanced to the first
-    // ti such that no witnesses triggered over [t0, ti].
-    const T& ti = context_->get_time();
-    if (!triggered_witnesses_.empty()) {
-      // We now know that integration terminated at a witness function
-      // crossing. Now we need to look for the unusual case in which a timed
-      // event should also trigger simultaneously.
-      // IntegratorBase::IntegrateNoFurtherThanTime(.) pledges to step no
-      // further than min(next_publish_time, next_update_time, boundary_time),
-      // so we'll verify that assertion.
-      DRAKE_DEMAND(ti <= next_update_time && tf <= next_publish_time);
-      if (ti == next_update_time || ti == next_publish_time) {
-        return kBothTriggered;
-      } else {
-        return kWitnessTriggered;
-      }
-    } else {
-      // Integration didn't succeed on the larger interval [t0, tf]; instead,
-      // the continuous state was integrated to the intermediate time ti,
-      // where t0 < ti < tf. Since any publishes/updates must occur at tf,
-      // there should be no triggers.
-      DRAKE_DEMAND(t0 < ti && ti < tf);
-
-      // The contract for IntegratorBase::IntegrateNoFurtherThanTime()
-      // specifies that tf must be less than or equal to next_update_time and
-      // next_publish_time. Since ti must be strictly less than tf, it follows
-      // that ti must be strictly less than next_update_time and
-      // next_publish_time.
-      DRAKE_DEMAND(next_update_time > ti && next_publish_time > ti);
-      return kNothingTriggered;
-    }
-  }
-
-  // No witness function triggered; handle integration as usual.
-  // Updates and boundary times are consciously distinguished between. See
-  // internal documentation for IntegratorBase::StepOnceAtMost() for more
-  // information.
-  switch (result) {
-    case IntegratorBase<T>::kReachedUpdateTime:
-    case IntegratorBase<T>::kReachedPublishTime:
-      return kTimeTriggered;
-
-    // We do nothing for these two cases.
-    case IntegratorBase<T>::kTimeHasAdvanced:
-    case IntegratorBase<T>::kReachedBoundaryTime:
-      return kNothingTriggered;
-
-    case IntegratorBase<T>::kReachedZeroCrossing:
-    case IntegratorBase<T>::kReachedStepLimit:
-      throw std::logic_error("Unexpected integrator result");
-  }
-
-  DRAKE_UNREACHABLE();
-}
-
 }  // namespace systems
 }  // namespace drake
+
+DRAKE_DECLARE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS(
+    class drake::systems::Simulator)
diff --git a/systems/analysis/simulator_gflags.h b/systems/analysis/simulator_gflags.h
index 606a1b8b3a89..b3b6309579f3 100644
--- a/systems/analysis/simulator_gflags.h
+++ b/systems/analysis/simulator_gflags.h
@@ -5,7 +5,6 @@
 /// Only include this from translation units that declare a `main` function.
 #include 
-#include 
 #include 
diff --git a/systems/analysis/test/initial_value_problem_test.cc b/systems/analysis/test/initial_value_problem_test.cc
index 099a6f65ca17..6220c735ea0a 100644
--- a/systems/analysis/test/initial_value_problem_test.cc
+++ b/systems/analysis/test/initial_value_problem_test.cc
@@ -6,7 +6,6 @@
 
 #include "drake/common/test_utilities/eigen_matrix_compare.h"
 #include "drake/common/test_utilities/expect_throws_message.h"
-#include "drake/systems/analysis/initial_value_problem-inl.h"
 #include "drake/systems/analysis/integrator_base.h"
 #include "drake/systems/analysis/runge_kutta2_integrator.h"
 #include "drake/systems/framework/basic_vector.h"
diff --git a/systems/analysis/test/scalar_initial_value_problem_test.cc b/systems/analysis/test/scalar_initial_value_problem_test.cc
index b826bc1743d5..8d3ab8ad9275 100644
--- a/systems/analysis/test/scalar_initial_value_problem_test.cc
+++ b/systems/analysis/test/scalar_initial_value_problem_test.cc
@@ -6,7 +6,6 @@
 
 #include "drake/common/unused.h"
 #include "drake/systems/analysis/integrator_base.h"
 #include "drake/systems/analysis/runge_kutta2_integrator.h"
-#include "drake/systems/analysis/scalar_initial_value_problem-inl.h"
 
 namespace drake {
 namespace systems {
diff --git a/systems/analysis/test_utilities/controlled_spring_mass_system.cc b/systems/analysis/test_utilities/controlled_spring_mass_system.cc
index f14a959724c7..2787d4f6c264 100644
--- a/systems/analysis/test_utilities/controlled_spring_mass_system.cc
+++ b/systems/analysis/test_utilities/controlled_spring_mass_system.cc
@@ -1,6 +1,5 @@
 #include "drake/systems/analysis/test_utilities/controlled_spring_mass_system.h"
 
-#include "drake/common/autodiff.h"
 #include "drake/common/eigen_types.h"
 #include "drake/systems/framework/diagram_builder.h"
 #include "drake/systems/primitives/demultiplexer.h"
@@ -118,8 +117,8 @@ const {
   return *plant_;
 }
 
-template class PidControlledSpringMassSystem<double>;
-template class PidControlledSpringMassSystem<AutoDiffXd>;
-
 }  // namespace systems
 }  // namespace drake
+
+DRAKE_DEFINE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS(
+    class drake::systems::PidControlledSpringMassSystem)
diff --git a/systems/analysis/test_utilities/controlled_spring_mass_system.h b/systems/analysis/test_utilities/controlled_spring_mass_system.h
index 7020f193d220..dd75ccff8dbc 100644
--- a/systems/analysis/test_utilities/controlled_spring_mass_system.h
+++ b/systems/analysis/test_utilities/controlled_spring_mass_system.h
@@ -2,6 +2,7 @@
 
 #include 
 
+#include "drake/common/default_scalars.h"
 #include "drake/common/drake_copyable.h"
 #include "drake/systems/controllers/pid_controller.h"
 #include "drake/systems/framework/diagram.h"
@@ -69,3 +70,6 @@ class PidControlledSpringMassSystem : public Diagram<T> {
 
 }  // namespace systems
 }  // namespace drake
+
+DRAKE_DECLARE_CLASS_TEMPLATE_INSTANTIATIONS_ON_DEFAULT_NONSYMBOLIC_SCALARS(
+    class drake::systems::PidControlledSpringMassSystem)
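
Note on the instantiation pattern used throughout this patch: the paired
DRAKE_DECLARE_.../DRAKE_DEFINE_..._ON_DEFAULT_NONSYMBOLIC_SCALARS macros
replace the old -inl.h headers with the standard extern-template idiom,
declaring the supported non-symbolic scalar instantiations (double and
AutoDiffXd) in the header and defining them exactly once in the .cc file.
The sketch below shows the underlying C++ mechanism with a hypothetical
MyClass and Twice(); it is not Drake's macro expansion, just a minimal
single-file illustration of the idiom under those assumed names.

// extern_template_sketch.cc -- hypothetical example, for illustration only.
#include <cstdio>

template <typename T>
class MyClass {
 public:
  // Stand-in for a nontrivial member whose definition should stay out of the
  // header.
  T Twice(const T& value) const;
};

// In a real split this line would live in the header: it suppresses implicit
// instantiation of MyClass<double> in every including translation unit.
extern template class MyClass<double>;

// In a real split everything below would live in the lone .cc file.
template <typename T>
T MyClass<T>::Twice(const T& value) const {
  return value + value;
}

// Explicit instantiation: the one place MyClass<double>'s members are emitted.
template class MyClass<double>;

int main() {
  MyClass<double> c;
  std::printf("%g\n", c.Twice(2.0));  // Prints 4.
  return 0;
}

Keeping the explicit instantiations in a single translation unit is what lets
the member-function bodies move out of the headers without linker errors,
which is the whole point of deleting the -inl.h files above.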
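
For reference while reviewing the Simulator code moved above:
IsolateWitnessTriggers() is essentially a bisection search with the left
endpoint pinned at t0. The toy program below mirrors that loop under stated
assumptions; the Witness struct, ShouldTrigger() sign test, IsolateTriggers()
name, and iso_window tolerance are all illustrative inventions, not Drake's
API. It either reports a trigger isolated to within the window of t0, or an
intermediate "safe" time, matching the early return in the moved code.

// witness_isolation_sketch.cc -- illustrative toy, not Drake's API.
#include <cstdio>
#include <functional>
#include <vector>

// A witness is just a scalar function of time here; the real Simulator
// evaluates WitnessFunctions on the System's Context instead.
struct Witness {
  std::function<double(double)> value;
};

// Simplified trigger test: a sign change (or zero touch) between w0 and wc.
bool ShouldTrigger(double w0, double wc) {
  return (w0 > 0.0 && wc <= 0.0) || (w0 < 0.0 && wc >= 0.0);
}

// Bisection with the left endpoint pinned at t0. Returns true if a trigger
// persists all the way down to an interval no longer than iso_window (then
// *tw is the isolated time); returns false as soon as some midpoint c shows
// no trigger over [t0, c], in which case *tw = c is safe to advance to and
// the search resumes on the next outer step.
bool IsolateTriggers(const std::vector<Witness>& witnesses, double t0,
                     double tf, double iso_window, double* tw) {
  double a = t0;
  double b = tf;
  do {
    const double c = 0.5 * (a + b);
    bool triggered = false;
    for (const Witness& w : witnesses) {
      if (ShouldTrigger(w.value(t0), w.value(c))) triggered = true;
    }
    if (!triggered) {
      *tw = c;  // No witness triggers over [t0, c].
      return false;
    }
    b = c;      // Some witness triggers over [t0, c]; keep searching left.
  } while (b - a > iso_window);
  *tw = b;
  return true;
}

int main() {
  // Toy witness that crosses zero at t = 0.3.
  std::vector<Witness> witnesses;
  witnesses.push_back(Witness{[](double t) { return 0.3 - t; }});
  double tw = 0.0;
  if (IsolateTriggers(witnesses, 0.0, 1.0, 1e-6, &tw)) {
    std::printf("witness isolated near t = %.7f\n", tw);
  } else {
    std::printf("no trigger over [0, %.7f]; safe to advance there\n", tw);
  }
  return 0;
}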