Skip to content

Commit

Permalink
tools/performance: Start gathering sharable infrastructure (RobotLocomotion#14505)
Browse files Browse the repository at this point in the history

* tools/performance: Split useful tooling out from cassie_bench

Relevant to: RobotLocomotion#14464

Gather the sharable bits of experiment execution scripting and port the
cassie benchmark to use it.

This is a first pass at factoring out the common stuff from the
cassie_benchmark tree, for use by other benchmarks. It hoists the basic
techniques for data collection and variance reduction. As this gathers
more uses, it can be polished more.

Also missing from this patch is any documentation or tooling to help
with using the compare.py script from the googlebenchmark package.
  • Loading branch information
rpoyner-tri authored Jan 28, 2021
1 parent 528e338 commit ce36a0f
Show file tree
Hide file tree
Showing 8 changed files with 337 additions and 145 deletions.
8 changes: 1 addition & 7 deletions examples/multibody/cassie_benchmark/BUILD.bazel
Original file line number Diff line number Diff line change
Expand Up @@ -29,15 +29,9 @@ sh_test(
name = "record_results",
size = "small",
srcs = ["record_results.sh"],
# Debug-configured test runs are nice for coverage, but not very useful
# otherwise. Don't waste too much time on them.
args = select({
"//tools/cc_toolchain:debug": ["--benchmark_repetitions=1"],
"//conditions:default": [],
}),
data = [
":cassie_bench",
"//tools/workspace/cc:identify_compiler",
"//tools/performance:record_results",
],
tags = ["no_valgrind_tools"],
)
Expand Down
91 changes: 6 additions & 85 deletions examples/multibody/cassie_benchmark/conduct_experiment
Original file line number Diff line number Diff line change
Expand Up @@ -3,93 +3,14 @@

set -e -u -o pipefail

ME=$(readlink -f $0)
HERE=$(dirname $ME)
TARGET=//examples/multibody/cassie_benchmark:record_results
ME=$(python3 -c 'import os; print(os.path.realpath("'"$0"'"))')
HERE=$(dirname $ME)

NO_TURBO_CONTROL_FILE=/sys/devices/system/cpu/intel_pstate/no_turbo

CPU_GOVERNOR=
NO_TURBO=

die () {
    # Print an error message and terminate the script with failure.
    # Fixes: the original used $"$@" -- bash locale-translation quoting,
    # almost certainly a typo for "$@" -- and wrote diagnostics to
    # stdout; error messages belong on stderr.
    printf '%s\n' "$*" >&2
    exit 1
}

is_default_ubuntu () {
    # True iff we are on the stock supported platform.
    # Guard on the OS first so lsb_release is never run on non-Linux.
    [[ $(uname) = "Linux" ]] || return 1
    # The inner $(echo ...) squashes lsb_release's two output lines
    # ("Ubuntu" and "18.04") into one space-separated string.
    [[ $(echo $(lsb_release -irs)) = "Ubuntu 18.04" ]]
}

is_default_compiler () {
    # True iff neither CC nor CXX is set at all (even to the empty
    # string); experiments are only comparable with the default
    # toolchain.
    ! [[ -v CC || -v CXX ]]
}

is_supported_cpu () {
    # The intel_pstate no_turbo control file only exists on supported
    # configurations (Linux with the Intel pstate driver); use its
    # presence as the capability probe.
    test -e "${NO_TURBO_CONTROL_FILE}"
}

say () {
    # Print a banner padded by blank lines, e.g.
    # "=== Run the experiment. ===", to mark experiment phases.
    printf '\n=== %s ===\n\n' "$*"
}

# Print the currently active cpufreq scaling governor name (e.g.
# "performance" or "powersave") on stdout.
# The sed expression extracts the quoted word after 'governor' from the
# `cpupower frequency-info -p` output; with -n, only lines where the
# substitution matched (flag 'p') are printed.
get_cpu_governor () {
cpupower frequency-info -p |sed -n 's%.*governor "\([^"]*\)".*%\1%pg'
}

# Set the cpufreq scaling governor.
# $1 - governor name to apply (e.g. "performance").
# Requires root; the script validates sudo access up front to avoid
# prompting mid-experiment.
set_cpu_governor () {
sudo cpupower frequency-set --governor "$1"
}

get_no_turbo () {
    # Report the current intel_pstate "no_turbo" flag (0 or 1) on
    # stdout, straight from the sysfs control file.
    local control_file="${NO_TURBO_CONTROL_FILE}"
    cat "${control_file}"
}

# Write $1 (1 disables turbo boost, 0 re-enables it) to the
# intel_pstate no_turbo control file.  The echo-plus-redirection runs
# inside `sudo sh -c` because the redirection itself needs root;
# `sudo echo ... > file` would perform the redirect as the unprivileged
# caller and fail.
set_no_turbo () {
sudo sh -c "echo $1 > $NO_TURBO_CONTROL_FILE"
}

clean () {
    # EXIT-trap handler: restore whatever CPU speed settings we changed.
    # Fixes: the original used `[[ -n "$X" ]] && restore`, which under
    # `set -e` aborts the trap when CPU_GOVERNOR is empty (the compound
    # returns 1) or when the first restore fails -- leaving no_turbo
    # permanently set.  Explicit `if`s plus warn-and-continue guarantee
    # both restores are attempted.
    say Restore CPU speed settings.
    if [[ -n "$CPU_GOVERNOR" ]]; then
        set_cpu_governor "$CPU_GOVERNOR" \
            || echo "warning: failed to restore CPU governor" >&2
    fi
    if [[ -n "$NO_TURBO" ]]; then
        set_no_turbo "$NO_TURBO" \
            || echo "warning: failed to restore no_turbo setting" >&2
    fi
}

say Validate input.
[[ "$#" -ge 1 ]] || die "missing argument: destination directory"
DESTINATION="$1"
OUTPUT_DIR="$1"
shift

say Validate environment.
is_default_ubuntu || die "experiments only supported on default platform"
is_default_compiler || die "experiments only supported with default compiler"
is_supported_cpu || die "experiments only supported with Intel CPUs"

say Validate sudo access, to avoid later interruptions.
sudo -v

say Install tools for CPU speed control.
sudo apt install linux-tools-$(uname -r)

say Build code.
bazel build "$TARGET"

say Wait for lingering activity to subside.
sync
sleep 10

say Control CPU speed variation.
trap clean EXIT
CPU_GOVERNOR=$(get_cpu_governor)
NO_TURBO=$(get_no_turbo)
set_cpu_governor performance
set_no_turbo 1

say Run the experiment.
bazel run "$TARGET" -- "$@"
cd "$HERE"/../../..

say Save data.
"$HERE"/copy_results_to "$DESTINATION"
./tools/performance/benchmark_tool conduct_experiment \
$TARGET "$OUTPUT_DIR" -- "$@"
19 changes: 5 additions & 14 deletions examples/multibody/cassie_benchmark/copy_results_to
Original file line number Diff line number Diff line change
Expand Up @@ -3,19 +3,10 @@

set -e -u -o pipefail

die () {
    # Print an error message to stderr and terminate with failure.
    # Fixes: the original's $"$@" (bash locale-translation quoting) was
    # almost certainly a typo for "$@", and errors belong on stderr.
    printf '%s\n' "$*" >&2
    exit 1
}
TARGET=//examples/multibody/cassie_benchmark:record_results
ME=$(python3 -c 'import os; print(os.path.realpath("'"$0"'"))')
HERE=$(dirname $ME)

cd "$HERE"/../../..

[[ "$#" -ge 1 ]] || die "missing argument: destination directory"

DST="$1"

TESTLOGS=$(bazel info bazel-testlogs)
TARGET="examples/multibody/cassie_benchmark/record_results"
SRC="${TESTLOGS}/${TARGET}/test.outputs"

mkdir -p "$DST"
cp -av "$SRC"/* "$DST"
./tools/performance/benchmark_tool copy_results $TARGET "$1"
42 changes: 3 additions & 39 deletions examples/multibody/cassie_benchmark/record_results.sh
Original file line number Diff line number Diff line change
@@ -1,42 +1,6 @@
#!/bin/bash
# Collect context information for a benchmark experiment.
# TODO(rpoyner-tri) find a robust way of recording source code version
# information.

set -e -u -o pipefail

uname -a > ${TEST_UNDECLARED_OUTPUTS_DIR}/kernel.txt || true

# Fill this in with a platform-specific command to control processor affinity,
# if any.
AFFINITY_COMMAND=""

case $(uname) in
Linux)
lsb_release -idrc
# Choosing processor #0 is arbitrary. It is up to experimenters
# to ensure it is reliably idle during experiments.
AFFINITY_COMMAND="taskset 0x1"
;;
Darwin)
sw_vers
;;
*)
echo unknown
;;
esac > ${TEST_UNDECLARED_OUTPUTS_DIR}/os.txt

${TEST_SRCDIR}/drake/tools/workspace/cc/identify_compiler \
> ${TEST_UNDECLARED_OUTPUTS_DIR}/compiler.txt

${AFFINITY_COMMAND} \
${TEST_SRCDIR}/drake/examples/multibody/cassie_benchmark/cassie_bench \
--benchmark_display_aggregates_only=true \
--benchmark_repetitions=9 \
--benchmark_out_format=json \
--benchmark_out=${TEST_UNDECLARED_OUTPUTS_DIR}/results.json \
"$@" \
>& ${TEST_UNDECLARED_OUTPUTS_DIR}/summary.txt

echo Full results are in:
echo ${TEST_UNDECLARED_OUTPUTS_DIR}/
${TEST_SRCDIR}/drake/tools/performance/record_results \
${TEST_SRCDIR}/drake/examples/multibody/cassie_benchmark/cassie_bench \
"$@"
16 changes: 16 additions & 0 deletions tools/performance/BUILD.bazel
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
# -*- python -*-

load("//tools/lint:lint.bzl", "add_lint_tests")
load("//tools/skylark:py.bzl", "py_binary")

package(default_visibility = ["//visibility:public"])

py_binary(
name = "record_results",
srcs = ["record_results.py"],
data = [
"//tools/workspace/cc:identify_compiler",
],
)

add_lint_tests(python_lint_extra_srcs = ["benchmark_tool"])
18 changes: 18 additions & 0 deletions tools/performance/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
# Tools support for performance analysis

This directory contains tools to help with careful performance measurement and
analysis of Drake programs.

## Benchmarking tools

Included here are two tools to help with well-controlled benchmark experiments:

* `record_results.py` -- run benchmark under bazel, record context and results
* `benchmark_tool` -- outside-bazel tool for experiments and data handling

A fully worked example that integrates these two tools is available at
`drake/examples/multibody/cassie_benchmark`. Some of the history of attempts to
drive variance out of benchmark results is captured in #13902.

TODO(rpoyner-tri): explain how to use compare.py from the googlebenchmark
package to compare stored results from different experiments.
Loading

0 comments on commit ce36a0f

Please sign in to comment.