Skip to content

Commit

Permalink
Make MAX_CONCURRENT_BUILDS configurable using the command line (google#1212)
Browse files Browse the repository at this point in the history

* Make MAX_CONCURRENT_BUILDS configurable using the command line

* Update experiment/test_data/experiment-config.yaml

* Honor concurrent_builds for measurers too

* Format

* Required changes

* Remove config/
  • Loading branch information
andreafioraldi authored Aug 19, 2021
1 parent 02d3bec commit 8692edd
Show file tree
Hide file tree
Showing 4 changed files with 61 additions and 17 deletions.
39 changes: 29 additions & 10 deletions experiment/build/builder.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,10 +37,10 @@
else:
import experiment.build.local_build as buildlib

# FIXME: Make this configurable for users with the default quota of 10.
# FIXME: Use 10 as default quota.
Even though it says queueing happens, we end up exceeding limits on "get", so
be conservative. Use 30 for now since this is the limit for the FuzzBench service.
MAX_CONCURRENT_BUILDS = 30
DEFAULT_MAX_CONCURRENT_BUILDS = 30

# Build fail retries and wait interval.
NUM_BUILD_RETRIES = 3
Expand Down Expand Up @@ -89,13 +89,17 @@ def build_measurer(benchmark: str) -> bool:
return False


def build_all_measurers(benchmarks: List[str]) -> List[str]:
def build_all_measurers(
benchmarks: List[str],
num_concurrent_builds: int = DEFAULT_MAX_CONCURRENT_BUILDS
) -> List[str]:
"""Build measurers for each benchmark in |benchmarks| in parallel
Returns a list of benchmarks built successfully."""
logger.info('Building measurers.')
filesystem.recreate_directory(build_utils.get_coverage_binaries_dir())
build_measurer_args = [(benchmark,) for benchmark in benchmarks]
successful_calls = retry_build_loop(build_measurer, build_measurer_args)
successful_calls = retry_build_loop(build_measurer, build_measurer_args,
num_concurrent_builds)
logger.info('Done building measurers.')
# Return list of benchmarks (like the list we were passed as an argument)
# instead of returning a list of tuples each containing a benchmark.
Expand All @@ -118,12 +122,14 @@ def split_successes_and_failures(inputs: List,
return successes, failures


def retry_build_loop(build_func: Callable, inputs: List[Tuple]) -> List:
def retry_build_loop(build_func: Callable, inputs: List[Tuple],
num_concurrent_builds: int) -> List:
"""Calls |build_func| in parallel on |inputs|. Repeat on failures up to
|NUM_BUILD_RETRIES| times. Returns the list of inputs that |build_func| was
called successfully on."""
successes = []
with mp_pool.ThreadPool(MAX_CONCURRENT_BUILDS) as pool:
logs.info('Concurrent builds: %d.', num_concurrent_builds)
with mp_pool.ThreadPool(num_concurrent_builds) as pool:
for _ in range(NUM_BUILD_RETRIES):
logs.info('Building using (%s): %s', build_func, inputs)
results = pool.starmap(build_func, inputs)
Expand Down Expand Up @@ -159,8 +165,11 @@ def build_fuzzer_benchmark(fuzzer: str, benchmark: str) -> bool:
return True


def build_all_fuzzer_benchmarks(fuzzers: List[str],
benchmarks: List[str]) -> List[str]:
def build_all_fuzzer_benchmarks(
fuzzers: List[str],
benchmarks: List[str],
num_concurrent_builds: int = DEFAULT_MAX_CONCURRENT_BUILDS
) -> List[str]:
"""Build fuzzer,benchmark images for all pairs of |fuzzers| and |benchmarks|
in parallel. Returns a list of fuzzer,benchmark pairs that built
successfully."""
Expand All @@ -171,7 +180,8 @@ def build_all_fuzzer_benchmarks(fuzzers: List[str],
# TODO(metzman): Use an asynchronous unordered map variant to schedule
# eagerly.
successful_calls = retry_build_loop(build_fuzzer_benchmark,
build_fuzzer_benchmark_args)
build_fuzzer_benchmark_args,
num_concurrent_builds)
logger.info('Done building fuzzer benchmarks.')
return successful_calls

Expand All @@ -192,10 +202,19 @@ def main():
help='Fuzzer names.',
nargs='+',
required=True)

parser.add_argument('-n',
'--num-concurrent-builds',
help='Max concurrent builds allowed.',
type=int,
default=DEFAULT_MAX_CONCURRENT_BUILDS,
required=False)

logs.initialize()
args = parser.parse_args()

build_all_fuzzer_benchmarks(args.fuzzers, args.benchmarks)
build_all_fuzzer_benchmarks(args.fuzzers, args.benchmarks,
args.num_concurrent_builds)

return 0

Expand Down
20 changes: 15 additions & 5 deletions experiment/dispatcher.py
Original file line number Diff line number Diff line change
Expand Up @@ -100,12 +100,15 @@ def __init__(self, experiment_config_filepath: str):
self.num_trials = self.config['trials']
self.experiment_name = self.config['experiment']
self.git_hash = self.config['git_hash']
self.concurrent_builds = self.config['concurrent_builds']
self.preemptible = self.config.get('preemptible_runners')


def build_images_for_trials(fuzzers: List[str], benchmarks: List[str],
def build_images_for_trials(fuzzers: List[str],
benchmarks: List[str],
num_trials: int,
preemptible: bool) -> List[models.Trial]:
preemptible: bool,
concurrent_builds=None) -> List[models.Trial]:
"""Builds the images needed to run |experiment| and returns a list of trials
that can be run for experiment. This is the number of trials specified in
experiment times each pair of fuzzer+benchmark that builds successfully."""
Expand All @@ -114,8 +117,14 @@ def build_images_for_trials(fuzzers: List[str], benchmarks: List[str],
builder.build_base_images()

# Only build fuzzers for benchmarks whose measurers built successfully.
benchmarks = builder.build_all_measurers(benchmarks)
build_successes = builder.build_all_fuzzer_benchmarks(fuzzers, benchmarks)
if concurrent_builds is None:
benchmarks = builder.build_all_measurers(benchmarks)
build_successes = builder.build_all_fuzzer_benchmarks(
fuzzers, benchmarks)
else:
benchmarks = builder.build_all_measurers(benchmarks, concurrent_builds)
build_successes = builder.build_all_fuzzer_benchmarks(
fuzzers, benchmarks, concurrent_builds)
experiment_name = experiment_utils.get_experiment_name()
trials = []
for fuzzer, benchmark in build_successes:
Expand Down Expand Up @@ -147,7 +156,8 @@ def dispatcher_main():

trials = build_images_for_trials(experiment.fuzzers, experiment.benchmarks,
experiment.num_trials,
experiment.preemptible)
experiment.preemptible,
experiment.concurrent_builds)
_initialize_trials_in_db(trials)

create_work_subdirs(['experiment-folders', 'measurement-folders'])
Expand Down
18 changes: 16 additions & 2 deletions experiment/run_experiment.py
Original file line number Diff line number Diff line change
Expand Up @@ -222,7 +222,8 @@ def start_experiment( # pylint: disable=too-many-arguments
no_seeds=False,
no_dictionaries=False,
oss_fuzz_corpus=False,
allow_uncommitted_changes=False):
allow_uncommitted_changes=False,
concurrent_builds=None):
"""Start a fuzzer benchmarking experiment."""
if not allow_uncommitted_changes:
check_no_uncommitted_changes()
Expand All @@ -239,6 +240,7 @@ def start_experiment( # pylint: disable=too-many-arguments
config['no_dictionaries'] = no_dictionaries
config['oss_fuzz_corpus'] = oss_fuzz_corpus
config['description'] = description
config['concurrent_builds'] = concurrent_builds
config['runner_machine_type'] = config.get('runner_machine_type',
'n1-standard-1')
config['runner_num_cpu_cores'] = config.get('runner_num_cpu_cores', 1)
Expand Down Expand Up @@ -507,6 +509,10 @@ def main():
'--description',
help='Description of the experiment.',
required=False)
parser.add_argument('-cb',
'--concurrent-builds',
help='Max concurrent builds allowed.',
required=False)

all_fuzzers = fuzzer_utils.get_fuzzer_names()
parser.add_argument('-f',
Expand Down Expand Up @@ -544,6 +550,13 @@ def main():
args = parser.parse_args()
fuzzers = args.fuzzers or all_fuzzers

concurrent_builds = args.concurrent_builds
if concurrent_builds is not None:
if not concurrent_builds.isdigit():
parser.error(
"The concurrent build argument must be a positive number")
concurrent_builds = int(concurrent_builds)

start_experiment(args.experiment_name,
args.experiment_config,
args.benchmarks,
Expand All @@ -552,7 +565,8 @@ def main():
no_seeds=args.no_seeds,
no_dictionaries=args.no_dictionaries,
oss_fuzz_corpus=args.oss_fuzz_corpus,
allow_uncommitted_changes=args.allow_uncommitted_changes)
allow_uncommitted_changes=args.allow_uncommitted_changes,
concurrent_builds=concurrent_builds)
return 0


Expand Down
1 change: 1 addition & 0 deletions experiment/test_data/experiment-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -32,5 +32,6 @@ no_seeds: false
no_dictionaries: false
oss_fuzz_corpus: false
description: "Test experiment"
concurrent_builds: null
runner_num_cpu_cores: 1
runner_machine_type: 'n1-standard-1'

0 comments on commit 8692edd

Please sign in to comment.