
[GR-11367] Removing AveragingBenchmarkMixin since it is available in mx_benchmark.

PullRequest: graal/2282
farquet committed Oct 10, 2018
2 parents 20bb0c0 + 04c247a commit a0c5ba9
Showing 1 changed file with 3 additions and 41 deletions.
compiler/mx.compiler/mx_graal_benchmark.py: 44 changes (3 additions & 41 deletions)
@@ -495,44 +495,6 @@ def getBenchmarkName(self):
        return self.currentBenchname


-class AveragingBenchmarkMixin(object):
-    """Provides utilities for computing the average time of the latest warmup runs.
-    Note that this mixin expects that the main benchmark class produces a sequence of
-    datapoints that have the metric.name dimension set to "warmup".
-    To add the average, this mixin appends a new datapoint whose metric.name dimension
-    is set to "time".
-    Benchmarks that mix in this class must manually invoke methods for computing extra
-    iteration counts and averaging, usually in their run method.
-    """
-
-    def getExtraIterationCount(self, iterations):
-        # Uses the number of warmup iterations to calculate the number of extra
-        # iterations needed by the benchmark to compute a more stable average result.
-        return min(20, iterations, max(6, int(iterations * 0.4)))
-
-    def addAverageAcrossLatestResults(self, results):
-        # Postprocess the results to compute the final time as the average of the last N runs,
-        # where N is 40% of the iteration count, at least 6 and at most 20 (see getExtraIterationCount).
-        warmupResults = [result for result in results if result["metric.name"] == "warmup"]
-        if warmupResults:
-            lastIteration = max((result["metric.iteration"] for result in warmupResults))
-            resultIterations = self.getExtraIterationCount(lastIteration + 1)
-            totalTimeForAverage = 0.0
-            for i in range(lastIteration - resultIterations + 1, lastIteration + 1):
-                result = next((result for result in warmupResults if result["metric.iteration"] == i), None)
-                if result:
-                    totalTimeForAverage += result["metric.value"]
-                else:
-                    resultIterations -= 1
-            averageResult = next(result for result in warmupResults if result["metric.iteration"] == 0).copy()
-            averageResult["metric.value"] = totalTimeForAverage / resultIterations
-            averageResult["metric.name"] = "time"
-            averageResult["metric.average-over"] = resultIterations
-            results.append(averageResult)


class TemporaryWorkdirMixin(mx_benchmark.VmBenchmarkSuite):
    def before(self, bmSuiteArgs):
        parser = mx_benchmark.parsers["temporary_workdir_parser"].parser
@@ -571,7 +533,7 @@ def parserNames(self):
        return super(TemporaryWorkdirMixin, self).parserNames() + ["temporary_workdir_parser"]


-class BaseDaCapoBenchmarkSuite(mx_benchmark.JavaBenchmarkSuite, AveragingBenchmarkMixin, TemporaryWorkdirMixin):
+class BaseDaCapoBenchmarkSuite(mx_benchmark.JavaBenchmarkSuite, mx_benchmark.AveragingBenchmarkMixin, TemporaryWorkdirMixin):
    """Base benchmark suite for DaCapo-based benchmarks.
    This suite can only run a single benchmark in one VM invocation.
@@ -1665,7 +1627,7 @@ def getJMHEntry(self, bmSuiteArgs):
mx_benchmark.add_bm_suite(JMHDistWhiteboxBenchmarkSuite())


-class RenaissanceBenchmarkSuite(mx_benchmark.JavaBenchmarkSuite, AveragingBenchmarkMixin, TemporaryWorkdirMixin):
+class RenaissanceBenchmarkSuite(mx_benchmark.JavaBenchmarkSuite, mx_benchmark.AveragingBenchmarkMixin, TemporaryWorkdirMixin):
    """Renaissance benchmark suite implementation.
    """
    def name(self):
@@ -1763,7 +1725,7 @@ def run(self, benchmarks, bmSuiteArgs):
mx_benchmark.add_bm_suite(RenaissanceBenchmarkSuite())


-class SparkSqlPerfBenchmarkSuite(mx_benchmark.JavaBenchmarkSuite, AveragingBenchmarkMixin, TemporaryWorkdirMixin):
+class SparkSqlPerfBenchmarkSuite(mx_benchmark.JavaBenchmarkSuite, mx_benchmark.AveragingBenchmarkMixin, TemporaryWorkdirMixin):
    """Benchmark suite for the spark-sql-perf benchmarks.
    """
    def name(self):
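Usage note (illustrative, not part of the diff): the removed mixin's docstring says that suites mixing it in must invoke the extra-iteration and averaging helpers themselves, usually from their run method, and the commit simply switches the suites above to the copy now living in mx_benchmark. A minimal sketch of that call pattern follows; the DemoSuite class is hypothetical and omits the other abstract suite methods a real suite must define.

import mx_benchmark

class DemoSuite(mx_benchmark.JavaBenchmarkSuite, mx_benchmark.AveragingBenchmarkMixin):
    """Hypothetical suite showing the documented contract of the mixin."""

    def run(self, benchmarks, bmSuiteArgs):
        # The base suite run is expected to emit one "warmup" datapoint per iteration.
        results = super(DemoSuite, self).run(benchmarks, bmSuiteArgs)
        # The mixin then appends a single "time" datapoint averaging the last N warmup
        # iterations, with N = min(20, iterations, max(6, int(iterations * 0.4))).
        self.addAverageAcrossLatestResults(results)
        return results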
