Skip to content

Commit

Permalink
[GR-27485] Add a pure-startup metric to our benchmarks.
Browse files Browse the repository at this point in the history
PullRequest: graal/7657
  • Loading branch information
fmorcos committed Dec 15, 2020
2 parents 50fead2 + 3adc9fb commit 9001fdb
Showing 1 changed file with 22 additions and 10 deletions.
32 changes: 22 additions & 10 deletions sulong/mx.sulong/mx_sulong_benchmarks.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@
#
import re
import shutil
import time

import mx, mx_benchmark, mx_sulong
import os
Expand Down Expand Up @@ -132,30 +133,36 @@ def successPatterns(self):

def rules(self, out, benchmarks, bmSuiteArgs):
    """Return the parsing rules that extract benchmark datapoints from stdout.

    Three rules are produced:
      * ``warmup``        -- per-iteration times of the first N warmup iterations,
      * ``time``          -- per-iteration times of the last N measured iterations,
      * ``pure-startup``  -- a single microsecond value printed by the harness
                             (matched by a plain StdOutRule, since it is one
                             number rather than an iteration series).

    All values are reported in microseconds ("us"), lower is better, and the
    identity score-function is used so the raw value is the score.
    """
    return [
        # Warmup series: "first <N> warmup iterations <bench>: v1, v2, ..."
        SulongBenchmarkRule(r'^first [\d]+ warmup iterations (?P<benchmark>[\S]+):(?P<line>([ ,]+(?:\d+(?:\.\d+)?))+)', {
            "benchmark": ("<benchmark>", str),
            "metric.name": "warmup",
            "metric.type": "numeric",
            # Times are whole microseconds, hence int rather than float.
            "metric.value": ("<score>", int),
            "metric.score-function": "id",
            "metric.better": "lower",
            "metric.unit": "us",
            "metric.iteration": ("<iteration>", int),
        }),
        # Steady-state series: "last <N> iterations <bench>: v1, v2, ..."
        SulongBenchmarkRule(r'^last [\d]+ iterations (?P<benchmark>[\S]+):(?P<line>([ ,]+(?:\d+(?:\.\d+)?))+)', {
            "benchmark": ("<benchmark>", str),
            "metric.name": "time",
            "metric.type": "numeric",
            "metric.value": ("<score>", int),
            "metric.score-function": "id",
            "metric.better": "lower",
            "metric.unit": "us",
            "metric.iteration": ("<iteration>", int),
        }),
        # Single pure-startup datapoint: "Pure-startup (microseconds) <bench>: <us>"
        mx_benchmark.StdOutRule(r'^Pure-startup \(microseconds\) (?P<benchmark>[\S]+): (?P<score>\d+)', {
            "benchmark": ("<benchmark>", str),
            "metric.name": "pure-startup",
            "metric.type": "numeric",
            "metric.value": ("<score>", int),
            "metric.score-function": "id",
            "metric.better": "lower",
            "metric.unit": "us",
            # There is exactly one startup measurement per run.
            "metric.iteration": ("0", int),
        }),
    ]

def workingDirectory(self, benchmarks, bmSuiteArgs):
Expand All @@ -171,6 +178,11 @@ def createCommandLineArgs(self, benchmarks, bmSuiteArgs):
mx.abort("Please run a specific benchmark (mx benchmark csuite:<benchmark-name>) or all the benchmarks (mx benchmark csuite:*)")
vmArgs = self.vmArgs(bmSuiteArgs)
runArgs = self.runArgs(bmSuiteArgs)
try:
runArgs += ['--time', str(int(time.clock_gettime(time.CLOCK_REALTIME) * 1000000))]
except:
# We can end up here in case the python version we're running on doesn't have clock_gettime or CLOCK_REALTIME.
pass
return vmArgs + [self.bench_to_exec[benchmarks[0]]] + runArgs

def get_vm_registry(self):
Expand Down Expand Up @@ -268,8 +280,8 @@ def name(self):
return "sulong"

def run(self, cwd, args):
bench_file = args[-1]
launcher_args = self.launcher_args(args[:-1]) + [bench_file]
bench_file_and_args = args[-3:]
launcher_args = self.launcher_args(args[:-3]) + bench_file_and_args
if hasattr(self.host_vm(), 'run_launcher'):
result = self.host_vm().run_launcher('lli', launcher_args, cwd)
else:
Expand Down

0 comments on commit 9001fdb

Please sign in to comment.