[lit] Add an --output option, for writing results in a machine readable form.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@190738 91177308-0d34-0410-b5e6-96231b3b80d8
ddunbar committed Sep 14, 2013
1 parent e94e098 commit 2849503
Showing 5 changed files with 96 additions and 3 deletions.
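The file written by the new --output option is plain JSON, so results can be post-processed with a few lines of Python. The sketch below is illustrative only: it assumes lit was run with `--output results.json` (a hypothetical path) and relies on the `__version__`, `elapsed`, `tests`, `name`, and `code` fields produced by the write_test_results() function added in this commit.

# Sketch: summarize a results file written by "lit ... --output results.json".
# "results.json" is a hypothetical path used only for this example.
import json

with open('results.json') as f:
    results = json.load(f)

print('schema version:', results['__version__'])
print('testing time: %.2fs' % results['elapsed'])
for test in results['tests']:
    if test['code'] not in ('PASS', 'XFAIL'):
        print('%-6s %s' % (test['code'], test['name']))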
21 changes: 21 additions & 0 deletions utils/lit/lit/Test.py
@@ -35,6 +35,21 @@ def __repr__(self):

class MetricValue(object):
    def format(self):
        """
        format() -> str

        Convert this metric to a string suitable for displaying as part of the
        console output.
        """
        raise RuntimeError("abstract method")

    def todata(self):
        """
        todata() -> json-serializable data

        Convert this metric to content suitable for serializing in the JSON test
        output.
        """
        raise RuntimeError("abstract method")

class IntMetricValue(MetricValue):
@@ -44,13 +59,19 @@ def __init__(self, value):
    def format(self):
        return str(self.value)

    def todata(self):
        return self.value

class RealMetricValue(MetricValue):
    def __init__(self, value):
        self.value = value

    def format(self):
        return '%.4f' % self.value

    def todata(self):
        return self.value

# Test results.

class Result(object):
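For context, the todata() hooks above are what the new write_test_results() function in main.py (below) calls when serializing per-test metrics. A rough sketch of how a test format attaches metrics, assuming the Result.addMetric() helper used elsewhere in lit's own test suite:

# Sketch: attaching metrics to a result so they show up under "metrics" in the
# JSON output. Assumes lit.Test.Result provides addMetric(), as used by the
# test-data format in lit's own tests; values mirror metrics.ini below.
import lit.Test

result = lit.Test.Result(lit.Test.PASS, 'Test passed.')
result.addMetric('value0', lit.Test.IntMetricValue(1))
result.addMetric('value1', lit.Test.RealMetricValue(2.3456))

# write_test_results() serializes each metric with todata():
print(dict((k, v.todata()) for k, v in result.metrics.items()))
# -> {'value0': 1, 'value1': 2.3456}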
49 changes: 48 additions & 1 deletion utils/lit/lit/main.py
@@ -69,6 +69,45 @@ def update(self, test):
        # Ensure the output is flushed.
        sys.stdout.flush()

def write_test_results(run, lit_config, testing_time, output_path):
    try:
        import json
    except ImportError:
        lit_config.fatal('test output unsupported with Python 2.5')

    # Construct the data we will write.
    data = {}
    # Encode the current lit version as a schema version.
    data['__version__'] = lit.__versioninfo__
    data['elapsed'] = testing_time
    # FIXME: Record some information on the lit configuration used?
    # FIXME: Record information from the individual test suites?

    # Encode the tests.
    data['tests'] = tests_data = []
    for test in run.tests:
        test_data = {
            'name' : test.getFullName(),
            'code' : test.result.code.name,
            'output' : test.result.output,
            'elapsed' : test.result.elapsed }

        # Add test metrics, if present.
        if test.result.metrics:
            test_data['metrics'] = metrics_data = {}
            for key, value in test.result.metrics.items():
                metrics_data[key] = value.todata()

        tests_data.append(test_data)

    # Write the output.
    f = open(output_path, 'w')
    try:
        json.dump(data, f, indent=2, sort_keys=True)
        f.write('\n')
    finally:
        f.close()

def main(builtinParameters = {}):
    # Bump the GIL check interval, its more important to get any one thread to a
    # blocking operation (hopefully exec) than to try and unblock other threads.
@@ -103,6 +142,9 @@ def main(builtinParameters = {}):
group.add_option("-v", "--verbose", dest="showOutput",
help="Show all test output",
action="store_true", default=False)
group.add_option("-o", "--output", dest="output_path",
help="Write test results to the provided path",
action="store", type=str, metavar="PATH")
group.add_option("", "--no-progress-bar", dest="useProgressBar",
help="Do not use curses based progress bar",
action="store_false", default=True)
@@ -289,8 +331,13 @@ def main(builtinParameters = {}):
        sys.exit(2)
    display.finish()

    testing_time = time.time() - startTime
    if not opts.quiet:
-        print('Testing Time: %.2fs'%(time.time() - startTime))
+        print('Testing Time: %.2fs' % (testing_time,))

    # Write out the test data, if requested.
    if opts.output_path is not None:
        write_test_results(run, litConfig, testing_time, opts.output_path)

    # List test results organized by kind.
    hasFailures = False
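To make the shape of the records built by write_test_results() concrete, the entry for the metrics.ini test below would look roughly like this (the elapsed value is a made-up placeholder; the other values come from the FileCheck expectations in test-output.py):

# Sketch of one element of data['tests']; 'elapsed' is a placeholder.
test_data = {
    'name'    : 'test-data :: metrics.ini',
    'code'    : 'PASS',
    'output'  : 'Test passed.',
    'elapsed' : 0.01,
    'metrics' : {'value0': 1, 'value1': 2.3456},
}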
2 changes: 1 addition & 1 deletion utils/lit/tests/Inputs/test-data/metrics.ini
@@ -1,6 +1,6 @@
[global]
result_code = PASS
-result_output = 'Test passed.'
+result_output = Test passed.

[results]
value0 = 1
6 changes: 5 additions & 1 deletion utils/lit/tests/lit.cfg
@@ -23,7 +23,7 @@ config.excludes = ['Inputs']
config.test_source_root = os.path.dirname(__file__)
config.test_exec_root = config.test_source_root

-config.target_triple = None
+config.target_triple = '(unused)'

src_root = os.path.join(config.test_source_root, '..')
config.environment['PYTHONPATH'] = src_root
@@ -39,3 +39,7 @@ config.substitutions.append(('%{python}', sys.executable))
if lit_config.params.get('check-coverage', None):
    config.environment['COVERAGE_PROCESS_START'] = os.path.join(
        os.path.dirname(__file__), ".coveragerc")

# Add a feature to detect the Python version.
config.available_features.add("python%d.%d" % (sys.version_info[0],
                                                sys.version_info[1]))
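The feature added above is what lets the new test-output.py test (below) declare `XFAIL: python2.5`, since write_test_results() needs the json module that Python 2.5 lacks. For the running interpreter the feature string is computed as, for example:

# Sketch: the feature string added to config.available_features, e.g.
# "python2.7" when running under Python 2.7.
import sys
print("python%d.%d" % (sys.version_info[0], sys.version_info[1]))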
21 changes: 21 additions & 0 deletions utils/lit/tests/test-output.py
@@ -0,0 +1,21 @@
# XFAIL: python2.5

# RUN: %{lit} -j 1 -v %{inputs}/test-data --output %t.results.out > %t.out
# RUN: FileCheck < %t.results.out %s

# CHECK: {
# CHECK: "__version__"
# CHECK: "elapsed"
# CHECK-NEXT: "tests": [
# CHECK-NEXT: {
# CHECK-NEXT: "code": "PASS",
# CHECK-NEXT: "elapsed": {{[0-9.]+}},
# CHECK-NEXT: "metrics": {
# CHECK-NEXT: "value0": 1,
# CHECK-NEXT: "value1": 2.3456
# CHECK-NEXT: }
# CHECK-NEXT: "name": "test-data :: metrics.ini",
# CHECK-NEXT: "output": "Test passed."
# CHECK-NEXT: }
# CHECK-NEXT: ]
# CHECK-NEXT: }
