[lit] Add support for attaching arbitrary metrics to test results.
 - This is a work-in-progress and all details are subject to change, but I am
   trying to build up support for allowing lit to be used as a driver for
   performance tests (or other tests which might want to record information
   beyond simple PASS/FAIL).

llvm-svn: 190535
ddunbar committed Sep 11, 2013
1 parent 6a44af3 commit 9aeba49
Showing 5 changed files with 119 additions and 2 deletions.
43 changes: 42 additions & 1 deletion llvm/utils/lit/lit/Test.py
@@ -1,6 +1,6 @@
import os

-# Test results.
+# Test result codes.

class ResultCode(object):
"""Test result codes."""
@@ -31,6 +31,28 @@ def __repr__(self):
UNRESOLVED = ResultCode('UNRESOLVED', True)
UNSUPPORTED = ResultCode('UNSUPPORTED', False)

# Test metric values.

class MetricValue(object):
    def format(self):
        raise RuntimeError("abstract method")

class IntMetricValue(MetricValue):
    def __init__(self, value):
        self.value = value

    def format(self):
        return str(self.value)

class RealMetricValue(MetricValue):
    def __init__(self, value):
        self.value = value

    def format(self):
        return '%.4f' % self.value

# Test results.

class Result(object):
"""Wrapper for the results of executing an individual test."""

@@ -41,6 +63,25 @@ def __init__(self, code, output='', elapsed=None):
        self.output = output
        # The wall timing to execute the test, if timing.
        self.elapsed = elapsed
        # The metrics reported by this test.
        self.metrics = {}

    def addMetric(self, name, value):
        """
        addMetric(name, value)

        Attach a test metric to the test result, with the given name and
        value. It is an error to attempt to attach metrics with the same
        name multiple times.

        Each value must be an instance of a MetricValue subclass.
        """
        if name in self.metrics:
            raise ValueError("result already includes metrics for %r" % (
                    name,))
        if not isinstance(value, MetricValue):
            raise TypeError("unexpected metric value: %r" % (value,))
        self.metrics[name] = value

# Test classes.

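For illustration, a test format's execute() method might use the API added above like this (a minimal sketch against this commit's additions; the metric names are hypothetical):

import lit.Test

# Build a passing result and attach two metrics, one integer and one
# floating point. (Hypothetical metric names, for illustration only.)
result = lit.Test.Result(lit.Test.PASS, 'Test passed.')
result.addMetric('exec_iterations', lit.Test.IntMetricValue(100))
result.addMetric('exec_time', lit.Test.RealMetricValue(1.2345))

# addMetric raises ValueError if the same name is attached twice, and
# TypeError if the value is not a MetricValue instance.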
15 changes: 14 additions & 1 deletion llvm/utils/lit/lit/main.py
@@ -45,15 +45,28 @@ def update(self, test):
        if self.progressBar:
            self.progressBar.clear()

-        print('%s: %s (%d of %d)' % (test.result.code.name, test.getFullName(),
+        # Show the test result line.
+        test_name = test.getFullName()
+        print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
                                      self.completed, self.numTests))

        # Show the test failure output, if requested.
        if test.result.code.isFailure and self.opts.showOutput:
            print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                              '*'*20))
            print(test.result.output)
            print("*" * 20)

        # Report test metrics, if present.
        if test.result.metrics:
            print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
                                               '*'*10))
            items = sorted(test.result.metrics.items())
            for metric_name, value in items:
                print('%s: %s ' % (metric_name, value.format()))
            print("*" * 10)

        # Ensure the output is flushed.
        sys.stdout.flush()

def main(builtinParameters = {}):
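Given the reporting code above, a verbose run of a test carrying metrics prints roughly the following (a sketch based on the FileCheck expectations in test-data.py below; the '(1 of 1)' count assumes a single test in the run):

PASS: test-data :: metrics.ini (1 of 1)
********** TEST 'test-data :: metrics.ini' RESULTS **********
value0: 1
value1: 2.3456
**********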
44 changes: 44 additions & 0 deletions llvm/utils/lit/tests/Inputs/test-data/lit.cfg
@@ -0,0 +1,44 @@
import os
try:
    import ConfigParser
except ImportError:
    import configparser as ConfigParser

import lit.formats
import lit.Test

class DummyFormat(lit.formats.FileBasedTest):
    def execute(self, test, lit_config):
        # In this dummy format, expect that each test file is actually just a
        # .ini format dump of the results to report.

        source_path = test.getSourcePath()

        cfg = ConfigParser.ConfigParser()
        cfg.read(source_path)

        # Create the basic test result.
        result_code = cfg.get('global', 'result_code')
        result_output = cfg.get('global', 'result_output')
        result = lit.Test.Result(getattr(lit.Test, result_code),
                                 result_output)

        # Load additional metrics.
        for key,value_str in cfg.items('results'):
            value = eval(value_str)
            if isinstance(value, int):
                metric = lit.Test.IntMetricValue(value)
            elif isinstance(value, float):
                metric = lit.Test.RealMetricValue(value)
            else:
                raise RuntimeError("unsupported result type")
            result.addMetric(key, metric)

        return result

config.name = 'test-data'
config.suffixes = ['.ini']
config.test_format = DummyFormat()
config.test_source_root = None
config.test_exec_root = None
config.target_triple = None
7 changes: 7 additions & 0 deletions llvm/utils/lit/tests/Inputs/test-data/metrics.ini
@@ -0,0 +1,7 @@
[global]
result_code = PASS
result_output = 'Test passed.'

[results]
value0 = 1
value1 = 2.3456
12 changes: 12 additions & 0 deletions llvm/utils/lit/tests/test-data.py
@@ -0,0 +1,12 @@
# Test features related to formats which support reporting additional test data.

# RUN: %{lit} -j 1 -v %{inputs}/test-data > %t.out
# RUN: FileCheck < %t.out %s

# CHECK: -- Testing:

# CHECK: PASS: test-data :: metrics.ini
# CHECK-NEXT: *** TEST 'test-data :: metrics.ini' RESULTS ***
# CHECK-NEXT: value0: 1
# CHECK-NEXT: value1: 2.3456
# CHECK-NEXT: ***
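Because MetricValue is an abstract base class whose only contract is format(), formats can define additional value types beyond the two added in this commit. A hypothetical sketch (StringMetricValue is not part of this change):

import lit.Test

# Hypothetical: a metric value wrapping string data, following the same
# pattern as IntMetricValue and RealMetricValue.
class StringMetricValue(lit.Test.MetricValue):
    def __init__(self, value):
        self.value = value

    def format(self):
        # Render the string unchanged for console reporting.
        return self.value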
