sanitycheck: fix --failed-only handling
- Report build errors as errors, not test failures
- Do not try to build/run tests with build failures
- Fix issue with empty reports when running --only-failed
- Report build errors in the detailed and target reports

Signed-off-by: Anas Nashif <[email protected]>
nashif committed Jul 3, 2020
1 parent 95717db commit f04461e
Showing 2 changed files with 40 additions and 20 deletions.
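The bullets in the commit message come down to a three-way outcome per test instance: builds that break are now reported with status "error", runtime failures stay "failed", and every consumer that previously checked for "failed" alone now has to match against a list. A minimal, self-contained sketch of that bookkeeping (the instance dicts and the summarize() helper are illustrative only, not the sanitylib API):

```python
# Minimal sketch (not the sanitylib API): with build problems reported as
# "error" instead of "failed", consumers match a set of bad statuses rather
# than a single string.
from collections import Counter

BAD_STATUSES = {"error", "failed", "timeout"}   # mirrors the lists in the diff

def summarize(instances):
    """Tally instances per status; errors and failures stay distinguishable."""
    counts = Counter(inst["status"] for inst in instances)
    bad = sum(n for status, n in counts.items() if status in BAD_STATUSES)
    return counts, bad

instances = [
    {"name": "kernel.common", "status": "passed"},
    {"name": "net.socket",    "status": "failed"},  # ran and failed
    {"name": "drivers.uart",  "status": "error"},   # build failure, never ran
]
counts, bad = summarize(instances)
print(counts["error"], counts["failed"], bad)       # -> 1 1 2
```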
57 changes: 39 additions & 18 deletions scripts/sanity_chk/sanitylib.py
@@ -1725,7 +1725,7 @@ def run_build(self, args=[]):
self.instance.status = "skipped"
self.instance.reason = "{} overflow".format(res[0])
else:
self.instance.status = "failed"
self.instance.status = "error"
self.instance.reason = "Build failure"

results = {
@@ -1783,7 +1783,7 @@ def run_cmake(self, args=[]):
results = {'msg': msg, 'filter': filter_results}

else:
self.instance.status = "failed"
self.instance.status = "error"
self.instance.reason = "Cmake build failure"
logger.error("Cmake build failure: %s for %s" % (self.source_dir, self.platform.name))
results = {"returncode": p.returncode}
@@ -1973,7 +1973,7 @@ def process(self, message):
# The build process, call cmake and build with configured generator
if op == "cmake":
results = self.cmake()
- if self.instance.status == "failed":
+ if self.instance.status in ["failed", "error"]:
pipeline.put({"op": "report", "test": self.instance})
elif self.cmake_only:
pipeline.put({"op": "report", "test": self.instance})
@@ -1993,7 +1993,7 @@ def process(self, message):
results = self.build()

if not results:
self.instance.status = "failed"
self.instance.status = "error"
self.instance.reason = "Build Failure"
pipeline.put({"op": "report", "test": self.instance})
else:
@@ -2060,7 +2060,7 @@ def report_out(self):
self.suite.total_done += 1
instance = self.instance

if instance.status in ["failed", "timeout"]:
if instance.status in ["error", "failed", "timeout"]:
self.suite.total_failed += 1
if self.verbose:
status = Fore.RED + "FAILED " + Fore.RESET + instance.reason
@@ -2099,7 +2099,7 @@ def report_out(self):
self.suite.total_done, total_tests_width, self.suite.total_tests, instance.platform.name,
instance.testcase.name, status, more_info))

if instance.status in ["failed", "timeout"]:
if instance.status in ["error", "failed", "timeout"]:
self.log_info_file(self.inline_logs)
else:
sys.stdout.write("\rINFO - Total complete: %s%4d/%4d%s %2d%% skipped: %s%4d%s, failed: %s%4d%s" % (
@@ -2658,6 +2658,8 @@ def apply_filters(self, **kwargs):
self.device_testing,
self.fixtures
)
+ for t in tc.cases:
+     instance.results[t] = None

if device_testing_filter:
for h in self.connected_hardware:
@@ -2815,7 +2817,7 @@ def add_tasks_to_queue(self, test_only=False):

def execute(self):
def calc_one_elf_size(instance):
if instance.status not in ["failed", "skipped"]:
if instance.status not in ["error", "failed", "skipped"]:
if instance.platform.type != "native":
size_calc = instance.calculate_sizes()
instance.metrics["ram_size"] = size_calc.get_ram_size()
@@ -2943,7 +2945,6 @@ def process_log(log_file):


def xunit_report(self, filename, platform=None, full_report=False, append=False):

total = 0
if platform:
selected = [platform]
@@ -2978,7 +2979,7 @@ def xunit_report(self, filename, platform=None, full_report=False, append=False)
else:
fails += 1
else:
if instance.status in ["failed", "timeout"]:
if instance.status in ["error", "failed", "timeout"]:
if instance.reason in ['build_error', 'handler_crash']:
errors += 1
else:
@@ -2999,10 +3000,20 @@ def xunit_report(self, filename, platform=None, full_report=False, append=False)
# When we re-run the tests, we re-use the results and update only with
# the newly run tests.
if os.path.exists(filename) and append:
- eleTestsuite = eleTestsuites.findall(f'testsuite/[@name="{p}"]')[0]
- eleTestsuite.attrib['failures'] = "%d" % fails
- eleTestsuite.attrib['errors'] = "%d" % errors
- eleTestsuite.attrib['skip'] = "%d" % skips
+ ts = eleTestsuites.findall(f'testsuite/[@name="{p}"]')
+ if ts:
+     eleTestsuite = ts[0]
+     eleTestsuite.attrib['failures'] = "%d" % fails
+     eleTestsuite.attrib['errors'] = "%d" % errors
+     eleTestsuite.attrib['skip'] = "%d" % skips
+ else:
+     logger.info(f"Did not find any existing results for {p}")
+     eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
+                                  name=run, time="%f" % duration,
+                                  tests="%d" % (total),
+                                  failures="%d" % fails,
+                                  errors="%d" % (errors), skip="%s" % (skips))

else:
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=run, time="%f" % duration,
@@ -3039,6 +3050,7 @@ def xunit_report(self, filename, platform=None, full_report=False, append=False)
type="failure",
message="failed")
else:

el = ET.SubElement(
eleTestcase,
'error',
@@ -3048,28 +3060,37 @@ def xunit_report(self, filename, platform=None, full_report=False, append=False)
log_file = os.path.join(p, "handler.log")
el.text = self.process_log(log_file)

+ elif instance.results[k] == 'PASS':
+     pass
  elif instance.results[k] == 'SKIP':
+     el = ET.SubElement(eleTestcase, 'skipped', type="skipped", message="Skipped")
+ else:
      el = ET.SubElement(
          eleTestcase,
-         'skipped',
-         type="skipped",
-         message="Skipped")
+         'error',
+         type="error",
+         message=f"{instance.reason}")
else:
if platform:
classname = ".".join(instance.testcase.name.split(".")[:2])
else:
classname = p + ":" + ".".join(instance.testcase.name.split(".")[:2])

+ # remove testcases that are being re-run from exiting reports
+ for tc in eleTestsuite.findall(f'testcase/[@classname="{classname}"]'):
+     eleTestsuite.remove(tc)

eleTestcase = ET.SubElement(eleTestsuite, 'testcase',
classname=classname,
name="%s" % (instance.testcase.name),
time="%f" % handler_time)
if instance.status in ["failed", "timeout"]:
if instance.status in ["error", "failed", "timeout"]:
failure = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message=instance.reason)

p = ("%s/%s/%s" % (self.outdir, instance.platform.name, instance.testcase.name))
bl = os.path.join(p, "build.log")
hl = os.path.join(p, "handler.log")
@@ -3105,7 +3126,7 @@ def csv_report(self, filename):
"handler": instance.platform.simulation}

rowdict["status"] = instance.status
if instance.status not in ["failed", "timeout"]:
if instance.status not in ["error", "failed", "timeout"]:
if instance.handler:
rowdict["handler_time"] = instance.metrics.get("handler_time", 0)
ram_size = instance.metrics.get("ram_size", 0)
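The xunit_report change above replaces a bare findall(...)[0] with a find-or-create step, so a platform with no entry in the existing report gets a fresh testsuite element instead of an unconditional [0] index on an empty findall() result when appending. A standalone sketch of that pattern with xml.etree.ElementTree (element and attribute names follow the diff; the platform name is made up):

```python
# Standalone sketch of the find-or-create pattern used when appending to an
# existing xunit report (not the sanitylib code itself).
import xml.etree.ElementTree as ET

def get_or_create_suite(root, name):
    """Return the <testsuite> with this name, creating an empty one if absent."""
    found = root.findall(f'testsuite[@name="{name}"]')
    if found:
        return found[0]
    return ET.SubElement(root, 'testsuite', name=name,
                         tests="0", failures="0", errors="0", skip="0")

root = ET.Element('testsuites')
suite = get_or_create_suite(root, "qemu_x86")   # created
again = get_or_create_suite(root, "qemu_x86")   # found, not duplicated
assert suite is again
print(ET.tostring(root, encoding="unicode"))
```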
3 changes: 1 addition & 2 deletions scripts/sanitycheck
@@ -916,7 +916,6 @@ def main():
if options.test_tree:
for pre, _, node in RenderTree(testsuite):
print("%s%s" % (pre, node.name))

return

discards = []
@@ -927,7 +926,7 @@ def main():
last_run = os.path.join(options.outdir, "sanitycheck.csv")

if options.only_failed:
- suite.load_from_file(last_run, filter_status=['skipped', 'passed'])
+ suite.load_from_file(last_run, filter_status=['error', 'skipped', 'passed'])
suite.selected_platforms = set(p.platform.name for p in suite.instances.values())
elif options.load_tests:
suite.load_from_file(options.load_tests)
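With this change --only-failed also drops "error" instances when reloading the previous run, so testcases that never built are not re-run. A rough sketch of what a filter_status-style reload does, assuming filter_status lists the statuses to drop (which is what the call above implies) and a hypothetical helper name; the real loader is the load_from_file() method called above, reading the CSV written by csv_report():

```python
# Hypothetical helper showing the effect of filter_status on a reload
# (CSV is assumed to carry a "status" column, as csv_report() writes).
import csv

def load_failed_only(path, filter_status=("error", "skipped", "passed")):
    """Keep only rows from the previous run whose status is not filtered out."""
    with open(path, newline="") as f:
        return [row for row in csv.DictReader(f)
                if row["status"] not in filter_status]

# Only "failed"/"timeout" rows survive and are re-run:
# failed_rows = load_failed_only("outdir/sanitycheck.csv")
```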