Ensure raspi-2 tests always emit test result XML
Before the raspi-2 tests are run, we execute some debugging commands
that prepare the device accordingly. However, in rare cases these
commands may fail, which then completely prevents the test from
executing. Without test execution, we do not create any XML files.

We want this failure mode to be captured, since the presence of the XML
is used to indicate test failure to Mobile Harness test runners. As a
workaround, we generate an empty XML prior to test execution.

b/257101100

Change-Id: Id937fb674a5c5d37537de8480c71cef940466114
NOKEYCHECK=True
GitOrigin-RevId: b7af67b40c24dc610798b66bfa9f5b37c3816e42
arjungm authored and andrewsavage1 committed Dec 13, 2022
1 parent d52e830 commit 19d87f7
Showing 3 changed files with 56 additions and 23 deletions.
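The core of the change is small; the following is a hedged sketch of the idea, simplified from the launcher.py diff below. The `touch_result_xml` helper, the prompt pattern, and the example path are illustrative stand-ins, not the actual launcher API.

```python
# Minimal sketch of the workaround, assuming a pexpect SSH session to the
# device (as used in starboard/raspi/shared/launcher.py). Only sendline()
# and expect() are real pexpect calls; the rest is illustrative.
import pexpect


def touch_result_xml(session, test_result_xml_path, prompt=r'\$'):
    """Create an empty result XML on the device before the test runs.

    If later setup commands fail and the test never executes, the empty
    file still exists, so the test runner infrastructure sees a result XML
    instead of no artifact at all.
    """
    if not test_result_xml_path:
        return
    # An empty file is enough; gtest will overwrite it with real results
    # if the test does run.
    session.sendline('touch {}'.format(test_result_xml_path))
    session.expect(prompt)


# Hypothetical usage:
#   session = pexpect.spawn('ssh pi@raspi-2')
#   touch_result_xml(session, '/home/pi/nplb_testoutput.xml')
```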
50 changes: 34 additions & 16 deletions starboard/raspi/shared/launcher.py
@@ -104,6 +104,8 @@ def __init__(self, platform, target_name, config, device_id, **kwargs):
signal.signal(signal.SIGINT, functools.partial(_SigIntOrSigTermHandler))
signal.signal(signal.SIGTERM, functools.partial(_SigIntOrSigTermHandler))

self.last_run_pexpect_cmd = ''

def _InitPexpectCommands(self):
"""Initializes all of the pexpect commands needed for running the test."""

@@ -180,15 +182,15 @@ def _PexpectSpawnAndConnect(self, command):
try:
i = self.pexpect_process.expect(expected_prompts)
if i == 0:
self.pexpect_process.sendline('yes')
self._PexpectSendLine('yes')
elif i == 1:
self.pexpect_process.sendline(Launcher._RASPI_PASSWORD)
self._PexpectSendLine(Launcher._RASPI_PASSWORD)
break
else:
# If any other input comes in, maybe we've logged in with rsa key or
# raspi does not have password. Check if we've logged in by echoing
# a special sentence and expect it back.
self.pexpect_process.sendline('echo ' + Launcher._SSH_LOGIN_SIGNAL)
self._PexpectSendLine('echo ' + Launcher._SSH_LOGIN_SIGNAL)
i = self.pexpect_process.expect([Launcher._SSH_LOGIN_SIGNAL])
break
except pexpect.TIMEOUT:
@@ -200,6 +202,11 @@ def _PexpectSpawnAndConnect(self, command):
if retry_count > Launcher._PEXPECT_PASSWORD_TIMEOUT_MAX_RETRIES:
raise

def _PexpectSendLine(self, cmd):
"""Send lines to Pexpect and record the last command for logging purposes"""
self.last_run_pexpect_cmd = cmd
self.pexpect_process.sendline(cmd)

def _PexpectReadLines(self):
"""Reads all lines from the pexpect process."""

@@ -231,8 +238,8 @@ def _PexpectReadLines(self):
raise

def _Sleep(self, val):
self.pexpect_process.sendline('sleep {};echo {}'.format(
val, Launcher._SSH_SLEEP_SIGNAL))
self._PexpectSendLine('sleep {};echo {}'.format(val,
Launcher._SSH_SLEEP_SIGNAL))
self.pexpect_process.expect([Launcher._SSH_SLEEP_SIGNAL])

def _CleanupPexpectProcess(self):
@@ -242,16 +249,18 @@ def _CleanupPexpectProcess(self):
# Check if kernel logged OOM kill or any other system failure message
if self.return_value:
logging.info('Sending dmesg')
self.pexpect_process.sendline('dmesg -P --color=never | tail -n 100')
self._PexpectSendLine('dmesg -P --color=never | tail -n 100')
time.sleep(3)
try:
self.pexpect_process.readlines()
except pexpect.TIMEOUT:
logging.info('Timeout exception during cleanup command: %s',
self.last_run_pexpect_cmd)
pass
logging.info('Done sending dmesg')

# Send ctrl-c to the raspi and close the process.
self.pexpect_process.sendline(chr(3))
self._PexpectSendLine(chr(3))
time.sleep(1) # Allow a second for normal shutdown
self.pexpect_process.close()

@@ -263,12 +272,14 @@ def _WaitForPrompt(self):
self.pexpect_process.expect(self._RASPI_PROMPT)
break
except pexpect.TIMEOUT:
logging.info('Timeout exception during WaitForPrompt command: %s',
self.last_run_pexpect_cmd)
if self.shutdown_initiated.is_set():
return
retry_count -= 1
if not retry_count:
raise
self.pexpect_process.sendline('echo ' + Launcher._SSH_SLEEP_SIGNAL)
self._PexpectSendLine('echo ' + Launcher._SSH_SLEEP_SIGNAL)
time.sleep(self._INTER_COMMAND_DELAY_SECONDS)

def _KillExistingCobaltProcesses(self):
@@ -279,11 +290,11 @@ def _KillExistingCobaltProcesses(self):
cause other problems.
"""
logging.info('Killing existing processes')
self.pexpect_process.sendline(
self._PexpectSendLine(
'pkill -9 -ef "(cobalt)|(crashpad_handler)|(elf_loader)"')
self._WaitForPrompt()
# Print the return code of pkill. 0 if a process was halted
self.pexpect_process.sendline('echo PROCKILL:${?}')
self._PexpectSendLine('echo PROCKILL:${?}')
i = self.pexpect_process.expect([r'PROCKILL:0', r'PROCKILL:(\d+)'])
if i == 0:
logging.warning('Forced to pkill existing instance(s) of cobalt. '
@@ -321,10 +332,14 @@ def Run(self):
self._PexpectSpawnAndConnect(self.ssh_command)
self._Sleep(self._INTER_COMMAND_DELAY_SECONDS)
# Execute debugging commands on the first run
first_run_commands = []
if self.test_result_xml_path:
first_run_commands.append('touch {}'.format(self.test_result_xml_path))
first_run_commands.extend(['free -mh', 'ps -ux', 'df -h'])
if FirstRun():
for cmd in ['free -mh', 'ps -ux', 'df -h']:
for cmd in first_run_commands:
if not self.shutdown_initiated.is_set():
self.pexpect_process.sendline(cmd)
self._PexpectSendLine(cmd)
line = self.pexpect_process.readline()
self.output_file.write(line)
self._WaitForPrompt()
Expand All @@ -334,15 +349,18 @@ def Run(self):
self._Sleep(self._INTER_COMMAND_DELAY_SECONDS)

if not self.shutdown_initiated.is_set():
self.pexpect_process.sendline(self.test_command)
self._PexpectSendLine(self.test_command)
self._PexpectReadLines()

except pexpect.EOF:
logging.exception('pexpect encountered EOF while reading line.')
logging.exception('pexpect encountered EOF while reading line. (cmd: %s)',
self.last_run_pexpect_cmd)
except pexpect.TIMEOUT:
logging.exception('pexpect timed out while reading line.')
logging.exception('pexpect timed out while reading line. (cmd: %s)',
self.last_run_pexpect_cmd)
except Exception: # pylint: disable=broad-except
logging.exception('Error occurred while running test.')
logging.exception('Error occurred while running test. (cmd: %s)',
self.last_run_pexpect_cmd)
finally:
self._CleanupPexpectProcess()

4 changes: 4 additions & 0 deletions starboard/tools/abstract_launcher.py
@@ -52,6 +52,7 @@ def LauncherFactory(platform_name,
output_file=None,
out_directory=None,
env_variables=None,
test_result_xml_path=None,
**kwargs):
"""Creates the proper launcher based upon command line args.
@@ -90,6 +91,7 @@ def LauncherFactory(platform_name,
output_file=output_file,
out_directory=out_directory,
env_variables=env_variables,
test_result_xml_path=test_result_xml_path,
**kwargs)


@@ -137,6 +139,8 @@ def __init__(self, platform_name, target_name, config, device_id, **kwargs):
# this variable during initialization.
self.startup_timeout_seconds = 2 * 60

self.test_result_xml_path = kwargs.get("test_result_xml_path", None)

@abc.abstractmethod
def Run(self):
"""Runs the launcher's executable.
25 changes: 18 additions & 7 deletions starboard/tools/testing/test_runner.py
@@ -432,7 +432,12 @@ def _RunTest(self,
test_params.append("--gtest_total_shards={}".format(shard_count))
test_params.append("--gtest_shard_index={}".format(shard_index))

# Path to where the test results XML will be created (if applicable).
# For on-device testing, this is w.r.t on device storage.
test_result_xml_path = None

def MakeLauncher():
logging.info("MakeLauncher(): %s", test_result_xml_path)
return abstract_launcher.LauncherFactory(
self.platform,
target_name,
@@ -443,32 +448,38 @@ def MakeLauncher():
out_directory=self.out_directory,
coverage_directory=self.coverage_directory,
env_variables=env,
test_result_xml_path=test_result_xml_path,
loader_platform=self.loader_platform,
loader_config=self.loader_config,
loader_out_directory=self.loader_out_directory,
launcher_args=self.launcher_args)

logging.info(
"XML test result logging: %s",
("enabled" if
(self.log_xml_results or self.xml_output_dir) else "disabled"))
if self.log_xml_results:
out_path = MakeLauncher().GetDeviceOutputPath()
xml_filename = "{}_testoutput.xml".format(target_name)
if out_path:
xml_path = os.path.join(out_path, xml_filename)
test_result_xml_path = os.path.join(out_path, xml_filename)
else:
xml_path = xml_filename
test_params.append("--gtest_output=xml:{}".format(xml_path))
test_result_xml_path = xml_filename
test_params.append("--gtest_output=xml:{}".format(test_result_xml_path))
logging.info(("Xml results for this test will "
"be logged to '%s'."), xml_path)
"be logged to '%s'."), test_result_xml_path)
elif self.xml_output_dir:
# Have gtest create and save a test result xml
xml_output_subdir = os.path.join(self.xml_output_dir, target_name)
try:
os.makedirs(xml_output_subdir)
except OSError:
pass
xml_output_path = os.path.join(xml_output_subdir, "sponge_log.xml")
test_result_xml_path = os.path.join(xml_output_subdir, "sponge_log.xml")
logging.info("Xml output for this test will be saved to: %s",
xml_output_path)
test_params.append("--gtest_output=xml:%s" % (xml_output_path))
test_result_xml_path)
test_params.append("--gtest_output=xml:%s" % (test_result_xml_path))
logging.info("XML test result path: %s", test_result_xml_path)

# Turn off color codes from output to make it easy to parse
test_params.append("--gtest_color=no")
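To round out the picture, here is a hedged sketch of how the new test_result_xml_path parameter is threaded from the test runner through LauncherFactory into a launcher's kwargs. The class, function, and example values below are simplified stand-ins mirroring the diffs above, not the real Starboard classes.

```python
# Simplified stand-ins mirroring the diffs above; not the real Starboard
# code. Shows how test_result_xml_path is threaded through **kwargs.


class SketchLauncher:

    def __init__(self, platform_name, target_name, config, device_id, **kwargs):
        # Mirrors AbstractLauncher.__init__: a missing kwarg degrades to
        # None, so platforms that never pass the path are unaffected.
        self.test_result_xml_path = kwargs.get('test_result_xml_path', None)


def launcher_factory(platform_name, target_name, config, device_id,
                     test_result_xml_path=None, **kwargs):
    # Mirrors abstract_launcher.LauncherFactory: the path is simply forwarded.
    return SketchLauncher(platform_name, target_name, config, device_id,
                          test_result_xml_path=test_result_xml_path, **kwargs)


# Hypothetical usage, echoing test_runner.py's MakeLauncher():
launcher = launcher_factory('raspi-2', 'nplb', 'devel', '192.168.1.1',
                            test_result_xml_path='/home/pi/nplb_testoutput.xml')
assert launcher.test_result_xml_path == '/home/pi/nplb_testoutput.xml'
```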
