pin the PEX_PYTHON{,_PATH} running the pytest pex to avoid using incompatible pytest requirements (pantsbuild#7563)

### Problem

`PytestPrep` generates a pex that merges python sources and 3rdparty requirements. All of the unique python interpreter constraints from all targets are applied to this pex before it is built. When executing that pex, the wrong python interpreter may be selected when the targets' interpreter constraint sets intersect without being identical -- see the added test `test_succeeds_for_intersecting_unique_constraints()`. The failure looks like:
```
Failed to execute PEX file, missing macosx_10_14_x86_64-cp-27-cp27m compatible dependencies for:
more-itertools
pytest
funcsigs
pytest-cov
coverage
```
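
For illustration only (not part of this change): the compatibility constraints exercised by the new test, `CPython>=2.7,<4` on one target and `CPython>=3.6` on another, intersect without being identical. The sketch below shows that intersection using the `packaging` library, which is assumed here purely for demonstration and is not how Pants evaluates constraints:

```python
# Demonstration only: how the compatibility constraints of the two new test
# targets intersect. `packaging` is used here purely for illustration.
from packaging.specifiers import SpecifierSet
from packaging.version import Version

py23_constraint = SpecifierSet(">=2.7,<4")      # from the 'py23-tests' target
py3_plus_constraint = SpecifierSet(">=3.6")     # from the 'py3-and-more-tests' target
merged = py23_constraint & py3_plus_constraint  # the merged pytest pex carries both

print(Version("3.7.3") in merged)   # True  - a py3 interpreter satisfies both sets
print(Version("2.7.16") in merged)  # False - a py2 interpreter satisfies only the first,
# so letting pex re-select an interpreter at runtime can pick one for which the
# resolved requirements (pytest, coverage, ...) were never fetched.
```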

### Solution

- Force the interpreter selected to create the pytest pex to also be used when running it, by setting `PEX_PYTHON` and `PEX_PYTHON_PATH` (sketched below, after this list).
- Add testing for the case of targets with intersecting interpreter constraints (such as `['CPython>=2.7,<4', 'CPython>=3.6']`).
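
A minimal sketch of the pinning approach, assuming a stand-alone helper (`run_pinned_pex` and its arguments are hypothetical names; the actual change lives in `PytestRun._constrain_pytest_interpreter_search_path` in the diff below):

```python
# Sketch: invoke a pex while forcing it to run under one specific interpreter.
import os
import subprocess

def run_pinned_pex(pex_path, interpreter_binary, args):
    env = dict(os.environ)
    env.update({
        'PEX_IGNORE_RCFILES': '1',             # don't let a ~/.pexrc override the pin
        'PEX_PYTHON': interpreter_binary,      # the exact interpreter to run under
        'PEX_PYTHON_PATH': interpreter_binary, # restrict the interpreter search path to it
    })
    return subprocess.call([pex_path] + list(args), env=env)
```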

### Result

Running python test targets with overlapping interpreter constraints should now work!
cosmicexplorer authored May 1, 2019
1 parent 64a389b commit 823ae84
Showing 6 changed files with 151 additions and 63 deletions.
11 changes: 10 additions & 1 deletion src/python/pants/backend/python/tasks/pytest_prep.py
@@ -35,6 +35,7 @@ def __init__(self, interpreter, pex):
pex_info.merge_pex_path(pex_path) # We're now on the sys.path twice.
PEXBuilder(pex_path, interpreter=interpreter, pex_info=pex_info).freeze()
self._pex = PEX(pex=pex_path, interpreter=interpreter)
self._interpreter = interpreter

@property
def pex(self):
@@ -44,6 +45,14 @@ def pex(self):
"""
return self._pex

@property
def interpreter(self):
"""Return the interpreter used to build this PEX.
:rtype: :class:`pex.interpreter.PythonInterpreter`
"""
return self._interpreter

@property
def config_path(self):
"""Return the absolute path of the `pytest.ini` config file in this py.test binary.
@@ -85,7 +94,7 @@ def execute(self):
return
pex_info = PexInfo.default()
pex_info.entry_point = 'pytest'
pytest_binary = self.create_pex(pex_info)
pytest_binary = self.create_pex(pex_info, pin_selected_interpreter=True)
interpreter = self.context.products.get_data(PythonInterpreter)
self.context.products.register_data(self.PytestBinary,
self.PytestBinary(interpreter, pytest_binary))
25 changes: 23 additions & 2 deletions src/python/pants/backend/python/tasks/pytest_run.py
@@ -34,7 +34,7 @@
from pants.util.objects import datatype
from pants.util.process_handler import SubprocessProcessHandler
from pants.util.py2_compat import configparser
from pants.util.strutil import safe_shlex_split
from pants.util.strutil import safe_shlex_join, safe_shlex_split
from pants.util.xml_parser import XmlParser


@@ -70,6 +70,7 @@ def files_iter():
return list(files_iter())


# TODO: convert this into an enum!
class PytestResult(TestResult):
_SUCCESS_EXIT_CODES = (
0,
@@ -482,6 +483,22 @@ def _test_runner(self, workdirs, test_targets, sources_map):
pytest_binary.pex) as coverage_args:
yield pytest_binary, [conftest] + coverage_args, get_pytest_rootdir

def _constrain_pytest_interpreter_search_path(self):
"""Return an environment for invoking a pex which ensures the use of the selected interpreter.
When creating the merged pytest pex, we already have an interpreter, and we only invoke that pex
within a pants run, so we can be sure the selected interpreter will be available. Constraining
the interpreter search path at pex runtime ensures that any resolved requirements will be
compatible with the interpreter being used to invoke the merged pytest pex.
"""
pytest_binary = self.context.products.get_data(PytestPrep.PytestBinary)
chosen_interpreter_binary_path = pytest_binary.interpreter.binary
return {
'PEX_IGNORE_RCFILES': '1',
'PEX_PYTHON': chosen_interpreter_binary_path,
'PEX_PYTHON_PATH': chosen_interpreter_binary_path,
}

def _do_run_tests_with_args(self, pex, args):
try:
env = dict(os.environ)
@@ -515,8 +532,10 @@ def _do_run_tests_with_args(self, pex, args):
env['PEX_PROFILE_FILENAME'] = '{0}.subprocess.{1:.6f}'.format(profile, time.time())

with self.context.new_workunit(name='run',
cmd=' '.join(pex.cmdline(args)),
cmd=safe_shlex_join(pex.cmdline(args)),
labels=[WorkUnitLabel.TOOL, WorkUnitLabel.TEST]) as workunit:
# NB: Constrain the pex environment to ensure the use of the selected interpreter!
env.update(self._constrain_pytest_interpreter_search_path())
rc = self.spawn_and_wait(pex, workunit=workunit, args=args, setsid=True, env=env)
return PytestResult.rc(rc)
except ErrorWhileTesting:
@@ -736,6 +755,8 @@ def _pex_run(self, pex, workunit_name, args, env):
with self.context.new_workunit(name=workunit_name,
cmd=' '.join(pex.cmdline(args)),
labels=[WorkUnitLabel.TOOL, WorkUnitLabel.TEST]) as workunit:
# NB: Constrain the pex environment to ensure the use of the selected interpreter!
env.update(self._constrain_pytest_interpreter_search_path())
process = self._spawn(pex, workunit, args, setsid=False, env=env)
return process.wait()

@@ -83,8 +83,25 @@ def extra_files(self):
"""
return ()

def create_pex(self, pex_info=None):
"""Returns a wrapped pex that "merges" the other pexes via PEX_PATH."""
# TODO: remove `pin_selected_interpreter` arg and constrain all resulting pexes to a single ==
# interpreter constraint for the globally selected interpreter! This also requires porting
# `PythonRun` to set 'PEX_PYTHON_PATH' and 'PEX_PYTHON' when invoking the resulting pex, see
# https://github.com/pantsbuild/pants/pull/7563.
def create_pex(self, pex_info=None, pin_selected_interpreter=False):
"""Returns a wrapped pex that "merges" other pexes produced in previous tasks via PEX_PATH.
The returned pex will have the pexes from the ResolveRequirements and GatherSources tasks mixed
into it via PEX_PATH. Any 3rdparty requirements declared with self.extra_requirements() will
also be resolved for the global interpreter, and added to the returned pex via PEX_PATH.
:param pex_info: An optional PexInfo instance to provide to self.merged_pex().
:type pex_info: :class:`pex.pex_info.PexInfo`, or None
:param bool pin_selected_interpreter: If True, the produced pex will have a single ==
interpreter constraint applied to it, for the global
interpreter selected by the SelectInterpreter
task. Otherwise, all of the interpreter constraints from all python targets will be applied.
:rtype: :class:`pex.pex.PEX`
"""
relevant_targets = self.context.targets(
lambda tgt: isinstance(tgt, (
PythonDistribution, PythonRequirementLibrary, PythonTarget, Files)))
@@ -116,8 +133,17 @@ def create_pex(self, pex_info=None):
# Add the extra requirements first, so they take precedence over any colliding version
# in the target set's dependency closure.
pexes = [extra_requirements_pex] + pexes
constraints = {constraint for rt in relevant_targets if is_python_target(rt)
for constraint in PythonSetup.global_instance().compatibility_or_constraints(rt)}

if pin_selected_interpreter:
constraints = {str(interpreter.identity.requirement)}
else:
constraints = {
constraint for rt in relevant_targets if is_python_target(rt)
for constraint in PythonSetup.global_instance().compatibility_or_constraints(rt)
}
self.context.log.debug('target set {} has constraints: {}'
.format(relevant_targets, constraints))


with self.merged_pex(path, pex_info, interpreter, pexes, constraints) as builder:
for extra_file in self.extra_files():
122 changes: 71 additions & 51 deletions tests/python/pants_test/backend/python/tasks/test_pytest_run.py
@@ -25,7 +25,7 @@
from pants.util.py2_compat import configparser
from pants_test.backend.python.tasks.python_task_test_base import PythonTaskTestBase
from pants_test.subsystem.subsystem_util import init_subsystem
from pants_test.task_test_base import ensure_cached
from pants_test.task_test_base import DeclarativeTaskTestMixin, ensure_cached


# NB: Our production code depends on `pytest-cov` which indirectly depends on `coverage`, but in
@@ -43,24 +43,52 @@ def extra_requirements(self):
return extra_reqs


class PytestTestBase(PythonTaskTestBase):
class PytestTestBase(PythonTaskTestBase, DeclarativeTaskTestMixin):
@classmethod
def task_type(cls):
return PytestRun

run_before_task_types = [
SelectInterpreter,
ResolveRequirements,
GatherSources,
PytestPrepCoverageVersionPinned,
]

_CONFTEST_CONTENT = '# I am an existing root-level conftest file.'

_default_test_options = {
'colors': False,
'level': 'info' # When debugging a test failure it may be helpful to set this to 'debug'.
}

def _augment_options(self, options):
new_options = self._default_test_options.copy()
new_options.update(options)
return new_options

def run_tests(self, targets, *passthru_args, **options):
"""Run the tests in the specified targets, with the specified PytestRun task options."""
context = self._prepare_test_run(targets, *passthru_args, **options)
self._do_run_tests(context)
return context
self.set_options(**self._augment_options(options))
with pushd(self.build_root):
result = self.invoke_tasks(
target_roots=targets,
passthru_args=list(passthru_args),
)
return result.context

def run_failing_tests(self, targets, failed_targets, *passthru_args, **options):
context = self._prepare_test_run(targets, *passthru_args, **options)
self.set_options(**self._augment_options(options))
with self.assertRaises(ErrorWhileTesting) as cm:
self._do_run_tests(context)
self.assertEqual(set(failed_targets), set(cm.exception.failed_targets))
with pushd(self.build_root):
self.invoke_tasks(
target_roots=targets,
passthru_args=list(passthru_args),
)
exc = cm.exception
# NB: self.invoke_tasks() will attach the tasks' context to the raised exception as ._context!
context = exc._context
self.assertEqual(set(failed_targets), set(exc.failed_targets))
return context

def try_run_tests(self, targets, *passthru_args, **options):
@@ -70,34 +98,6 @@ def try_run_tests(self, targets, *passthru_args, **options):
except ErrorWhileTesting as e:
return e.failed_targets

def _prepare_test_run(self, targets, *passthru_args, **options):
test_options = {
'colors': False,
'level': 'info' # When debugging a test failure it may be helpful to set this to 'debug'.
}
test_options.update(options)
self.set_options(**test_options)

# The easiest way to create products required by the PythonTest task is to
# execute the relevant tasks.
si_task_type = self.synthesize_task_subtype(SelectInterpreter, 'si_scope')
rr_task_type = self.synthesize_task_subtype(ResolveRequirements, 'rr_scope')
gs_task_type = self.synthesize_task_subtype(GatherSources, 'gs_scope')
pp_task_type = self.synthesize_task_subtype(PytestPrepCoverageVersionPinned, 'pp_scope')
context = self.context(for_task_types=[si_task_type, rr_task_type, gs_task_type, pp_task_type],
target_roots=targets,
passthru_args=list(passthru_args))
si_task_type(context, os.path.join(self.pants_workdir, 'si')).execute()
rr_task_type(context, os.path.join(self.pants_workdir, 'rr')).execute()
gs_task_type(context, os.path.join(self.pants_workdir, 'gs')).execute()
pp_task_type(context, os.path.join(self.pants_workdir, 'pp')).execute()
return context

def _do_run_tests(self, context):
pytest_run_task = self.create_task(context)
with pushd(self.build_root):
pytest_run_task.execute()


class PytestTestEmpty(PytestTestBase):
def test_empty(self):
@@ -137,12 +137,10 @@ def test_green(self):
self.addCleanup(self.AlwaysFailingPexRunPytestRun.set_up())

def do_test_failed_pex_run(self):
with self.assertRaises(TaskError) as cm:
self.run_tests(targets=[self.tests])

# We expect a `TaskError` as opposed to an `ErrorWhileTesting` since execution fails outside
# the actual test run.
self.assertEqual(TaskError, type(cm.exception))
with self.assertRaises(TaskError):
self.run_tests(targets=[self.tests])

def test_failed_pex_run(self):
self.do_test_failed_pex_run()
@@ -276,6 +274,18 @@ def test_use_two(self):
dependencies = ["lib:core"],
coverage = ["core"],
)
python_tests(
name = "py23-tests",
sources = ["py23_test_source.py"],
compatibility = ['CPython>=2.7,<4'],
)
python_tests(
name = "py3-and-more-tests",
sources = ["py3_and_more_test_source.py"],
compatibility = ['CPython>=3.6'],
)
'''
)

@@ -361,6 +371,9 @@ def null():
assert(False)
"""))

self.create_file('tests/py23_test_source.py', '')
self.create_file('tests/py3_and_more_test_source.py', '')

self.create_file('tests/conftest.py', self._CONFTEST_CONTENT)

self.app = self.target('tests:app')
@@ -377,6 +390,9 @@
self.all = self.target('tests:all')
self.all_with_cov = self.target('tests:all-with-coverage')

self.py23 = self.target('tests:py23-tests')
self.py3_and_more = self.target('tests:py3-and-more-tests')

@ensure_cached(PytestRun, expected_num_artifacts=0)
def test_error(self):
"""Test that a test that errors rather than fails shows up in ErrorWhileTesting."""
@@ -389,6 +405,10 @@ def test_error_outside_function(self):
self.run_failing_tests(targets=[self.red, self.green, self.failure_outside_function],
failed_targets=[self.red, self.failure_outside_function])

@ensure_cached(PytestRun, expected_num_artifacts=1)
def test_succeeds_for_intersecting_unique_constraints(self):
self.run_tests(targets=[self.py23, self.py3_and_more])

@ensure_cached(PytestRun, expected_num_artifacts=1)
def test_green(self):
self.run_tests(targets=[self.green])
@@ -744,40 +764,40 @@ def marking_tests(self):
import os
import pytest
import unittest
class PassthruTest(unittest.TestCase):
def touch(self, path):
with open(path, 'wb') as fp:
fp.close()
def mark_test_run(self):
caller_frame_record = inspect.stack()[1]
# For the slot breakdown of a frame record tuple, see:
# https://docs.python.org/2/library/inspect.html#the-interpreter-stack
_, _, _, caller_func_name, _, _ = caller_frame_record
marker_file = os.path.join({marker_dir!r}, caller_func_name)
self.touch(marker_file)
def test_one(self):
self.mark_test_run()
@pytest.mark.purple
def test_two(self):
self.mark_test_run()
def test_three(self):
self.mark_test_run()
@pytest.mark.red
def test_four(self):
self.mark_test_run()
@pytest.mark.green
def test_five(self):
self.mark_test_run()
self.mark_test_run()
""".format(marker_dir=marker_dir)))

def assert_mark(exists, name):
@@ -20,10 +20,15 @@
class PytestRunIntegrationTest(PantsRunIntegrationTest):
testproject = 'testprojects/src/python/interpreter_selection'

# NB: Occasionally running a test in CI may take multiple seconds. The tests in this file which
# use the --timeout-default argument are not testing for performance regressions, but just for
# correctness of timeout behavior, so we set this to a higher value to avoid flakiness.
_non_flaky_timeout_seconds = 5

def test_pytest_run_timeout_succeeds(self):
pants_run = self.run_pants(['clean-all',
'test.pytest',
'--timeout-default=2',
'--timeout-default={}'.format(self._non_flaky_timeout_seconds),
'testprojects/tests/python/pants/timeout:exceeds_timeout',
'--',
'-kwithin_timeout'])
@@ -62,7 +67,7 @@ def test_pytest_run_timeout_cant_terminate(self):
pants_run = self.run_pants(['clean-all',
'test.pytest',
'--timeout-terminate-wait=2',
'--timeout-default=5',
'--timeout-default={}'.format(self._non_flaky_timeout_seconds),
'--coverage=auto',
'--cache-ignore',
'testprojects/tests/python/pants/timeout:ignores_terminate'])
@@ -87,7 +92,7 @@ def test_pytest_run_killed_by_signal(self):
pants_run = self.run_pants(['clean-all',
'test.pytest',
'--timeout-terminate-wait=2',
'--timeout-default=5',
'--timeout-default={}'.format(self._non_flaky_timeout_seconds),
'--cache-ignore',
'testprojects/tests/python/pants/timeout:terminates_self'])
end = time.time()
