diff --git a/src/python/pants/backend/python/tasks/pytest_prep.py b/src/python/pants/backend/python/tasks/pytest_prep.py
index 25b0724beea..1bc8b34904c 100644
--- a/src/python/pants/backend/python/tasks/pytest_prep.py
+++ b/src/python/pants/backend/python/tasks/pytest_prep.py
@@ -35,6 +35,7 @@ def __init__(self, interpreter, pex):
       pex_info.merge_pex_path(pex_path)  # We're now on the sys.path twice.
       PEXBuilder(pex_path, interpreter=interpreter, pex_info=pex_info).freeze()
       self._pex = PEX(pex=pex_path, interpreter=interpreter)
+      self._interpreter = interpreter
 
     @property
     def pex(self):
@@ -44,6 +45,14 @@ def pex(self):
       """
       return self._pex
 
+    @property
+    def interpreter(self):
+      """Return the interpreter used to build this PEX.
+
+      :rtype: :class:`pex.interpreter.PythonInterpreter`
+      """
+      return self._interpreter
+
     @property
     def config_path(self):
       """Return the absolute path of the `pytest.ini` config file in this py.test binary.
@@ -85,7 +94,7 @@ def execute(self):
       return
     pex_info = PexInfo.default()
     pex_info.entry_point = 'pytest'
-    pytest_binary = self.create_pex(pex_info)
+    pytest_binary = self.create_pex(pex_info, pin_selected_interpreter=True)
     interpreter = self.context.products.get_data(PythonInterpreter)
     self.context.products.register_data(self.PytestBinary,
                                         self.PytestBinary(interpreter, pytest_binary))
diff --git a/src/python/pants/backend/python/tasks/pytest_run.py b/src/python/pants/backend/python/tasks/pytest_run.py
index 7f76a1b1089..028416f6be0 100644
--- a/src/python/pants/backend/python/tasks/pytest_run.py
+++ b/src/python/pants/backend/python/tasks/pytest_run.py
@@ -34,7 +34,7 @@
 from pants.util.objects import datatype
 from pants.util.process_handler import SubprocessProcessHandler
 from pants.util.py2_compat import configparser
-from pants.util.strutil import safe_shlex_split
+from pants.util.strutil import safe_shlex_join, safe_shlex_split
 from pants.util.xml_parser import XmlParser
 
 
@@ -70,6 +70,7 @@ def files_iter():
   return list(files_iter())
 
 
+# TODO: convert this into an enum!
 class PytestResult(TestResult):
   _SUCCESS_EXIT_CODES = (
     0,
@@ -482,6 +483,22 @@ def _test_runner(self, workdirs, test_targets, sources_map):
                                  pytest_binary.pex) as coverage_args:
         yield pytest_binary, [conftest] + coverage_args, get_pytest_rootdir
 
+  def _constrain_pytest_interpreter_search_path(self):
+    """Return an environment for invoking a pex which ensures the use of the selected interpreter.
+
+    When creating the merged pytest pex, we already have an interpreter, and we only invoke that pex
+    within a pants run, so we can be sure the selected interpreter will be available. Constraining
+    the interpreter search path at pex runtime ensures that any resolved requirements will be
+    compatible with the interpreter being used to invoke the merged pytest pex.
+ """ + pytest_binary = self.context.products.get_data(PytestPrep.PytestBinary) + chosen_interpreter_binary_path = pytest_binary.interpreter.binary + return { + 'PEX_IGNORE_RCFILES': '1', + 'PEX_PYTHON': chosen_interpreter_binary_path, + 'PEX_PYTHON_PATH': chosen_interpreter_binary_path, + } + def _do_run_tests_with_args(self, pex, args): try: env = dict(os.environ) @@ -515,8 +532,10 @@ def _do_run_tests_with_args(self, pex, args): env['PEX_PROFILE_FILENAME'] = '{0}.subprocess.{1:.6f}'.format(profile, time.time()) with self.context.new_workunit(name='run', - cmd=' '.join(pex.cmdline(args)), + cmd=safe_shlex_join(pex.cmdline(args)), labels=[WorkUnitLabel.TOOL, WorkUnitLabel.TEST]) as workunit: + # NB: Constrain the pex environment to ensure the use of the selected interpreter! + env.update(self._constrain_pytest_interpreter_search_path()) rc = self.spawn_and_wait(pex, workunit=workunit, args=args, setsid=True, env=env) return PytestResult.rc(rc) except ErrorWhileTesting: @@ -736,6 +755,8 @@ def _pex_run(self, pex, workunit_name, args, env): with self.context.new_workunit(name=workunit_name, cmd=' '.join(pex.cmdline(args)), labels=[WorkUnitLabel.TOOL, WorkUnitLabel.TEST]) as workunit: + # NB: Constrain the pex environment to ensure the use of the selected interpreter! + env.update(self._constrain_pytest_interpreter_search_path()) process = self._spawn(pex, workunit, args, setsid=False, env=env) return process.wait() diff --git a/src/python/pants/backend/python/tasks/python_execution_task_base.py b/src/python/pants/backend/python/tasks/python_execution_task_base.py index 3c20d464e2d..1c9c4a871bf 100644 --- a/src/python/pants/backend/python/tasks/python_execution_task_base.py +++ b/src/python/pants/backend/python/tasks/python_execution_task_base.py @@ -83,8 +83,25 @@ def extra_files(self): """ return () - def create_pex(self, pex_info=None): - """Returns a wrapped pex that "merges" the other pexes via PEX_PATH.""" + # TODO: remove `pin_selected_interpreter` arg and constrain all resulting pexes to a single == + # interpreter constraint for the globally selected interpreter! This also requries porting + # `PythonRun` to set 'PEX_PYTHON_PATH' and 'PEX_PYTHON' when invoking the resulting pex, see + # https://github.com/pantsbuild/pants/pull/7563. + def create_pex(self, pex_info=None, pin_selected_interpreter=False): + """Returns a wrapped pex that "merges" other pexes produced in previous tasks via PEX_PATH. + + The returned pex will have the pexes from the ResolveRequirements and GatherSources tasks mixed + into it via PEX_PATH. Any 3rdparty requirements declared with self.extra_requirements() will + also be resolved for the global interpreter, and added to the returned pex via PEX_PATH. + + :param pex_info: An optional PexInfo instance to provide to self.merged_pex(). + :type pex_info: :class:`pex.pex_info.PexInfo`, or None + :param bool pin_selected_interpreter: If True, the produced pex will have a single == + interpreter constraint applied to it, for the global + interpreter selected by the SelectInterpreter + task. Otherwise, all of the interpreter constraints from all python targets will applied. + :rtype: :class:`pex.pex.PEX` + """ relevant_targets = self.context.targets( lambda tgt: isinstance(tgt, ( PythonDistribution, PythonRequirementLibrary, PythonTarget, Files))) @@ -116,8 +133,17 @@ def create_pex(self, pex_info=None): # Add the extra requirements first, so they take precedence over any colliding version # in the target set's dependency closure. 
       pexes = [extra_requirements_pex] + pexes
-    constraints = {constraint for rt in relevant_targets if is_python_target(rt)
-                   for constraint in PythonSetup.global_instance().compatibility_or_constraints(rt)}
+
+    if pin_selected_interpreter:
+      constraints = {str(interpreter.identity.requirement)}
+    else:
+      constraints = {
+        constraint for rt in relevant_targets if is_python_target(rt)
+        for constraint in PythonSetup.global_instance().compatibility_or_constraints(rt)
+      }
+      self.context.log.debug('target set {} has constraints: {}'
+                             .format(relevant_targets, constraints))
+
     with self.merged_pex(path, pex_info, interpreter, pexes, constraints) as builder:
       for extra_file in self.extra_files():
diff --git a/tests/python/pants_test/backend/python/tasks/test_pytest_run.py b/tests/python/pants_test/backend/python/tasks/test_pytest_run.py
index 496476f2e50..b39618aa940 100644
--- a/tests/python/pants_test/backend/python/tasks/test_pytest_run.py
+++ b/tests/python/pants_test/backend/python/tasks/test_pytest_run.py
@@ -25,7 +25,7 @@
 from pants.util.py2_compat import configparser
 from pants_test.backend.python.tasks.python_task_test_base import PythonTaskTestBase
 from pants_test.subsystem.subsystem_util import init_subsystem
-from pants_test.task_test_base import ensure_cached
+from pants_test.task_test_base import DeclarativeTaskTestMixin, ensure_cached
 
 
 # NB: Our production code depends on `pytest-cov` which indirectly depends on `coverage`, but in
@@ -43,24 +43,52 @@ def extra_requirements(self):
     return extra_reqs
 
 
-class PytestTestBase(PythonTaskTestBase):
+class PytestTestBase(PythonTaskTestBase, DeclarativeTaskTestMixin):
   @classmethod
   def task_type(cls):
     return PytestRun
 
+  run_before_task_types = [
+    SelectInterpreter,
+    ResolveRequirements,
+    GatherSources,
+    PytestPrepCoverageVersionPinned,
+  ]
+
   _CONFTEST_CONTENT = '# I am an existing root-level conftest file.'
 
+  _default_test_options = {
+    'colors': False,
+    'level': 'info'  # When debugging a test failure it may be helpful to set this to 'debug'.
+  }
+
+  def _augment_options(self, options):
+    new_options = self._default_test_options.copy()
+    new_options.update(options)
+    return new_options
+
   def run_tests(self, targets, *passthru_args, **options):
     """Run the tests in the specified targets, with the specified PytestRun task options."""
-    context = self._prepare_test_run(targets, *passthru_args, **options)
-    self._do_run_tests(context)
-    return context
+    self.set_options(**self._augment_options(options))
+    with pushd(self.build_root):
+      result = self.invoke_tasks(
+        target_roots=targets,
+        passthru_args=list(passthru_args),
+      )
+      return result.context
 
   def run_failing_tests(self, targets, failed_targets, *passthru_args, **options):
-    context = self._prepare_test_run(targets, *passthru_args, **options)
+    self.set_options(**self._augment_options(options))
     with self.assertRaises(ErrorWhileTesting) as cm:
-      self._do_run_tests(context)
-    self.assertEqual(set(failed_targets), set(cm.exception.failed_targets))
+      with pushd(self.build_root):
+        self.invoke_tasks(
+          target_roots=targets,
+          passthru_args=list(passthru_args),
+        )
+    exc = cm.exception
+    # NB: self.invoke_tasks() will attach the tasks' context to the raised exception as ._context!
+    context = exc._context
+    self.assertEqual(set(failed_targets), set(exc.failed_targets))
     return context
 
   def try_run_tests(self, targets, *passthru_args, **options):
@@ -70,34 +98,6 @@ def try_run_tests(self, targets, *passthru_args, **options):
     except ErrorWhileTesting as e:
       return e.failed_targets
 
-  def _prepare_test_run(self, targets, *passthru_args, **options):
-    test_options = {
-      'colors': False,
-      'level': 'info'  # When debugging a test failure it may be helpful to set this to 'debug'.
-    }
-    test_options.update(options)
-    self.set_options(**test_options)
-
-    # The easiest way to create products required by the PythonTest task is to
-    # execute the relevant tasks.
-    si_task_type = self.synthesize_task_subtype(SelectInterpreter, 'si_scope')
-    rr_task_type = self.synthesize_task_subtype(ResolveRequirements, 'rr_scope')
-    gs_task_type = self.synthesize_task_subtype(GatherSources, 'gs_scope')
-    pp_task_type = self.synthesize_task_subtype(PytestPrepCoverageVersionPinned, 'pp_scope')
-    context = self.context(for_task_types=[si_task_type, rr_task_type, gs_task_type, pp_task_type],
-                           target_roots=targets,
-                           passthru_args=list(passthru_args))
-    si_task_type(context, os.path.join(self.pants_workdir, 'si')).execute()
-    rr_task_type(context, os.path.join(self.pants_workdir, 'rr')).execute()
-    gs_task_type(context, os.path.join(self.pants_workdir, 'gs')).execute()
-    pp_task_type(context, os.path.join(self.pants_workdir, 'pp')).execute()
-    return context
-
-  def _do_run_tests(self, context):
-    pytest_run_task = self.create_task(context)
-    with pushd(self.build_root):
-      pytest_run_task.execute()
-
 
 class PytestTestEmpty(PytestTestBase):
   def test_empty(self):
@@ -137,12 +137,10 @@ def test_green(self):
     self.addCleanup(self.AlwaysFailingPexRunPytestRun.set_up())
 
   def do_test_failed_pex_run(self):
-    with self.assertRaises(TaskError) as cm:
-      self.run_tests(targets=[self.tests])
-
     # We expect a `TaskError` as opposed to an `ErrorWhileTesting` since execution fails outside
     # the actual test run.
-    self.assertEqual(TaskError, type(cm.exception))
+    with self.assertRaises(TaskError):
+      self.run_tests(targets=[self.tests])
 
   def test_failed_pex_run(self):
     self.do_test_failed_pex_run()
@@ -276,6 +274,18 @@ def test_use_two(self):
   dependencies = ["lib:core"],
   coverage = ["core"],
 )
+
+python_tests(
+  name = "py23-tests",
+  sources = ["py23_test_source.py"],
+  compatibility = ['CPython>=2.7,<4'],
+)
+
+python_tests(
+  name = "py3-and-more-tests",
+  sources = ["py3_and_more_test_source.py"],
+  compatibility = ['CPython>=3.6'],
+)
 '''
     )
@@ -361,6 +371,9 @@ def null():
         assert(False)
         """))
 
+    self.create_file('tests/py23_test_source.py', '')
+    self.create_file('tests/py3_and_more_test_source.py', '')
+
     self.create_file('tests/conftest.py', self._CONFTEST_CONTENT)
 
     self.app = self.target('tests:app')
@@ -377,6 +390,9 @@ def null():
     self.all = self.target('tests:all')
     self.all_with_cov = self.target('tests:all-with-coverage')
 
+    self.py23 = self.target('tests:py23-tests')
+    self.py3_and_more = self.target('tests:py3-and-more-tests')
+
   @ensure_cached(PytestRun, expected_num_artifacts=0)
   def test_error(self):
     """Test that a test that errors rather than fails shows up in ErrorWhileTesting."""
@@ -389,6 +405,10 @@ def test_error_outside_function(self):
     self.run_failing_tests(targets=[self.red, self.green, self.failure_outside_function],
                            failed_targets=[self.red, self.failure_outside_function])
 
+  @ensure_cached(PytestRun, expected_num_artifacts=1)
+  def test_succeeds_for_intersecting_unique_constraints(self):
+    self.run_tests(targets=[self.py23, self.py3_and_more])
+
   @ensure_cached(PytestRun, expected_num_artifacts=1)
   def test_green(self):
     self.run_tests(targets=[self.green])
@@ -744,40 +764,40 @@ def marking_tests(self):
       import os
       import pytest
       import unittest
-      
-      
+
+
       class PassthruTest(unittest.TestCase):
         def touch(self, path):
           with open(path, 'wb') as fp:
             fp.close()
-      
+
         def mark_test_run(self):
           caller_frame_record = inspect.stack()[1]
-      
+
           # For the slot breakdown of a frame record tuple, see:
           # https://docs.python.org/2/library/inspect.html#the-interpreter-stack
          _, _, _, caller_func_name, _, _ = caller_frame_record
-      
+
           marker_file = os.path.join({marker_dir!r}, caller_func_name)
           self.touch(marker_file)
-      
+
         def test_one(self):
           self.mark_test_run()
-      
+
         @pytest.mark.purple
         def test_two(self):
           self.mark_test_run()
-      
+
         def test_three(self):
           self.mark_test_run()
-      
+
         @pytest.mark.red
        def test_four(self):
           self.mark_test_run()
-      
+
         @pytest.mark.green
         def test_five(self):
-          self.mark_test_run()          
+          self.mark_test_run()
       """.format(marker_dir=marker_dir)))
 
   def assert_mark(exists, name):
diff --git a/tests/python/pants_test/backend/python/tasks/test_pytest_run_integration.py b/tests/python/pants_test/backend/python/tasks/test_pytest_run_integration.py
index f574614e2a3..9c4e835a435 100644
--- a/tests/python/pants_test/backend/python/tasks/test_pytest_run_integration.py
+++ b/tests/python/pants_test/backend/python/tasks/test_pytest_run_integration.py
@@ -20,10 +20,15 @@ class PytestRunIntegrationTest(PantsRunIntegrationTest):
   testproject = 'testprojects/src/python/interpreter_selection'
 
+  # NB: Occasionally running a test in CI may take multiple seconds. The tests in this file which
+  # use the --timeout-default argument are not testing for performance regressions, but just for
+  # correctness of timeout behavior, so we set this to a higher value to avoid flakiness.
+  _non_flaky_timeout_seconds = 5
+
   def test_pytest_run_timeout_succeeds(self):
     pants_run = self.run_pants(['clean-all',
                                 'test.pytest',
-                                '--timeout-default=2',
+                                '--timeout-default={}'.format(self._non_flaky_timeout_seconds),
                                 'testprojects/tests/python/pants/timeout:exceeds_timeout',
                                 '--',
                                 '-kwithin_timeout'])
@@ -62,7 +67,7 @@ def test_pytest_run_timeout_cant_terminate(self):
     pants_run = self.run_pants(['clean-all',
                                 'test.pytest',
                                 '--timeout-terminate-wait=2',
-                                '--timeout-default=5',
+                                '--timeout-default={}'.format(self._non_flaky_timeout_seconds),
                                 '--coverage=auto',
                                 '--cache-ignore',
                                 'testprojects/tests/python/pants/timeout:ignores_terminate'])
@@ -87,7 +92,7 @@ def test_pytest_run_killed_by_signal(self):
     pants_run = self.run_pants(['clean-all',
                                 'test.pytest',
                                 '--timeout-terminate-wait=2',
-                                '--timeout-default=5',
+                                '--timeout-default={}'.format(self._non_flaky_timeout_seconds),
                                 '--cache-ignore',
                                 'testprojects/tests/python/pants/timeout:terminates_self'])
     end = time.time()
diff --git a/tests/python/pants_test/task_test_base.py b/tests/python/pants_test/task_test_base.py
index 179af63ba11..42e1aa64c0b 100644
--- a/tests/python/pants_test/task_test_base.py
+++ b/tests/python/pants_test/task_test_base.py
@@ -353,6 +353,8 @@ def invoke_tasks(self, target_closure=None, **context_kwargs):
       `for_task_types`.
     :return: A datatype containing the created context and the task instances which were executed.
     :rtype: :class:`DeclarativeTaskTestMixin.TaskInvocationResult`
+    :raises: If any exception is raised during task execution, the context will be attached to the
+             exception object as the attribute '_context' with setattr() before re-raising.
     """
     run_before_synthesized_task_types = self._synthesize_task_types(tuple(self.run_before_task_types))
     run_after_synthesized_task_types = self._synthesize_task_types(tuple(self.run_after_task_types))
@@ -380,8 +382,13 @@ def invoke_tasks(self, target_closure=None, **context_kwargs):
       current_task_instance
     ] + run_after_task_instances
 
-    for tsk in all_task_instances:
-      tsk.execute()
+    try:
+      for tsk in all_task_instances:
+        tsk.execute()
+    except Exception as e:
+      # TODO(#7644): Remove this hack before anything more starts relying on it!
+      setattr(e, '_context', context)
+      raise e
 
     return self.TaskInvocationResult(
       context=context,
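
For readers skimming the patch, the sketch below illustrates the runtime mechanism that the new `_constrain_pytest_interpreter_search_path()` helper relies on. It is a minimal, standalone sketch rather than Pants code: the helper names and the `dist/pytest.pex` and `/usr/bin/python3.6` paths are hypothetical, and the only facts assumed are the pex runtime variables (`PEX_PYTHON`, `PEX_PYTHON_PATH`, `PEX_IGNORE_RCFILES`) used exactly as the diff sets them before spawning the test run.

```python
import os
import subprocess


def constrained_pex_env(interpreter_binary):
  """Mirror the env returned by _constrain_pytest_interpreter_search_path().

  PEX_PYTHON and PEX_PYTHON_PATH both point at the selected interpreter's binary, and
  PEX_IGNORE_RCFILES prevents a user's ~/.pexrc from overriding that choice at runtime.
  """
  return {
    'PEX_IGNORE_RCFILES': '1',
    'PEX_PYTHON': interpreter_binary,
    'PEX_PYTHON_PATH': interpreter_binary,
  }


def run_pex_with_selected_interpreter(pex_path, interpreter_binary, args):
  # Start from the ambient environment, then overlay the constraint, matching the
  # `env.update(self._constrain_pytest_interpreter_search_path())` calls in pytest_run.py.
  env = dict(os.environ)
  env.update(constrained_pex_env(interpreter_binary))
  return subprocess.call([pex_path] + list(args), env=env)


if __name__ == '__main__':
  # Hypothetical paths, for illustration only.
  run_pex_with_selected_interpreter('dist/pytest.pex', '/usr/bin/python3.6', ['-k', 'within_timeout'])
```

The point of overlaying rather than replacing the environment is that existing PEX_* settings (for example PEX_PROFILE_FILENAME, which the task also sets) are preserved while the interpreter search path is pinned to the single interpreter the pex was built against.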