diff --git a/build-support/bin/ci.sh b/build-support/bin/ci.sh
index cd6748c6083..112f514002e 100755
--- a/build-support/bin/ci.sh
+++ b/build-support/bin/ci.sh
@@ -153,8 +153,8 @@ fi
if [[ "${skip_internal_backends:-false}" == "false" ]]; then
banner "Running internal backend python tests"
(
- PANTS_PYTHON_TEST_FAILSOFT=1 \
- ./pants.pex ${PANTS_ARGS[@]} test \
+ ./pants.pex ${PANTS_ARGS[@]} test.pytest \
+ --fail-slow \
$(./pants.pex list pants-plugins/tests/python:: | \
xargs ./pants.pex filter --filter-type=python_tests)
) || die "Internal backend python test failure"
@@ -166,9 +166,10 @@ if [[ "${skip_python:-false}" == "false" ]]; then
fi
banner "Running core python tests${shard_desc}"
(
- PANTS_PY_COVERAGE=paths:pants/ \
- PANTS_PYTHON_TEST_FAILSOFT=1 \
- ./pants.pex ${PANTS_ARGS[@]} test.pytest --shard=${python_unit_shard} \
+ ./pants.pex ${PANTS_ARGS[@]} test.pytest \
+ --fail-slow \
+ --coverage=paths:pants/ \
+ --shard=${python_unit_shard} \
$(./pants.pex list tests/python:: | \
xargs ./pants.pex filter --filter-type=python_tests | \
grep -v integration)
@@ -182,7 +183,7 @@ if [[ "${skip_contrib:-false}" == "false" ]]; then
# test (ie: pants_test.contrib) namespace packages.
# TODO(John Sirois): Get to the bottom of the issue and kill --no-fast, see:
# https://github.com/pantsbuild/pants/issues/1149
- PANTS_PYTHON_TEST_FAILSOFT=1 ./pants.pex ${PANTS_ARGS[@]} test.pytest --no-fast contrib::
+ ./pants.pex ${PANTS_ARGS[@]} test.pytest --fail-slow --no-fast contrib::
) || die "Contrib python test failure"
fi
@@ -192,11 +193,10 @@ if [[ "${skip_integration:-false}" == "false" ]]; then
fi
banner "Running Pants Integration tests${shard_desc}"
(
- PANTS_PYTHON_TEST_FAILSOFT=1 \
- ./pants.pex ${PANTS_ARGS[@]} test.pytest --shard=${python_intg_shard} \
- $(./pants.pex list tests/python:: | \
- xargs ./pants.pex filter --filter-type=python_tests | \
- grep integration)
+ ./pants.pex ${PANTS_ARGS[@]} test.pytest --fail-slow --shard=${python_intg_shard} \
+ $(./pants.pex list tests/python:: | \
+ xargs ./pants.pex filter --filter-type=python_tests | \
+ grep integration)
) || die "Pants Integration test failure"
fi
diff --git a/examples/src/python/example/readme.md b/examples/src/python/example/readme.md
index f1804f85028..20bbe7aab4f 100644
--- a/examples/src/python/example/readme.md
+++ b/examples/src/python/example/readme.md
@@ -331,11 +331,11 @@ parameters:
### Code Coverage
-To get code coverage data, set the `PANTS_PY_COVERAGE` environment variable. If you don't
-configured coverage data, it doesn't do much:
+To get code coverage data, set the `--coverage` flag in the `test.pytest` scope.
+If you haven't configured coverage data, it doesn't do much:
:::bash
- $ PANTS_PY_COVERAGE=1 ./pants test examples/tests/python/example_test/hello/greet:greet
+ $ ./pants test.pytest --coverage=1 examples/tests/python/example_test/hello/greet:greet
...lots of build output...
============ 2 passed in 0.23 seconds ============
Name Stmts Miss Cover
@@ -357,12 +357,11 @@ There are 2 alternatives to specifying coverage attributes on all
attributes in-play to form a global coverage specification for the test
run.
-`PANTS_PY_COVERAGE=modules:[module1](,...,[moduleN])` allows
-specification of package or module names to track coverage against. For
-example:
+`--coverage=modules:[module1](,...,[moduleN])` allows specification of
+package or module names to track coverage against. For example:
:::bash
- $ PANTS_PY_COVERAGE=modules:example.hello.greet,example.hello.main ./pants test examples/tests/python/example_test/hello/greet:greet
+ $ ./pants test.pytest --coverage=modules:example.hello.greet,example.hello.main examples/tests/python/example_test/hello/greet:greet
...lots of build output...
============ 2 passed in 0.22 seconds ============
Name Stmts Miss Branch BrMiss Cover
@@ -379,7 +378,7 @@ Similarly, a set of base paths can be specified containing the code for
coverage to be measured over:
:::bash
- $ PANTS_PY_COVERAGE=paths:example/hello ./pants test examples/tests/python/example_test/hello/greet:greet
+ $ ./pants test.pytest --coverage=paths:example/hello examples/tests/python/example_test/hello/greet:greet
...lots of build output...
============ 2 passed in 0.23 seconds ============
Name Stmts Miss Branch BrMiss Cover
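
The three `--coverage` value forms shown above (`1`, `modules:...`, and `paths:...`) share a simple shape: an optional scheme prefix followed by a comma-separated list. The helper below is a hypothetical illustration of how such a value decomposes; it is not Pants' actual parser (that logic lives in `pytest_run.py`):

    :::python
    # Hypothetical helper (not Pants code) showing how a --coverage value
    # such as 'modules:example.hello.greet,example.hello.main' breaks down.
    def parse_coverage_spec(spec):
      if ':' not in spec:
        return ('simple', [])              # e.g. --coverage=1
      scheme, _, rest = spec.partition(':')
      return (scheme, rest.split(','))     # ('modules', [...]) or ('paths', [...])


    print(parse_coverage_spec('modules:example.hello.greet,example.hello.main'))
    print(parse_coverage_spec('paths:example/hello'))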
diff --git a/src/docs/invoking.md b/src/docs/invoking.md
index d2460935d5d..c134a8c2132 100644
--- a/src/docs/invoking.md
+++ b/src/docs/invoking.md
@@ -91,8 +91,6 @@ to Pants.
* `PANTS_IVY_CACHE_DIR`
* `PANTS_IVY_SETTINGS_XML`
* `PANTS_LEAVE_CHROOT`
-* `PANTS_PY_COVERAGE`
-* `PANTS_PYTHON_TEST_FAILSOFT`
* `PANTS_VERBOSE`
### `pants.ini` Settings File
diff --git a/src/python/pants/backend/python/tasks/BUILD b/src/python/pants/backend/python/tasks/BUILD
index 4a91203b523..19d72a94b2b 100644
--- a/src/python/pants/backend/python/tasks/BUILD
+++ b/src/python/pants/backend/python/tasks/BUILD
@@ -23,6 +23,7 @@ python_library(
'src/python/pants/base:address_lookup_error',
'src/python/pants/base:build_environment',
'src/python/pants/base:build_graph',
+ 'src/python/pants/base:deprecated',
'src/python/pants/base:exceptions',
'src/python/pants/base:generator',
'src/python/pants/base:target',
diff --git a/src/python/pants/backend/python/tasks/pytest_run.py b/src/python/pants/backend/python/tasks/pytest_run.py
index 7aa5ec68ed8..a5467efd4d3 100644
--- a/src/python/pants/backend/python/tasks/pytest_run.py
+++ b/src/python/pants/backend/python/tasks/pytest_run.py
@@ -25,6 +25,7 @@
from pants.backend.python.python_setup import PythonRepos, PythonSetup
from pants.backend.python.targets.python_tests import PythonTests
from pants.backend.python.tasks.python_task import PythonTask
+from pants.base.deprecated import deprecated
from pants.base.exceptions import TaskError, TestFailedTaskError
from pants.base.target import Target
from pants.base.workunit import WorkUnit
@@ -67,6 +68,43 @@ def failed_targets(self):
return self._failed_targets
+def deprecated_env_accessors(removal_version, **replacement_mapping):
+ """Generates accessors for legacy env ver/replacement option pairs.
+
+ The generated accessors issue a deprecation warning when the deprecated env var is present and
+ enjoy the "compile" time removal forcing that normal @deprecated functions and methods do.
+ """
+ def create_accessor(env_name, option_name):
+ @deprecated(removal_version=removal_version,
+ hint_message='Use the {option} option instead of the deprecated {env} environment '
+ 'variable'.format(option=option_name, env=env_name))
+ def deprecated_accessor():
+ return os.environ.get(env_name)
+
+ def accessor(self):
+ value = None
+ if env_name in os.environ:
+ value = deprecated_accessor()
+ sanitized_option_name = option_name.lstrip('-').replace('-', '_')
+ value = self.get_options()[sanitized_option_name] or value
+ return value
+ return accessor
+
+ def decorator(clazz):
+ for env_name, option_name in replacement_mapping.items():
+ setattr(clazz, 'get_DEPRECATED_{}'.format(env_name), create_accessor(env_name, option_name))
+ return clazz
+
+ return decorator
+
+
+# TODO(John Sirois): Replace this helper and use of the accessors it generates with direct options
+# access prior to releasing 0.0.35
+@deprecated_env_accessors(removal_version='0.0.35',
+ JUNIT_XML_BASE='--junit-xml-dir',
+ PANTS_PROFILE='--profile',
+ PANTS_PYTHON_TEST_FAILSOFT='--fail-slow',
+ PANTS_PY_COVERAGE='--coverage')
class PytestRun(PythonTask):
_TESTING_TARGETS = [
# Note: the requirement restrictions on pytest and pytest-cov match those in requirements.txt,
@@ -90,6 +128,17 @@ def register_options(cls, register):
help='Run all tests in a single chroot. If turned off, each test target will '
'create a new chroot, which will be much slower, but more correct, as the'
'isolation verifies that all dependencies are correctly declared.')
+ register('--fail-slow', action='store_true', default=False,
+ help='Do not fail fast on the first test failure in a suite; instead run all tests '
+ 'and report errors only after all tests complete.')
+ register('--junit-xml-dir', metavar='<DIR>',
+ help='Specifying a directory causes junit xml results files to be emitted under '
+ 'that dir for each test run.')
+ register('--profile', metavar='<FILE>',
+ help="Specifying a file path causes tests to be profiled with the profiling data "
+ "emitted to that file (prefix). Note that tests may run in a different cwd, so "
+ "it's best to use an absolute path to make it easy to find the subprocess "
+ "profiles later.")
register('--options', action='append', help='Pass these options to pytest.')
register('--coverage',
help='Emit coverage information for specified paths/modules. Value has two forms: '
@@ -104,10 +153,6 @@ def supports_passthru_args(cls):
def execute(self):
def is_python_test(target):
- # Note that we ignore PythonTestSuite, because we'll see the PythonTests targets
- # it depends on anyway,so if we don't we'll end up running the tests twice.
- # TODO(benjy): Once we're off the 'build' command we can get rid of python_test_suite,
- # or make it an alias of dependencies().
return isinstance(target, PythonTests)
test_targets = list(filter(is_python_test, self.context.targets()))
@@ -131,8 +176,8 @@ def run_tests(self, targets, workunit):
else:
results = {}
# Coverage often throws errors despite tests succeeding, so force failsoft in that case.
- fail_hard = ('PANTS_PYTHON_TEST_FAILSOFT' not in os.environ and
- 'PANTS_PY_COVERAGE' not in os.environ)
+ fail_hard = (not self.get_DEPRECATED_PANTS_PYTHON_TEST_FAILSOFT() and
+ not self.get_DEPRECATED_PANTS_PY_COVERAGE())
for target in targets:
if isinstance(target, PythonTests):
rv = self._do_run_tests([target], workunit)
@@ -203,7 +248,7 @@ def pytest_collection_modifyitems(session, config, items):
@contextmanager
def _maybe_emit_junit_xml(self, targets):
args = []
- xml_base = os.getenv('JUNIT_XML_BASE')
+ xml_base = self.get_DEPRECATED_JUNIT_XML_BASE()
if xml_base and targets:
xml_base = os.path.realpath(xml_base)
xml_path = os.path.join(xml_base, Target.maybe_readable_identify(targets) + '.xml')
@@ -332,7 +377,7 @@ def is_python_lib(tgt):
@contextmanager
def _maybe_emit_coverage_data(self, targets, chroot, pex, workunit):
- coverage = os.environ.get('PANTS_PY_COVERAGE')
+ coverage = self.get_DEPRECATED_PANTS_PY_COVERAGE()
if coverage is None:
yield []
return
@@ -432,12 +477,9 @@ def _do_run_tests_with_args(self, pex, workunit, args):
env = {
'PYTHONUNBUFFERED': '1',
}
- # If profiling a test run, this will enable profiling on the test code itself.
- # Note that tests may run in a different cwd, so it's best to set PANTS_PROFILE
- # to an absolute path to make it easy to find the subprocess profiles later.
- if 'PANTS_PROFILE' in os.environ:
- env['PEX_PROFILE'] = '{0}.subprocess.{1:.6f}'.format(os.environ['PANTS_PROFILE'],
- time.time())
+ profile = self.get_DEPRECATED_PANTS_PROFILE()
+ if profile:
+ env['PEX_PROFILE'] = '{0}.subprocess.{1:.6f}'.format(profile, time.time())
with environment_as(**env):
rc = self._pex_run(pex, workunit, args=args, setsid=True)
return PythonTestResult.rc(rc)
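
The `deprecated_env_accessors` decorator above reduces to a single precedence rule: read the deprecated environment variable (emitting a deprecation warning if it is set), then let an explicitly passed option value override it. Below is a minimal standalone sketch of that rule, using a plain dict in place of Pants' options object and `warnings` in place of Pants' deprecation machinery; the names are illustrative only:

    :::python
    import os
    import warnings


    def coverage_value(options):
      """Prefer the new --coverage option; fall back to the deprecated env var."""
      value = None
      if 'PANTS_PY_COVERAGE' in os.environ:
        warnings.warn('PANTS_PY_COVERAGE is deprecated; use --coverage instead.',
                      DeprecationWarning)
        value = os.environ['PANTS_PY_COVERAGE']
      # A truthy option value always wins over the deprecated env var.
      return options.get('coverage') or value


    print(coverage_value({'coverage': 'paths:pants/'}))  # -> 'paths:pants/'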
diff --git a/tests/python/pants_test/backend/python/tasks/test_pytest_run.py b/tests/python/pants_test/backend/python/tasks/test_pytest_run.py
index ea06ab25577..4b3ca28a244 100644
--- a/tests/python/pants_test/backend/python/tasks/test_pytest_run.py
+++ b/tests/python/pants_test/backend/python/tasks/test_pytest_run.py
@@ -158,7 +158,7 @@ def test_red(self):
def test_mixed(self):
self.run_failing_tests(targets=[self.green, self.red], failed_targets=[self.red])
- def test_junit_xml(self):
+ def assert_expected_junit_xml(self, report_basedir, **kwargs):
# We expect xml of the following form:
# <testsuite errors=[Ne] failures=[Nf] skips=[Ns] tests=[Nt] ...>
#   <testcase classname="..." name="..." .../>
@@ -166,28 +166,32 @@ def test_junit_xml(self):
# ...
#   <testcase classname="..." name="..."><failure ...>...</failure></testcase>
# </testsuite>
+ self.run_failing_tests(targets=[self.green, self.red], failed_targets=[self.red], **kwargs)
- report_basedir = os.path.join(self.build_root, 'dist', 'junit')
- with environment_as(JUNIT_XML_BASE=report_basedir):
- self.run_failing_tests(targets=[self.red, self.green], failed_targets=[self.red])
+ files = glob.glob(os.path.join(report_basedir, '*.xml'))
+ self.assertEqual(1, len(files), 'Expected 1 file, found: {}'.format(files))
+ junit_xml = files[0]
+ root = DOM.parse(junit_xml).documentElement
- files = glob.glob(os.path.join(report_basedir, '*.xml'))
- self.assertEqual(1, len(files))
- junit_xml = files[0]
- with open(junit_xml) as fp:
- print(fp.read())
+ self.assertEqual(2, len(root.childNodes))
+ self.assertEqual(2, int(root.getAttribute('tests')))
+ self.assertEqual(1, int(root.getAttribute('failures')))
+ self.assertEqual(0, int(root.getAttribute('errors')))
+ self.assertEqual(0, int(root.getAttribute('skips')))
- root = DOM.parse(junit_xml).documentElement
- self.assertEqual(2, len(root.childNodes))
- self.assertEqual(2, int(root.getAttribute('tests')))
- self.assertEqual(1, int(root.getAttribute('failures')))
- self.assertEqual(0, int(root.getAttribute('errors')))
- self.assertEqual(0, int(root.getAttribute('skips')))
+ children_by_test_name = dict((elem.getAttribute('name'), elem) for elem in root.childNodes)
+ self.assertEqual(0, len(children_by_test_name['test_one'].childNodes))
+ self.assertEqual(1, len(children_by_test_name['test_two'].childNodes))
+ self.assertEqual('failure', children_by_test_name['test_two'].firstChild.nodeName)
- children_by_test_name = dict((elem.getAttribute('name'), elem) for elem in root.childNodes)
- self.assertEqual(0, len(children_by_test_name['test_one'].childNodes))
- self.assertEqual(1, len(children_by_test_name['test_two'].childNodes))
- self.assertEqual('failure', children_by_test_name['test_two'].firstChild.nodeName)
+ def test_junit_xml_option(self):
+ basedir = os.path.join(self.build_root, 'dist', 'junit_option')
+ self.assert_expected_junit_xml(basedir, junit_xml_dir=basedir)
+
+ def test_junit_xml_env(self):
+ basedir = os.path.join(self.build_root, 'dist', 'junit_env')
+ with environment_as(JUNIT_XML_BASE=basedir):
+ self.assert_expected_junit_xml(basedir)
def coverage_data_file(self):
return os.path.join(self.build_root, '.coverage')
@@ -200,68 +204,113 @@ def load_coverage_data(self, path):
_, all_statements, not_run_statements, _ = coverage_data.analysis(path)
return all_statements, not_run_statements
- def test_coverage_simple(self):
+ def assert_expected_coverage(self, **kwargs):
self.assertFalse(os.path.isfile(self.coverage_data_file()))
covered_file = os.path.join(self.build_root, 'lib', 'core.py')
+
+ self.run_tests(targets=[self.green], **kwargs)
+ all_statements, not_run_statements = self.load_coverage_data(covered_file)
+ self.assertEqual([1, 2, 5, 6], all_statements)
+ self.assertEqual([6], not_run_statements)
+
+ self.run_failing_tests(targets=[self.red], failed_targets=[self.red], **kwargs)
+ all_statements, not_run_statements = self.load_coverage_data(covered_file)
+ self.assertEqual([1, 2, 5, 6], all_statements)
+ self.assertEqual([2], not_run_statements)
+
+ self.run_failing_tests(targets=[self.green, self.red], failed_targets=[self.red], **kwargs)
+ all_statements, not_run_statements = self.load_coverage_data(covered_file)
+ self.assertEqual([1, 2, 5, 6], all_statements)
+ self.assertEqual([], not_run_statements)
+
+ # The all target has no coverage attribute and the code under test does not follow the
+ # auto-discover pattern so we should get no coverage.
+ self.run_failing_tests(targets=[self.all], failed_targets=[self.all], **kwargs)
+ all_statements, not_run_statements = self.load_coverage_data(covered_file)
+ self.assertEqual([1, 2, 5, 6], all_statements)
+ self.assertEqual([1, 2, 5, 6], not_run_statements)
+
+ self.run_failing_tests(targets=[self.all_with_coverage],
+ failed_targets=[self.all_with_coverage],
+ **kwargs)
+ all_statements, not_run_statements = self.load_coverage_data(covered_file)
+ self.assertEqual([1, 2, 5, 6], all_statements)
+ self.assertEqual([], not_run_statements)
+
+ def test_coverage_simple_option(self):
+ # TODO(John Sirois): Consider eliminating support for "simple" coverage or at least formalizing
+ # the coverage option value that turns this on to "1" or "all" or "simple", i.e. anything formal.
+ self.assert_expected_coverage(coverage='1')
+
+ def test_coverage_simple_env(self):
with environment_as(PANTS_PY_COVERAGE='1'):
- self.run_tests(targets=[self.green])
- all_statements, not_run_statements = self.load_coverage_data(covered_file)
- self.assertEqual([1, 2, 5, 6], all_statements)
- self.assertEqual([6], not_run_statements)
-
- self.run_failing_tests(targets=[self.red], failed_targets=[self.red])
- all_statements, not_run_statements = self.load_coverage_data(covered_file)
- self.assertEqual([1, 2, 5, 6], all_statements)
- self.assertEqual([2], not_run_statements)
-
- self.run_failing_tests(targets=[self.green, self.red], failed_targets=[self.red])
- all_statements, not_run_statements = self.load_coverage_data(covered_file)
- self.assertEqual([1, 2, 5, 6], all_statements)
- self.assertEqual([], not_run_statements)
-
- # The all target has no coverage attribute and the code under test does not follow the
- # auto-discover pattern so we should get no coverage.
- self.run_failing_tests(targets=[self.all], failed_targets=[self.all])
- all_statements, not_run_statements = self.load_coverage_data(covered_file)
- self.assertEqual([1, 2, 5, 6], all_statements)
- self.assertEqual([1, 2, 5, 6], not_run_statements)
-
- self.run_failing_tests(targets=[self.all_with_coverage], failed_targets=[self.all_with_coverage])
- all_statements, not_run_statements = self.load_coverage_data(covered_file)
- self.assertEqual([1, 2, 5, 6], all_statements)
- self.assertEqual([], not_run_statements)
-
- def test_coverage_modules(self):
+ self.assert_expected_coverage()
+
+ def assert_modules_dne(self, **kwargs):
self.assertFalse(os.path.isfile(self.coverage_data_file()))
covered_file = os.path.join(self.build_root, 'lib', 'core.py')
+
+ # modules: should trump .coverage
+ self.run_failing_tests(targets=[self.green, self.red], failed_targets=[self.red], **kwargs)
+ all_statements, not_run_statements = self.load_coverage_data(covered_file)
+ self.assertEqual([1, 2, 5, 6], all_statements)
+ self.assertEqual([1, 2, 5, 6], not_run_statements)
+
+ def test_coverage_modules_dne_option(self):
+ self.assert_modules_dne(coverage='modules:does_not_exist,nor_does_this')
+
+ def test_coverage_modules_dne_env(self):
with environment_as(PANTS_PY_COVERAGE='modules:does_not_exist,nor_does_this'):
- # modules: should trump .coverage
- self.run_failing_tests(targets=[self.green, self.red], failed_targets=[self.red])
- all_statements, not_run_statements = self.load_coverage_data(covered_file)
- self.assertEqual([1, 2, 5, 6], all_statements)
- self.assertEqual([1, 2, 5, 6], not_run_statements)
+ self.assert_modules_dne()
+
+ def assert_modules(self, **kwargs):
+ self.assertFalse(os.path.isfile(self.coverage_data_file()))
+ covered_file = os.path.join(self.build_root, 'lib', 'core.py')
+
+ self.run_failing_tests(targets=[self.all], failed_targets=[self.all], **kwargs)
+ all_statements, not_run_statements = self.load_coverage_data(covered_file)
+ self.assertEqual([1, 2, 5, 6], all_statements)
+ self.assertEqual([], not_run_statements)
+
+ def test_coverage_modules_option(self):
+ self.assert_modules(coverage='modules:core')
+
+ def test_coverage_modules_env(self):
with environment_as(PANTS_PY_COVERAGE='modules:core'):
- self.run_failing_tests(targets=[self.all], failed_targets=[self.all])
- all_statements, not_run_statements = self.load_coverage_data(covered_file)
- self.assertEqual([1, 2, 5, 6], all_statements)
- self.assertEqual([], not_run_statements)
+ self.assert_modules()
- def test_coverage_paths(self):
+ def assert_paths_dne(self, **kwargs):
self.assertFalse(os.path.isfile(self.coverage_data_file()))
covered_file = os.path.join(self.build_root, 'lib', 'core.py')
+
+ # paths: should trump .coverage
+ self.run_failing_tests(targets=[self.green, self.red], failed_targets=[self.red], **kwargs)
+ all_statements, not_run_statements = self.load_coverage_data(covered_file)
+ self.assertEqual([1, 2, 5, 6], all_statements)
+ self.assertEqual([1, 2, 5, 6], not_run_statements)
+
+ def test_coverage_paths_dne_option(self):
+ self.assert_paths_dne(coverage='paths:does_not_exist/,nor_does_this/')
+
+ def test_coverage_paths_dne_env(self):
with environment_as(PANTS_PY_COVERAGE='paths:does_not_exist/,nor_does_this/'):
- # paths: should trump .coverage
- self.run_failing_tests(targets=[self.green, self.red], failed_targets=[self.red])
- all_statements, not_run_statements = self.load_coverage_data(covered_file)
- self.assertEqual([1, 2, 5, 6], all_statements)
- self.assertEqual([1, 2, 5, 6], not_run_statements)
+ self.assert_paths_dne()
+
+ def assert_paths(self, **kwargs):
+ self.assertFalse(os.path.isfile(self.coverage_data_file()))
+ covered_file = os.path.join(self.build_root, 'lib', 'core.py')
+
+ self.run_failing_tests(targets=[self.all], failed_targets=[self.all], **kwargs)
+ all_statements, not_run_statements = self.load_coverage_data(covered_file)
+ self.assertEqual([1, 2, 5, 6], all_statements)
+ self.assertEqual([], not_run_statements)
+
+ def test_coverage_paths_option(self):
+ self.assert_paths(coverage='paths:core.py')
+
+ def test_coverage_paths_env(self):
with environment_as(PANTS_PY_COVERAGE='paths:core.py'):
- self.run_failing_tests(targets=[self.all], failed_targets=[self.all])
- all_statements, not_run_statements = self.load_coverage_data(covered_file)
- self.assertEqual([1, 2, 5, 6], all_statements)
- self.assertEqual([], not_run_statements)
+ self.assert_paths()
def test_sharding(self):
self.run_failing_tests(targets=[self.red, self.green], failed_targets=[self.red], shard='0/2')
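
The new tests above exercise each feature twice: once through the new `test.pytest` option (passed as a keyword to the test helper) and once through the deprecated environment variable via `environment_as`. The real `environment_as` helper is part of Pants' utility code; the sketch below only illustrates the idea of temporarily overriding environment variables and is not the Pants implementation:

    :::python
    import os
    from contextlib import contextmanager


    @contextmanager
    def environment_as(**kwargs):
      """Temporarily set environment variables, restoring the originals on exit."""
      saved = {key: os.environ.get(key) for key in kwargs}
      os.environ.update(kwargs)
      try:
        yield
      finally:
        for key, old in saved.items():
          if old is None:
            os.environ.pop(key, None)
          else:
            os.environ[key] = old


    with environment_as(PANTS_PY_COVERAGE='1'):
      assert os.environ['PANTS_PY_COVERAGE'] == '1'
    # Whatever PANTS_PY_COVERAGE was (or was not) before is restored here.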