diff --git a/build-support/bin/release.sh b/build-support/bin/release.sh
index 1a222cc405b..4c3520546d7 100755
--- a/build-support/bin/release.sh
+++ b/build-support/bin/release.sh
@@ -98,6 +98,14 @@ function pkg_pants_install_test() {
      == "${version}" ]] || die "Installed version of pants does not match requested version!"
 }
 
+function pkg_testutil_install_test() {
+  local version=$1
+  shift
+  local PIP_ARGS=("$@")
+  pip install "${PIP_ARGS[@]}" "pantsbuild.pants.testutil==${version}" && \
+  python -c "import pants.testutil"
+}
+
 function pkg_testinfra_install_test() {
   local version=$1
   shift
diff --git a/contrib/jax_ws/tests/python/pants_test/contrib/jax_ws/tasks/BUILD b/contrib/jax_ws/tests/python/pants_test/contrib/jax_ws/tasks/BUILD
index c95d9330a84..d0b2b5946ca 100644
--- a/contrib/jax_ws/tests/python/pants_test/contrib/jax_ws/tasks/BUILD
+++ b/contrib/jax_ws/tests/python/pants_test/contrib/jax_ws/tasks/BUILD
@@ -16,6 +16,7 @@ python_tests(
   sources=['test_jax_ws_gen_integration.py'],
   dependencies=[
     'tests/python/pants_test:int-test',
+    'tests/python/pants_test/testutils:file_test_util',
     'contrib/jax_ws:wsdl_tests_directory',
   ],
   tags={'integration'},
diff --git a/contrib/scrooge/tests/python/pants_test/contrib/scrooge/tasks/BUILD b/contrib/scrooge/tests/python/pants_test/contrib/scrooge/tasks/BUILD
index 191777c8111..7181120dbae 100644
--- a/contrib/scrooge/tests/python/pants_test/contrib/scrooge/tasks/BUILD
+++ b/contrib/scrooge/tests/python/pants_test/contrib/scrooge/tasks/BUILD
@@ -7,7 +7,7 @@ python_tests(
   dependencies=[
     'contrib/scrooge/src/python/pants/contrib/scrooge/tasks:java_thrift_library_fingerprint_strategy',
     'src/python/pants/backend/codegen/thrift/java',
-    'tests/python/pants_test:task_test_base',
+    'tests/python/pants_test:test_base',
   ]
 )
 
diff --git a/src/python/pants/backend/python/subsystems/BUILD b/src/python/pants/backend/python/subsystems/BUILD
index 47f31277aa2..2465b8b6c2e 100644
--- a/src/python/pants/backend/python/subsystems/BUILD
+++ b/src/python/pants/backend/python/subsystems/BUILD
@@ -26,7 +26,7 @@ python_tests(
     'src/python/pants/backend/python/targets',
     'src/python/pants/build_graph',
     'src/python/pants/util:contextutil',
-    'tests/python/pants_test/subsystem',
+    'tests/python/pants_test/subsystem:subsystem_utils',
     'tests/python/pants_test:test_base',
   ],
 )
diff --git a/src/python/pants/releases/packages.py b/src/python/pants/releases/packages.py
index 7d225ad6377..0f8c8daa784 100644
--- a/src/python/pants/releases/packages.py
+++ b/src/python/pants/releases/packages.py
@@ -82,6 +82,7 @@ def core_packages():
   bdist_wheel_flags = ("--py-limited-api", "cp36")
   return {
     Package("pantsbuild.pants", "//src/python/pants:pants-packaged", bdist_wheel_flags=bdist_wheel_flags),
+    Package("pantsbuild.pants.testutil", "//src/python/pants/testutil:testutil_wheel"),
     Package("pantsbuild.pants.testinfra", "//tests/python/pants_test:test_infra"),
   }
 
diff --git a/src/python/pants/rules/core/BUILD b/src/python/pants/rules/core/BUILD
index 72833041c92..d807d6c6e3b 100644
--- a/src/python/pants/rules/core/BUILD
+++ b/src/python/pants/rules/core/BUILD
@@ -20,8 +20,10 @@ python_tests(
   name = "tests",
   dependencies = [
     ':core',
+    'tests/python/pants_test:test_base',
     'tests/python/pants_test:console_rule_test_base',
-    'tests/python/pants_test/engine:util'
+    'tests/python/pants_test/engine:util',
+    'tests/python/pants_test/subsystem:subsystem_utils',
   ]
 )
 
diff --git a/src/python/pants/testutil/BUILD b/src/python/pants/testutil/BUILD
new file mode 100644
index 00000000000..26aaee44401
--- /dev/null
+++ b/src/python/pants/testutil/BUILD
@@ -0,0 +1,162 @@
+# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
+# Licensed under the Apache License, Version 2.0 (see LICENSE).
+
+python_library(
+  name='testutil_wheel',
+  dependencies=[
+    ':int-test-for-export',
+    ':test_base',
+    ':file_test_util',
+    'src/python/pants/testutil/base:context_utils',
+    'src/python/pants/testutil/engine:engine_test_base',
+    'src/python/pants/testutil/engine:util',
+    'src/python/pants/testutil/jvm:jar_task_test_base',
+    'src/python/pants/testutil/jvm:nailgun_task_test_base',
+    'src/python/pants/testutil/jvm:jvm_tool_task_test_base',
+    'src/python/pants/testutil/option',
+    'src/python/pants/testutil/subsystem',
+  ],
+  provides=pants_setup_py(
+    name='pantsbuild.pants.testutil',
+    description='Test support for writing Pants plugins.',
+    namespace_packages=['pants.testutil'],
+    additional_classifiers=[
+      'Topic :: Software Development :: Testing',
+    ]
+  )
+)
+
+python_library(
+  name = 'int-test-for-export',
+  sources = [
+    'pants_run_integration_test.py'
+  ],
+  dependencies = [
+    '//:build_root',
+    '//:pants_pex',
+    ':file_test_util',
+    '3rdparty/python:ansicolors',
+    '3rdparty/python:dataclasses',
+    'src/python/pants/base:build_environment',
+    'src/python/pants/base:build_file',
+    'src/python/pants/base:exiter',
+    'src/python/pants/fs',
+    'src/python/pants/subsystem',
+    'src/python/pants/util:contextutil',
+    'src/python/pants/util:dirutil',
+    'src/python/pants/util:osutil',
+    'src/python/pants/util:process_handler',
+    'src/python/pants/util:strutil',
+    'src/python/pants:entry_point',
+  ]
+)
+
+target(
+  name = 'int-test',
+  dependencies=[
+    ':int-test-for-export',
+    # NB: 'pants_run_integration_test.py' runs ./pants in a subprocess, so test results will depend
+    # on the pants binary and all of its transitive dependencies. Adding the dependencies below is
+    # our best proxy for ensuring that any test target depending on this target will be invalidated
+    # on changes to those undeclared dependencies.
+    'src/python/pants/bin:pants_local_binary',
+    'src/rust/engine',
+    '//:pyproject',
+  ],
+)
+
+
+python_library(
+  name = 'test_base',
+  sources = ['test_base.py'],
+  dependencies = [
+    'src/python/pants/base:build_root',
+    'src/python/pants/base:cmd_line_spec_parser',
+    'src/python/pants/base:exceptions',
+    'src/python/pants/build_graph',
+    'src/python/pants/init',
+    'src/python/pants/source',
+    'src/python/pants/subsystem',
+    'src/python/pants/task',
+    'src/python/pants/testutil/base:context_utils',
+    'src/python/pants/testutil/engine:util',
+    'src/python/pants/testutil/option',
+    'src/python/pants/testutil/subsystem',
+    'src/python/pants/util:collections',
+    'src/python/pants/util:contextutil',
+    'src/python/pants/util:dirutil',
+    'src/python/pants/util:memo',
+    'src/python/pants/util:meta',
+  ]
+)
+
+python_library(
+  name = 'console_rule_test_base',
+  sources = ['console_rule_test_base.py'],
+  dependencies = [
+    ':test_base',
+    'src/python/pants/bin',
+    'src/python/pants/init',
+    'src/python/pants/util:meta',
+  ]
+)
+
+python_library(
+  name = 'task_test_base',
+  sources = ['task_test_base.py'],
+  dependencies = [
+    'src/python/pants/goal:context',
+    'src/python/pants/ivy',
+    'src/python/pants/task',
+    'src/python/pants/util:contextutil',
+    'src/python/pants/util:meta',
+    'src/python/pants/util:memo',
+    'src/python/pants/util:objects',
+    ':test_base',
+  ]
+)
+
+python_library(
+  name='file_test_util',
+  sources=['file_test_util.py'],
+)
+
+python_library(
+  name='git_util',
+  sources=['git_util.py'],
+  dependencies = [
+    'src/python/pants/base:revision',
+    'src/python/pants/scm:git',
+    'src/python/pants/util:contextutil',
+  ],
+)
+
+python_library(
+  name='interpreter_selection_utils',
+  sources=['interpreter_selection_utils.py'],
+)
+
+python_library(
+  name='mock_logger',
+  sources=['mock_logger.py'],
+  dependencies = [
+    'src/python/pants/reporting',
+  ],
+)
+
+python_library(
+  name='pexrc_util',
+  sources=['pexrc_util.py'],
+  dependencies = [
+    ':git_util',
+  ],
+)
+
+python_library(
+  name='process_test_util',
+  sources=['process_test_util.py'],
+  dependencies = [
+    '3rdparty/python:dataclasses',
+    '3rdparty/python:psutil',
+  ],
+)
diff --git a/src/python/pants/testutil/__init__.py b/src/python/pants/testutil/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/src/python/pants/testutil/base/BUILD b/src/python/pants/testutil/base/BUILD
new file mode 100644
index 00000000000..a669d9e2b94
--- /dev/null
+++ b/src/python/pants/testutil/base/BUILD
@@ -0,0 +1,14 @@
+# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
+# Licensed under the Apache License, Version 2.0 (see LICENSE).
+
+python_library(
+  name = 'context_utils',
+  sources = ['context_utils.py'],
+  dependencies = [
+    '3rdparty/python/twitter/commons:twitter.common.collections',
+    'src/python/pants/base:workunit',
+    'src/python/pants/build_graph',
+    'src/python/pants/goal:context',
+    'src/python/pants/goal:run_tracker',
+  ]
+)
diff --git a/src/python/pants/testutil/base/__init__.py b/src/python/pants/testutil/base/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/src/python/pants/testutil/base/context_utils.py b/src/python/pants/testutil/base/context_utils.py
new file mode 100644
index 00000000000..e7e357a91cc
--- /dev/null
+++ b/src/python/pants/testutil/base/context_utils.py
@@ -0,0 +1,131 @@
+# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
+# Licensed under the Apache License, Version 2.0 (see LICENSE).
+
+import logging
+import sys
+from contextlib import contextmanager
+
+from twitter.common.collections import maybe_list
+
+from pants.base.workunit import WorkUnit
+from pants.build_graph.target import Target
+from pants.goal.context import Context
+from pants.goal.run_tracker import RunTrackerLogger
+
+
+class TestContext(Context):
+  """A Context to use during unittesting.
+
+  :API: public
+
+  Stubs out various dependencies that we don't want to introduce in unit tests.
+
+  TODO: Instead of extending the runtime Context class, create a Context interface and have
+  TestContext and a runtime Context implementation extend that. This will also allow us to
+  isolate the parts of the interface that a Task is allowed to use vs. the parts that the
+  task-running machinery is allowed to use.
+  """
+  class DummyWorkUnit:
+    """A workunit stand-in that sends all output to stderr.
+
+    These outputs are typically only used by subprocesses spawned by code under test, not
+    the code under test itself, and would otherwise go into some reporting black hole.  The
+    testing framework will only display the stderr output when a test fails.
+
+    Provides no other tracking/labeling/reporting functionality. Does not require "opening"
+    or "closing".
+    """
+
+    def output(self, name):
+      return sys.stderr
+
+    def set_outcome(self, outcome):
+      return sys.stderr.write('\nWorkUnit outcome: {}\n'.format(WorkUnit.outcome_string(outcome)))
+
+  class DummyRunTracker:
+    """A runtracker stand-in that does no actual tracking."""
+
+    def __init__(self):
+      self.logger = RunTrackerLogger(self)
+
+    class DummyArtifactCacheStats:
+      def add_hits(self, cache_name, targets): pass
+
+      def add_misses(self, cache_name, targets, causes): pass
+
+    artifact_cache_stats = DummyArtifactCacheStats()
+
+    def report_target_info(self, scope, target, keys, val): pass
+
+
+  class TestLogger(logging.getLoggerClass()):
+    """A logger that converts our structured records into flat ones.
+
+    This is so we can use a regular logger in tests instead of our reporting machinery.
+    """
+
+    def makeRecord(self, name, lvl, fn, lno, msg, args, exc_info, *pos_args, **kwargs):
+      # Python 2 and Python 3 have different arguments for makeRecord().
+      # For cross-compatibility, we are unpacking arguments.
+      # See https://stackoverflow.com/questions/44329421/logging-makerecord-takes-8-positional-arguments-but-11-were-given.
+      msg = ''.join([msg] + [a[0] if isinstance(a, (list, tuple)) else a for a in args])
+      args = []
+      return super(TestContext.TestLogger, self).makeRecord(
+        name, lvl, fn, lno, msg, args, exc_info, *pos_args, **kwargs)
+
+  def __init__(self, *args, **kwargs):
+    super().__init__(*args, **kwargs)
+    logger_cls = logging.getLoggerClass()
+    try:
+      logging.setLoggerClass(self.TestLogger)
+      self._logger = logging.getLogger('test')
+    finally:
+      logging.setLoggerClass(logger_cls)
+
+  @contextmanager
+  def new_workunit(self, name, labels=None, cmd='', log_config=None):
+    """
+    :API: public
+    """
+    sys.stderr.write('\nStarting workunit {}\n'.format(name))
+    yield TestContext.DummyWorkUnit()
+
+  @property
+  def log(self):
+    """
+    :API: public
+    """
+    return self._logger
+
+  def submit_background_work_chain(self, work_chain, parent_workunit_name=None):
+    """
+    :API: public
+    """
+    # Just do the work synchronously, so we don't need a run tracker, background workers and so on.
+    for work in work_chain:
+      for args_tuple in work.args_tuples:
+        work.func(*args_tuple)
+
+  def subproc_map(self, f, items):
+    """
+    :API: public
+    """
+    # Just execute in-process.
+    return list(map(f, items))
+
+
+def create_context_from_options(options, target_roots=None, build_graph=None,
+                                build_configuration=None, address_mapper=None,
+                                console_outstream=None, workspace=None, scheduler=None):
+  """Creates a ``Context`` with the given options and no targets by default.
+
+  :param options: An :class:`pants.option.options.Option`-alike object that supports read methods.
+
+  Other params are as for ``Context``.
+  """
+  run_tracker = TestContext.DummyRunTracker()
+  target_roots = maybe_list(target_roots, Target) if target_roots else []
+  return TestContext(options=options, run_tracker=run_tracker, target_roots=target_roots,
+                     build_graph=build_graph, build_configuration=build_configuration,
+                     address_mapper=address_mapper, console_outstream=console_outstream,
+                     workspace=workspace, scheduler=scheduler)
diff --git a/src/python/pants/testutil/console_rule_test_base.py b/src/python/pants/testutil/console_rule_test_base.py
new file mode 100644
index 00000000000..4ab8c4d3210
--- /dev/null
+++ b/src/python/pants/testutil/console_rule_test_base.py
@@ -0,0 +1,135 @@
+# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
+# Licensed under the Apache License, Version 2.0 (see LICENSE).
+
+from io import StringIO
+from typing import Any, List
+
+from pants.engine.console import Console
+from pants.engine.fs import Workspace
+from pants.engine.goal import Goal
+from pants.engine.selectors import Params
+from pants.init.options_initializer import BuildConfigInitializer
+from pants.init.target_roots_calculator import TargetRootsCalculator
+from pants.option.options_bootstrapper import OptionsBootstrapper
+from pants.testutil.test_base import TestBase
+from pants.util.meta import classproperty
+
+
+class ConsoleRuleTestBase(TestBase):
+  """A baseclass useful for testing a Goal defined as a @console_rule.
+
+  :API: public
+  """
+
+  _implicit_args = tuple(['--pants-config-files=[]'])
+
+  @classproperty
+  def goal_cls(cls):
+    """Subclasses must return the Goal type to test.
+
+    :API: public
+    """
+    raise NotImplementedError()
+
+  def setUp(self):
+    super().setUp()
+
+    if not issubclass(self.goal_cls, Goal):
+      raise AssertionError('goal_cls() must return a Goal subclass, got {}'.format(self.goal_cls))
+
+  def execute_rule(self, args=tuple(), env=tuple(), exit_code=0, additional_params: List[Any]=[]):
+    """Executes the @console_rule for this test class.
+
+    :API: public
+
+    Returns the text output of the task.
+    """
+    # Create an OptionsBootstrapper for these args/env, and a captured Console instance.
+    args = self._implicit_args + (self.goal_cls.name,) + tuple(args)
+    env = dict(env)
+    options_bootstrapper = OptionsBootstrapper.create(args=args, env=env)
+    BuildConfigInitializer.get(options_bootstrapper)
+    full_options = options_bootstrapper.get_full_options(list(self.goal_cls.Options.known_scope_infos()))
+    stdout, stderr = StringIO(), StringIO()
+    console = Console(stdout=stdout, stderr=stderr)
+    scheduler = self.scheduler
+    workspace = Workspace(scheduler)
+
+    # Run for the target specs parsed from the args.
+    specs = TargetRootsCalculator.parse_specs(full_options.target_specs, self.build_root)
+    params = Params(specs, console, options_bootstrapper, workspace, *additional_params)
+    actual_exit_code = self.scheduler.run_console_rule(self.goal_cls, params)
+
+    # Flush and capture console output.
+    console.flush()
+    stdout = stdout.getvalue()
+    stderr = stderr.getvalue()
+
+    self.assertEqual(
+        exit_code,
+        actual_exit_code,
+        "Exited with {} (expected {}):\nstdout:\n{}\nstderr:\n{}".format(actual_exit_code, exit_code, stdout, stderr)
+      )
+
+    return stdout
+
+  def assert_entries(self, sep, *output, **kwargs):
+    """Verifies the expected output text is flushed by the console task under test.
+
+    NB: order of entries is not tested, just presence.
+
+    :API: public
+
+    sep:      the expected output separator.
+    *output:  the output entries expected between the separators
+    **kwargs: additional kwargs passed to execute_rule.
+    """
+    # We expect each output line to be suffixed with the separator, so for , and [1,2,3] we expect:
+    # '1,2,3,' - splitting this by the separator we should get ['1', '2', '3', ''] - always an extra
+    # empty string if the separator is properly always a suffix and not applied just between
+    # entries.
+    self.assertEqual(sorted(list(output) + ['']), sorted((self.execute_rule(**kwargs)).split(sep)))
+
+  def assert_console_output(self, *output, **kwargs):
+    """Verifies the expected output entries are emitted by the console task under test.
+
+    NB: order of entries is not tested, just presence.
+
+    :API: public
+
+    *output:  the expected output entries
+    **kwargs: additional kwargs passed to execute_rule.
+    """
+    self.assertEqual(sorted(output), sorted(self.execute_rule(**kwargs).splitlines()))
+
+  def assert_console_output_contains(self, output, **kwargs):
+    """Verifies the expected output string is emitted by the console task under test.
+
+    :API: public
+
+    output:  the expected output entry(ies)
+    **kwargs: additional kwargs passed to execute_rule.
+    """
+    self.assertIn(output, self.execute_rule(**kwargs))
+
+  def assert_console_output_ordered(self, *output, **kwargs):
+    """Verifies the expected output entries are emitted by the console task under test.
+
+    NB: order of entries is tested.
+
+    :API: public
+
+    *output:  the expected output entries in expected order
+    **kwargs: additional kwargs passed to execute_rule.
+    """
+    self.assertEqual(list(output), self.execute_rule(**kwargs).splitlines())
+
+  def assert_console_raises(self, exception, **kwargs):
+    """Verifies the expected exception is raised by the console task under test.
+
+    :API: public
+
+    **kwargs: additional kwargs are passed to execute_rule.
+    """
+    with self.assertRaises(exception):
+      self.execute_rule(**kwargs)
diff --git a/src/python/pants/testutil/engine/BUILD b/src/python/pants/testutil/engine/BUILD
new file mode 100644
index 00000000000..d24bdb33753
--- /dev/null
+++ b/src/python/pants/testutil/engine/BUILD
@@ -0,0 +1,30 @@
+# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
+# Licensed under the Apache License, Version 2.0 (see LICENSE).
+
+python_library(
+  name = 'engine_test_base',
+  sources = ['base_engine_test.py'],
+  dependencies = [
+    'src/python/pants/goal',
+    'src/python/pants/goal:task_registrar',
+    'src/python/pants/testutil:test_base',
+  ]
+)
+
+
+python_library(
+  name = 'util',
+  sources = ['util.py'],
+  dependencies = [
+    '3rdparty/python:ansicolors',
+    'src/python/pants/base:project_tree',
+    'src/python/pants/binaries',
+    'src/python/pants/engine:addressable',
+    'src/python/pants/engine:native',
+    'src/python/pants/engine:parser',
+    'src/python/pants/engine:rules',
+    'src/python/pants/engine:scheduler',
+    'src/python/pants/engine:struct',
+    'src/python/pants/util:objects',
+  ],
+)
diff --git a/src/python/pants/testutil/engine/__init__.py b/src/python/pants/testutil/engine/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/src/python/pants/testutil/engine/base_engine_test.py b/src/python/pants/testutil/engine/base_engine_test.py
new file mode 100644
index 00000000000..8f3a6996396
--- /dev/null
+++ b/src/python/pants/testutil/engine/base_engine_test.py
@@ -0,0 +1,64 @@
+# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
+# Licensed under the Apache License, Version 2.0 (see LICENSE).
+
+from pants.goal.goal import Goal
+from pants.goal.task_registrar import TaskRegistrar
+from pants.testutil.test_base import TestBase
+
+
+class EngineTestBase(TestBase):
+  """
+  :API: public
+  """
+
+  @classmethod
+  def as_goal(cls, goal_name):
+    """Returns a ``Goal`` object of the given name
+
+    :API: public
+    """
+    return Goal.by_name(goal_name)
+
+  @classmethod
+  def as_goals(cls, *goal_names):
+    """Converts the given goal names to a list of ``Goal`` objects.
+
+    :API: public
+    """
+    return [cls.as_goal(goal_name) for goal_name in goal_names]
+
+  @classmethod
+  def install_task(cls, name, action=None, dependencies=None, goal=None):
+    """Creates and installs a task with the given name.
+
+    :API: public
+
+    :param string name: The task name.
+    :param action: The task's action.
+    :param list dependencies: The list of goal names the task depends on, if any.
+    :param string goal: The name of the goal to install the task in, if different from the task
+                        name.
+    :returns The ``Goal`` object with task installed.
+    """
+    return TaskRegistrar(name,
+                         action=action or (lambda: None),
+                         dependencies=dependencies or []).install(goal)
+
+  def setUp(self):
+    """
+    :API: public
+    """
+    super().setUp()
+
+    # TODO(John Sirois): Now that the BuildFileParser controls goal registration by iterating
+    # over plugin callbacks a GoalRegistry can be constructed by it and handed to all these
+    # callbacks in place of having a global Goal registry.  Remove the Goal static cling.
+    Goal.clear()
+
+  def tearDown(self):
+    """
+    :API: public
+    """
+    Goal.clear()
+
+    super().tearDown()
diff --git a/src/python/pants/testutil/engine/util.py b/src/python/pants/testutil/engine/util.py
new file mode 100644
index 00000000000..b1aa5a9673a
--- /dev/null
+++ b/src/python/pants/testutil/engine/util.py
@@ -0,0 +1,171 @@
+# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
+# Licensed under the Apache License, Version 2.0 (see LICENSE).
+
+import os
+import re
+from io import StringIO
+from types import GeneratorType
+
+from colors import blue, green, red
+
+from pants.base.file_system_project_tree import FileSystemProjectTree
+from pants.engine.addressable import addressable_list
+from pants.engine.native import Native
+from pants.engine.parser import SymbolTable
+from pants.engine.scheduler import Scheduler
+from pants.engine.selectors import Get
+from pants.engine.struct import Struct
+from pants.option.global_options import DEFAULT_EXECUTION_OPTIONS
+from pants.util.objects import SubclassesOf
+
+
+def run_rule(rule, *args):
+  """A test helper function that runs an @rule with a set of arguments and Get providers.
+
+  An @rule named `my_rule` that takes one argument and makes no `Get` requests can be invoked
+  like so (although you could also just invoke it directly):
+  ```
+  return_value = run_rule(my_rule, arg1)
+  ```
+
+  In the case of an @rule that makes Get requests, things get more interesting: an extra argument
+  is required that represents a dict mapping (product, subject) type pairs to one argument functions
+  that take a subject value and return a product value.
+
+  So in the case of an @rule named `my_co_rule` that takes one argument and makes Get requests
+  for product and subject types (Listing, Dir), the invoke might look like:
+  ```
+  return_value = run_rule(my_co_rule, arg1, {(Listing, Dir): lambda x: Listing(..)})
+  ```
+
+  :returns: The return value of the completed @rule.
+  """
+
+  task_rule = getattr(rule, 'rule', None)
+  if task_rule is None:
+    raise TypeError('Expected to receive a decorated `@rule`; got: {}'.format(rule))
+
+  gets_len = len(task_rule.input_gets)
+
+  if len(args) != len(task_rule.input_selectors) + (1 if gets_len else 0):
+    raise ValueError('Rule expected to receive arguments of the form: {}; got: {}'.format(
+      task_rule.input_selectors, args))
+
+  args, get_providers = (args[:-1], args[-1]) if gets_len > 0 else (args, {})
+  if gets_len != len(get_providers):
+    raise ValueError('Rule expected to receive Get providers for {}; got: {}'.format(
+      task_rule.input_gets, get_providers))
+
+  res = rule(*args)
+  if not isinstance(res, GeneratorType):
+    return res
+
+  def get(product, subject):
+    provider = get_providers.get((product, type(subject)))
+    if provider is None:
+      raise AssertionError('Rule requested: Get{}, which cannot be satisfied.'.format(
+        (product, type(subject), subject)))
+    return provider(subject)
+
+  rule_coroutine = res
+  rule_input = None
+  while True:
+    res = rule_coroutine.send(rule_input)
+    if isinstance(res, Get):
+      rule_input = get(res.product, res.subject)
+    elif type(res) in (tuple, list):
+      rule_input = [get(g.product, g.subject) for g in res]
+    else:
+      return res
+
+
+def init_native():
+  """Return the `Native` instance."""
+  return Native()
+
+
+def create_scheduler(rules, union_rules=None, validate=True, native=None):
+  """Create a Scheduler."""
+  native = native or init_native()
+  return Scheduler(
+    native,
+    FileSystemProjectTree(os.getcwd()),
+    './.pants.d',
+    rules,
+    union_rules,
+    execution_options=DEFAULT_EXECUTION_OPTIONS,
+    validate=validate,
+  )
+
+
+class Target(Struct):
+  def __init__(self, name=None, configurations=None, **kwargs):
+    super().__init__(name=name, **kwargs)
+    self.configurations = configurations
+
+  @addressable_list(SubclassesOf(Struct))
+  def configurations(self):
+    pass
+
+
+TARGET_TABLE = SymbolTable({'struct': Struct, 'target': Target})
+
+
+def assert_equal_with_printing(test_case, expected, actual):
+  """Asserts equality, but also prints the values so they can be compared on failure.
+
+  Usage:
+
+     class FooTest(unittest.TestCase):
+       assert_equal_with_printing = assert_equal_with_printing
+
+       def test_foo(self):
+         self.assert_equal_with_printing("a", "b")
+  """
+  str_actual = str(actual)
+  print('Expected:')
+  print(expected)
+  print('Actual:')
+  print(str_actual)
+  test_case.assertEqual(expected, str_actual)
+
+
+def remove_locations_from_traceback(trace):
+  location_pattern = re.compile(r'"/.*", line \d+')
+  address_pattern = re.compile('0x[0-9a-f]+')
+  new_trace = location_pattern.sub('LOCATION-INFO', trace)
+  new_trace = address_pattern.sub('0xEEEEEEEEE', new_trace)
+  return new_trace
+
+
+class MockConsole:
+  """An implementation of pants.engine.console.Console which captures output."""
+
+  def __init__(self, use_colors=True):
+    self.stdout = StringIO()
+    self.stderr = StringIO()
+    self._use_colors = use_colors
+
+  def write_stdout(self, payload):
+    self.stdout.write(payload)
+
+  def write_stderr(self, payload):
+    self.stderr.write(payload)
+
+  def print_stdout(self, payload):
+    print(payload, file=self.stdout)
+
+  def print_stderr(self, payload):
+    print(payload, file=self.stderr)
+
+  def _safe_color(self, text, color):
+    return color(text) if self._use_colors else text
+
+  def blue(self, text):
+    return self._safe_color(text, blue)
+
+  def green(self, text):
+    return self._safe_color(text, green)
+
+  def red(self, text):
+    return self._safe_color(text, red)
diff --git a/src/python/pants/testutil/file_test_util.py b/src/python/pants/testutil/file_test_util.py
new file mode 100644
index 00000000000..0731cae0249
--- /dev/null
+++ b/src/python/pants/testutil/file_test_util.py
@@ -0,0 +1,64 @@
+# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
+# Licensed under the Apache License, Version 2.0 (see LICENSE).
+
+import os
+
+
+def exact_files(directory, ignore_links=False):
+  """Returns the relative files contained in the directory.
+
+  :API: public
+
+  :param str directory: Path to directory to search.
+  :param bool ignore_links: Indicates to ignore any file links.
+  """
+  found = []
+  for root, _, files in os.walk(directory, followlinks=not ignore_links):
+    for f in files:
+      p = os.path.join(root, f)
+      if ignore_links and os.path.islink(p):
+        continue
+      found.append(os.path.relpath(p, directory))
+
+  return found
+
+
+def contains_exact_files(directory, expected_files, ignore_links=False):
+  """Check if the only files which directory contains are expected_files.
+
+  :API: public
+
+  :param str directory: Path to directory to search.
+  :param iterable expected_files: Set of filepaths relative to directory to search for.
+  :param bool ignore_links: Indicates to ignore any file links.
+  """
+
+  return sorted(expected_files) == sorted(exact_files(directory, ignore_links=ignore_links))
+
+
+def check_file_content(path, expected_content):
+  """Check file has expected content.
+
+  :API: public
+
+  :param str path: Path to file.
+  :param str expected_content: Expected file content.
+  """
+  with open(path, 'r') as infile:
+    return expected_content == infile.read()
+
+
+def check_symlinks(directory, symlinks=True):
+  """Check files under directory are symlinks.
+
+  :API: public
+
+  :param str directory: Path to directory to search.
+  :param bool symlinks: If true, verify files are symlinks, if false, verify files are actual files.
+  """
+  for root, _, files in os.walk(directory):
+    for f in files:
+      p = os.path.join(root, f)
+      if symlinks ^ os.path.islink(p):
+        return False
+  return True
diff --git a/src/python/pants/testutil/git_util.py b/src/python/pants/testutil/git_util.py
new file mode 100644
index 00000000000..da6872e20fc
--- /dev/null
+++ b/src/python/pants/testutil/git_util.py
@@ -0,0 +1,58 @@
+# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
+# Licensed under the Apache License, Version 2.0 (see LICENSE).
+
+import re
+import subprocess
+from contextlib import contextmanager
+from typing import Iterator, Optional
+
+from pants.base.revision import Revision
+from pants.scm.git import Git
+from pants.util.contextutil import environment_as, temporary_dir
+
+
+MIN_REQUIRED_GIT_VERSION = Revision.semver('1.7.10')
+
+
+def git_version() -> Revision:
+  """Get a `Revision` based on the installed command-line git's version.
+
+  :raises ValueError: If the `git --version` output cannot be parsed.
+  :raises subprocess.CalledProcessError: If the `git` invocation itself fails.
+  """
+  stdout = subprocess.run(
+    ['git', '--version'], stdout=subprocess.PIPE, encoding="utf-8", check=True
+  ).stdout
+  # stdout is like 'git version 1.9.1.598.g9119e8b\n'.  We want '1.9.1.598': the
+  # regex captures the leading dotted-digit run and drops any trailing hash part.
+  matches = re.search(r'\s(\d+(?:\.\d+)*)[\s\.]', stdout)
+  if matches is None:
+    raise ValueError(f"Not able to parse git version from {stdout}.")
+  return Revision.lenient(matches.group(1))
+
+
+@contextmanager
+def initialize_repo(worktree: str, *, gitdir: Optional[str] = None) -> Iterator[Git]:
+  """Initialize a git repository for the given `worktree`.
+
+  NB: The given `worktree` must contain at least one file which will be committed to form an initial
+  commit.
+
+  :param worktree: The path to the git work tree.
+  :param gitdir: An optional path to the `.git` dir to use; a temporary dir is used if omitted.
+  :returns: A `Git` repository object that can be used to interact with the repo.
+  """
+  @contextmanager
+  def use_gitdir() -> Iterator[str]:
+    # Use the caller-supplied gitdir, or fall back to a throwaway temp dir.
+    if gitdir:
+      yield gitdir
+    else:
+      with temporary_dir() as d:
+        yield d
+
+  # GIT_DIR / GIT_WORK_TREE point every git CLI invocation below at our repo.
+  with use_gitdir() as git_dir, environment_as(GIT_DIR=git_dir, GIT_WORK_TREE=worktree):
+    subprocess.run(['git', 'init'], check=True)
+    subprocess.run(['git', 'config', 'user.email', 'you@example.com'], check=True)
+    # TODO: This method inherits the global git settings, so if a developer has gpg signing on, this
+    # will turn that off. We should probably just disable reading from the global config somehow:
+    # https://git-scm.com/docs/git-config.
+    subprocess.run(['git', 'config', 'commit.gpgSign', 'false'], check=True)
+    subprocess.run(['git', 'config', 'user.name', 'Your Name'], check=True)
+    subprocess.run(['git', 'add', '.'], check=True)
+    subprocess.run(['git', 'commit', '-am', 'Add project files.'], check=True)
+    yield Git(gitdir=git_dir, worktree=worktree)
diff --git a/src/python/pants/testutil/interpreter_selection_utils.py b/src/python/pants/testutil/interpreter_selection_utils.py
new file mode 100644
index 00000000000..1ebeeaea2a5
--- /dev/null
+++ b/src/python/pants/testutil/interpreter_selection_utils.py
@@ -0,0 +1,85 @@
+# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
+# Licensed under the Apache License, Version 2.0 (see LICENSE).
+
+import os
+import subprocess
+from unittest import skipIf
+
+
+# Interpreter version strings accepted by the helpers below; each corresponds to
+# a `python<version>` binary looked up on the PATH.
+PY_2 = '2'
+PY_3 = '3'
+
+PY_27 = '2.7'
+PY_36 = '3.6'
+PY_37 = '3.7'
+
+
+def has_python_version(version):
+  """Returns `True` if the current system has the specified version of python.
+
+  :param string version: A python version string, such as 2.7, 3.
+  :rtype: bool
+  """
+  # TODO: Tests that skip unless a python interpreter is present often need the path to that
+  # interpreter, and so end up calling python_interpreter_path again. Find a way to streamline this.
+  return python_interpreter_path(version) is not None
+
+
+def python_interpreter_path(version):
+  """Returns the interpreter path if the current system has the specified version of python.
+
+  :param string version: A python version string, such as 2.7, 3.
+  :returns: the normalized path to the interpreter binary if found; otherwise `None`
+  :rtype: string
+  """
+  try:
+    # Ask the `python<version>` binary itself where it lives; realpath resolves
+    # any symlink indirection to the actual binary.
+    command = ['python{}'.format(version), '-c', 'import sys; print(sys.executable)']
+    py_path = subprocess.check_output(command).decode().strip()
+    return os.path.realpath(py_path)
+  except (subprocess.CalledProcessError, FileNotFoundError):
+    # Binary not on the PATH, or it failed to run: treat both as "not present".
+    return None
+
+
+def skip_unless_all_pythons_present(*versions):
+  """A decorator that only runs the decorated test method if all of the specified pythons are present.
+
+  NB: The interpreter lookup happens once, at decoration (import) time, not per test run.
+
+  :param string *versions: Python version strings, such as 2.7, 3.
+  """
+  missing_versions = [v for v in versions if not has_python_version(v)]
+  if len(missing_versions) == 1:
+    return skipIf(True, 'Could not find python {} on system. Skipping.'.format(missing_versions[0]))
+  elif len(missing_versions) > 1:
+    return skipIf(True,
+                  'Skipping due to the following missing required pythons: {}'
+                  .format(', '.join(missing_versions)))
+  else:
+    # skipIf(False, ...) is a no-op decorator: the test runs normally.
+    return skipIf(False, 'All required pythons present, continuing with test!')
+
+
+# Convenience single- and multi-version skip decorators, all delegating to
+# `skip_unless_all_pythons_present` above.
+def skip_unless_python27_present(func):
+  """A test skip decorator that only runs a test method if python2.7 is present."""
+  return skip_unless_all_pythons_present(PY_27)(func)
+
+
+def skip_unless_python3_present(func):
+  """A test skip decorator that only runs a test method if python3 is present."""
+  return skip_unless_all_pythons_present(PY_3)(func)
+
+
+def skip_unless_python36_present(func):
+  """A test skip decorator that only runs a test method if python3.6 is present."""
+  return skip_unless_all_pythons_present(PY_36)(func)
+
+
+def skip_unless_python27_and_python3_present(func):
+  """A test skip decorator that only runs a test method if python2.7 and python3 are present."""
+  return skip_unless_all_pythons_present(PY_27, PY_3)(func)
+
+
+def skip_unless_python27_and_python36_present(func):
+  """A test skip decorator that only runs a test method if python2.7 and python3.6 are present."""
+  return skip_unless_all_pythons_present(PY_27, PY_36)(func)
+
+
+def skip_unless_python36_and_python37_present(func):
+  """A test skip decorator that only runs a test method if python3.6 and python3.7 are present."""
+  return skip_unless_all_pythons_present(PY_36, PY_37)(func)
diff --git a/src/python/pants/testutil/jvm/BUILD b/src/python/pants/testutil/jvm/BUILD
new file mode 100644
index 00000000000..e14001b2602
--- /dev/null
+++ b/src/python/pants/testutil/jvm/BUILD
@@ -0,0 +1,50 @@
+# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
+# Licensed under the Apache License, Version 2.0 (see LICENSE).
+
+# Test base for tasks that bootstrap JVM tools; depends on the repo's real
+# BUILD.tools and 3rdparty definitions, which jvm_tool_task_test_base.py copies
+# into the synthetic build root.
+python_library(
+  name='jvm_tool_task_test_base',
+  sources=['jvm_tool_task_test_base.py'],
+  dependencies=[
+    '//:build_tools',
+    '//:3rdparty_directory',
+    ':jvm_task_test_base',
+    'src/python/pants/backend/jvm/subsystems:jvm_tool_mixin',
+    'src/python/pants/backend/jvm/targets:jvm',
+    'src/python/pants/backend/jvm/targets:scala',
+    'src/python/pants/backend/jvm/tasks:bootstrap_jvm_tools',
+    'src/python/pants/backend/jvm/tasks:nailgun_task',
+    'src/python/pants/base:build_environment',
+    'src/python/pants/build_graph',
+    'src/python/pants/ivy',
+    'src/python/pants/java/jar',
+    'src/python/pants/util:dirutil',
+  ],
+)
+
+# Test base that forces NailgunTask subclasses into subprocess execution.
+python_library(
+  name='nailgun_task_test_base',
+  sources=['nailgun_task_test_base.py'],
+  dependencies=[
+    ':jvm_tool_task_test_base',
+    'src/python/pants/backend/jvm/tasks:nailgun_task',
+  ],
+)
+
+# Test base for jar tasks (currently a named extension of nailgun_task_test_base).
+python_library(
+  name='jar_task_test_base',
+  sources=['jar_task_test_base.py'],
+  dependencies=[
+    ':nailgun_task_test_base',
+  ],
+)
+
+# Test base providing runtime_classpath product helpers for JVM tasks.
+python_library(
+  name='jvm_task_test_base',
+  sources=['jvm_task_test_base.py'],
+  dependencies=[
+    'src/python/pants/backend/jvm/tasks:classpath_products',
+    'src/python/pants/testutil/subsystem',
+    'src/python/pants/testutil:task_test_base',
+    'src/python/pants/util:dirutil',
+  ],
+)
diff --git a/src/python/pants/testutil/jvm/__init__.py b/src/python/pants/testutil/jvm/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/src/python/pants/testutil/jvm/jar_task_test_base.py b/src/python/pants/testutil/jvm/jar_task_test_base.py
new file mode 100644
index 00000000000..a5537e397cf
--- /dev/null
+++ b/src/python/pants/testutil/jvm/jar_task_test_base.py
@@ -0,0 +1,11 @@
+# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
+# Licensed under the Apache License, Version 2.0 (see LICENSE).
+
+from pants.testutil.jvm.nailgun_task_test_base import NailgunTaskTestBase
+
+
+class JarTaskTestBase(NailgunTaskTestBase):
+  """Prepares an ephemeral test build root that supports jar tasks.
+
+  Currently adds no behavior of its own beyond `NailgunTaskTestBase`; it exists
+  as a named extension point for jar-task tests.
+
+  :API: public
+  """
diff --git a/src/python/pants/testutil/jvm/jvm_task_test_base.py b/src/python/pants/testutil/jvm/jvm_task_test_base.py
new file mode 100644
index 00000000000..fa4f3b55a64
--- /dev/null
+++ b/src/python/pants/testutil/jvm/jvm_task_test_base.py
@@ -0,0 +1,61 @@
+# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
+# Licensed under the Apache License, Version 2.0 (see LICENSE).
+
+import os
+
+from pants.backend.jvm.subsystems.resolve_subsystem import JvmResolveSubsystem
+from pants.backend.jvm.tasks.classpath_products import ClasspathProducts
+from pants.testutil.subsystem.util import init_subsystem
+from pants.testutil.task_test_base import TaskTestBase
+from pants.util.dirutil import safe_file_dump, safe_mkdir, safe_mkdtemp
+
+
+class JvmTaskTestBase(TaskTestBase):
+  """Base class for JVM task tests: initializes an ivy resolver and provides
+  helpers for populating the 'runtime_classpath' product.
+
+  :API: public
+  """
+
+  def setUp(self):
+    """
+    :API: public
+    """
+    super().setUp()
+    init_subsystem(JvmResolveSubsystem)
+    self.set_options_for_scope('resolver', resolver='ivy')
+
+  def populate_runtime_classpath(self, context, classpath=None):
+    """
+    Helps actual test cases to populate the 'runtime_classpath' products data mapping
+    in the context, which holds the classpath value for targets.
+
+    :API: public
+
+    :param context: The execution context where the products data mapping lives.
+    :param classpath: a list of classpath strings. If not specified, an empty list is used.
+    """
+    classpath = classpath or []
+    runtime_classpath = self.get_runtime_classpath(context)
+    # Each entry is registered under the 'default' conf for all targets in the context.
+    runtime_classpath.add_for_targets(context.targets(),
+                                      [('default', entry) for entry in classpath])
+
+  def add_to_runtime_classpath(self, context, tgt, files_dict):
+    """Creates and adds the given files to the classpath for the given target under a temp path.
+
+    :API: public
+
+    :param context: The execution context holding the products data mapping.
+    :param tgt: The target to associate the new classpath entry with.
+    :param dict files_dict: Mapping of relative path -> file content to materialize.
+    """
+    runtime_classpath = self.get_runtime_classpath(context)
+    # Create a temporary directory under the target id, then dump all files.
+    target_dir = os.path.join(self.test_workdir, tgt.id)
+    safe_mkdir(target_dir)
+    classpath_dir = safe_mkdtemp(dir=target_dir)
+    for rel_path, content in files_dict.items():
+      safe_file_dump(os.path.join(classpath_dir, rel_path), content)
+    # Add to the classpath.
+    runtime_classpath.add_for_target(tgt, [('default', classpath_dir)])
+
+  def get_runtime_classpath(self, context):
+    """Returns the 'runtime_classpath' product, creating it on first access.
+
+    :API: public
+    """
+    return context.products.get_data('runtime_classpath', init_func=ClasspathProducts.init_func(self.pants_workdir))
diff --git a/src/python/pants/testutil/jvm/jvm_tool_task_test_base.py b/src/python/pants/testutil/jvm/jvm_tool_task_test_base.py
new file mode 100644
index 00000000000..939afa81f3c
--- /dev/null
+++ b/src/python/pants/testutil/jvm/jvm_tool_task_test_base.py
@@ -0,0 +1,122 @@
+# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
+# Licensed under the Apache License, Version 2.0 (see LICENSE).
+
+import os
+import shutil
+
+from pants.backend.jvm.register import build_file_aliases
+from pants.backend.jvm.subsystems.jvm_tool_mixin import JvmToolMixin
+from pants.backend.jvm.tasks.bootstrap_jvm_tools import BootstrapJvmTools
+from pants.backend.jvm.tasks.nailgun_task import NailgunTask
+from pants.base.build_environment import get_pants_cachedir
+from pants.build_graph.build_file_aliases import BuildFileAliases
+from pants.build_graph.target import Target
+from pants.ivy.bootstrapper import Bootstrapper
+from pants.testutil.jvm.jvm_task_test_base import JvmTaskTestBase
+from pants.util.dirutil import safe_mkdir
+
+
+class JvmToolTaskTestBase(JvmTaskTestBase):
+  """Prepares an ephemeral test build root that supports tasks that use jvm tool bootstrapping.
+
+  :API: public
+  """
+
+  @classmethod
+  def alias_groups(cls):
+    """
+    :API: public
+
+    :returns: The JVM backend's build file aliases, plus a generic `target` alias.
+    """
+    # Aliases appearing in our real BUILD.tools.
+    return build_file_aliases().merge(BuildFileAliases(targets={'target': Target}))
+
+  def setUp(self):
+    """
+    :API: public
+    """
+    super().setUp()
+
+    # Use a synthetic subclass for proper isolation when bootstrapping within the test.
+    bootstrap_scope = 'bootstrap_scope'
+    self.bootstrap_task_type = self.synthesize_task_subtype(BootstrapJvmTools, bootstrap_scope)
+    JvmToolMixin.reset_registered_tools()
+
+    # Set some options:
+
+    # 1. Cap BootstrapJvmTools memory usage in tests.  The Xmx was empirically arrived upon using
+    #    -Xloggc and verifying no full gcs for a test using the full gamut of resolving a multi-jar
+    #    tool, constructing a fat jar and then shading that fat jar.
+    #
+    # 2. Allow tests to read/write tool jars from the real artifact cache, so they don't
+    #    each have to resolve and shade them every single time, which is a huge slowdown.
+    #    Note that local artifact cache writes are atomic, so it's fine for multiple concurrent
+    #    tests to write to it.
+    #
+    # Note that we don't have access to the invoking pants instance's options, so we assume that
+    # its artifact cache is in the standard location.  If it isn't, worst case the tests will
+    # populate a second cache at the standard location, which is no big deal.
+    # TODO: We really need a straightforward way for pants's own tests to get to the enclosing
+    # pants instance's options values.
+    artifact_caches = [os.path.join(get_pants_cachedir(), 'artifact_cache')]
+    self.set_options_for_scope(bootstrap_scope,
+                               execution_strategy=NailgunTask.ExecutionStrategy.subprocess,
+                               jvm_options=['-Xmx128m'])
+    self.set_options_for_scope('cache.{}'.format(bootstrap_scope),
+                               read_from=artifact_caches,
+                               write_to=artifact_caches)
+
+    # Copy into synthetic build-root
+    shutil.copy('BUILD.tools', self.build_root)
+    build_root_third_party = os.path.join(self.build_root, '3rdparty')
+    safe_mkdir(build_root_third_party)
+    shutil.copy(os.path.join('3rdparty', 'BUILD'), build_root_third_party)
+
+    Bootstrapper.reset_instance()
+
+  def context(self, for_task_types=None, **kwargs):
+    """
+    :API: public
+
+    :param for_task_types: Extra task types (beyond the bootstrapper) to register options for.
+    """
+    # Add in the bootstrapper task type, so its options get registered and set.
+    for_task_types = [self.bootstrap_task_type] + (for_task_types or [])
+    return super().context(for_task_types=for_task_types, **kwargs)
+
+  def prepare_execute(self, context):
+    """Prepares a jvm tool-using task for execution, first bootstrapping any required jvm tools.
+
+    Note: Other task pre-requisites will not be ensured and tests must instead setup their own
+          product requirements if any.
+
+    :API: public
+
+    :returns: The prepared Task instance.
+    """
+    # test_workdir is an @property
+    workdir = self.test_workdir
+
+    # Bootstrap the tools needed by the task under test.
+    # We need the bootstrap task's workdir to be under the test's .pants.d, so that it can
+    # use artifact caching.  Making it a sibling of the main task's workdir achieves this.
+    self.bootstrap_task_type.get_alternate_target_roots(context.options,
+                                                        self.address_mapper,
+                                                        self.build_graph)
+    bootstrap_workdir = os.path.join(os.path.dirname(workdir), 'bootstrap_jvm_tools')
+    self.bootstrap_task_type(context, bootstrap_workdir).execute()
+
+    task = self.create_task(context, workdir)
+    return task
+
+  def execute(self, context):
+    """Executes a jvm tool-using task, first bootstrapping any required jvm tools.
+
+    Note: Other task pre-requisites will not be ensured and tests must instead setup their own
+          product requirements if any.
+
+    :API: public
+
+    :returns: The Task instance that was executed.
+    """
+    task = self.prepare_execute(context)
+    # Honor the task's own skip option if set.
+    if not task.skip_execution:
+      task.execute()
+    return task
diff --git a/src/python/pants/testutil/jvm/nailgun_task_test_base.py b/src/python/pants/testutil/jvm/nailgun_task_test_base.py
new file mode 100644
index 00000000000..470a647a828
--- /dev/null
+++ b/src/python/pants/testutil/jvm/nailgun_task_test_base.py
@@ -0,0 +1,23 @@
+# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
+# Licensed under the Apache License, Version 2.0 (see LICENSE).
+
+from pants.backend.jvm.tasks.nailgun_task import NailgunTask
+from pants.testutil.jvm.jvm_tool_task_test_base import JvmToolTaskTestBase
+
+
+class NailgunTaskTestBase(JvmToolTaskTestBase):
+  """Ensures `NailgunTask` tests use subprocess mode to stably test the task under test.
+
+  For subclasses of NailgunTask the nailgun behavior is irrelevant to the code under test and can
+  cause problems in CI environments. As such, disabling nailgunning ensures the test focus is where
+  it needs to be to test the unit.
+
+  :API: public
+  """
+
+  def setUp(self):
+    """
+    :API: public
+    """
+    super().setUp()
+    # Force the task under test to run its tool in a plain subprocess (no nailgun).
+    self.set_options(execution_strategy=NailgunTask.ExecutionStrategy.subprocess)
diff --git a/src/python/pants/testutil/mock_logger.py b/src/python/pants/testutil/mock_logger.py
new file mode 100644
index 00000000000..6f1b3e6a547
--- /dev/null
+++ b/src/python/pants/testutil/mock_logger.py
@@ -0,0 +1,52 @@
+# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
+# Licensed under the Apache License, Version 2.0 (see LICENSE).
+
+import sys
+
+from pants.reporting.report import Report
+
+
+class MockLogger:
+  """A standalone logger that writes to stderr.
+
+  :API: public
+
+  Useful for testing without requiring the full RunTracker reporting framework.
+  """
+
+  def __init__(self, level=Report.INFO):
+    # Messages with a level greater than this threshold are suppressed.
+    self._level = level
+
+  def _maybe_log(self, level, *msg_elements):
+    # NB: elements are joined verbatim — they must already be strings, and no
+    # trailing newline is appended.
+    if level <= self._level:
+      sys.stderr.write(''.join(msg_elements))
+
+  def debug(self, *msg_elements):
+    """
+    :API: public
+    """
+    self._maybe_log(Report.DEBUG, *msg_elements)
+
+  def info(self, *msg_elements):
+    """
+    :API: public
+    """
+    self._maybe_log(Report.INFO, *msg_elements)
+
+  def warn(self, *msg_elements):
+    """
+    :API: public
+    """
+    self._maybe_log(Report.WARN, *msg_elements)
+
+  def error(self, *msg_elements):
+    """
+    :API: public
+    """
+    self._maybe_log(Report.ERROR, *msg_elements)
+
+  def fatal(self, *msg_elements):
+    """
+    :API: public
+    """
+    self._maybe_log(Report.FATAL, *msg_elements)
diff --git a/src/python/pants/testutil/option/BUILD b/src/python/pants/testutil/option/BUILD
new file mode 100644
index 00000000000..d48a5733449
--- /dev/null
+++ b/src/python/pants/testutil/option/BUILD
@@ -0,0 +1,10 @@
+# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
+# Licensed under the Apache License, Version 2.0 (see LICENSE).
+
+# Fake options objects for tests (see fakes.py in this directory).
+python_library(
+  dependencies=[
+    '//:build_root',
+    '//:pants_ini',
+    'src/python/pants/option',
+  ],
+)
diff --git a/src/python/pants/testutil/option/__init__.py b/src/python/pants/testutil/option/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/src/python/pants/testutil/option/fakes.py b/src/python/pants/testutil/option/fakes.py
new file mode 100644
index 00000000000..d74a33b1e22
--- /dev/null
+++ b/src/python/pants/testutil/option/fakes.py
@@ -0,0 +1,198 @@
+# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
+# Licensed under the Apache License, Version 2.0 (see LICENSE).
+
+from collections import defaultdict
+
+from pants.option.global_options import GlobalOptionsRegistrar
+from pants.option.option_util import is_list_option
+from pants.option.parser import Parser
+from pants.option.parser_hierarchy import enclosing_scope
+from pants.option.ranked_value import RankedValue
+from pants.option.scope import GLOBAL_SCOPE
+
+
+class _FakeOptionValues(object):
+  """Dict-backed stand-in for a parsed option-values namespace.
+
+  Supports both attribute access (`values.foo`) and mapping access
+  (`values['foo']`), unwrapping any `RankedValue` to its raw value.
+  """
+  def __init__(self, option_values):
+    self._option_values = option_values
+
+  def __iter__(self):
+    return iter(self._option_values.keys())
+
+  def __getitem__(self, key):
+    return getattr(self, key)
+
+  def get(self, key, default=None):
+    # NOTE(review): the `default` passed to getattr below is unreachable — the
+    # hasattr() guard already returned True — harmless but redundant.
+    if hasattr(self, key):
+      return getattr(self, key, default)
+    return default
+
+  def __getattr__(self, key):
+    try:
+      value = self._option_values[key]
+    except KeyError:
+      # Instead of letting KeyError raise here, re-raise an AttributeError to not break getattr().
+      raise AttributeError(key)
+    return value.value if isinstance(value, RankedValue) else value
+
+  def get_rank(self, key):
+    # Values stored as plain (non-RankedValue) objects are treated as flag-ranked.
+    value = self._option_values[key]
+    return value.rank if isinstance(value, RankedValue) else RankedValue.FLAG
+
+  def is_flagged(self, key):
+    return self.get_rank(key) == RankedValue.FLAG
+
+  def is_default(self, key):
+    return self.get_rank(key) in (RankedValue.NONE, RankedValue.HARDCODED)
+
+  @property
+  def option_values(self):
+    return self._option_values
+
+
+def _options_registration_function(defaults, fingerprintables):
+  """Returns a fake `register()` that records option defaults and fingerprintable types.
+
+  :param dict defaults: Mutated in place: option name -> HARDCODED RankedValue default.
+  :param dict fingerprintables: Mutated in place: option name -> value type, for
+                                options registered with `fingerprint=True`.
+  """
+  def register(*args, **kwargs):
+    option_name = Parser.parse_dest(*args, **kwargs)
+
+    # Mirror the real parser's implicit defaults for bool (False) and list ([]) options.
+    default = kwargs.get('default')
+    if default is None:
+      if kwargs.get('type') == bool:
+        default = False
+      if kwargs.get('type') == list:
+        default = []
+    defaults[option_name] = RankedValue(RankedValue.HARDCODED, default)
+
+    fingerprint = kwargs.get('fingerprint', False)
+    if fingerprint:
+      # List options fingerprint by member type; scalar options by their own type.
+      if is_list_option(kwargs):
+        val_type = kwargs.get('member_type', str)
+      else:
+        val_type = kwargs.get('type', str)
+      fingerprintables[option_name] = val_type
+
+  return register
+
+
+def create_options(options, passthru_args=None, fingerprintable_options=None):
+  """Create a fake Options object for testing.
+
+  Note that the returned object only provides access to the provided options values. There is
+  no registration mechanism on this object. Code under test shouldn't care about resolving
+  cmd-line flags vs. config vs. env vars etc. etc.
+
+  :param dict options: A dict of scope -> (dict of option name -> value).
+  :param list passthru_args: A list of passthrough command line argument values.
+  :param dict fingerprintable_options: A dict of scope -> (dict of option name -> option type).
+                                       This registry should contain entries for any of the
+                                       `options` that are expected to contribute to fingerprinting.
+  :returns: A fake `Options` object encapsulating the given scoped options.
+  """
+  fingerprintable = fingerprintable_options or defaultdict(dict)
+
+  class FakeOptions:
+    def for_scope(self, scope):
+      # TODO(John Sirois): Some users pass in A dict of scope -> _FakeOptionValues instead of a
+      # dict of scope -> (dict of option name -> value).  Clean up these usages and kill this
+      # accommodation.
+      options_for_this_scope = options.get(scope) or {}
+      if isinstance(options_for_this_scope, _FakeOptionValues):
+        options_for_this_scope = options_for_this_scope.option_values
+
+      # Recursively overlay this scope's values on top of all enclosing scopes',
+      # bottoming out at the global scope (the empty string).
+      scoped_options = {}
+      if scope:
+        scoped_options.update(self.for_scope(enclosing_scope(scope)).option_values)
+      scoped_options.update(options_for_this_scope)
+      return _FakeOptionValues(scoped_options)
+
+    def for_global_scope(self):
+      return self.for_scope(GLOBAL_SCOPE)
+
+    def passthru_args_for_scope(self, scope):
+      # NB: the same passthru args are returned regardless of scope.
+      return passthru_args or []
+
+    def items(self):
+      return list(options.items())
+
+    @property
+    def scope_to_flags(self):
+      return {}
+
+    def get_fingerprintable_for_scope(self, bottom_scope, include_passthru=False):
+      """Returns a list of fingerprintable (option type, option value) pairs for
+      the given scope.
+
+      Note that this method only collects values for a single scope, NOT from
+      all enclosing scopes as in the Options class!
+
+      :param str bottom_scope: The scope to gather fingerprintable options for.
+      :param bool include_passthru: Whether to include passthru args captured by `bottom_scope` in the
+                                    fingerprintable options.
+      """
+      pairs = []
+      if include_passthru:
+        pu_args = self.passthru_args_for_scope(bottom_scope)
+        pairs.extend((str, arg) for arg in pu_args)
+
+      option_values = self.for_scope(bottom_scope)
+      for option_name, option_type in fingerprintable[bottom_scope].items():
+        pairs.append((option_type, option_values[option_name]))
+      return pairs
+
+    def __getitem__(self, scope):
+      return self.for_scope(scope)
+
+  return FakeOptions()
+
+
+def create_options_for_optionables(optionables,
+                                   options=None,
+                                   options_fingerprintable=None,
+                                   passthru_args=None):
+  """Create a fake Options object for testing with appropriate defaults for the given optionables.
+
+  Any scoped `options` provided will override defaults, behaving as-if set on the command line.
+
+  :param iterable optionables: A series of `Optionable` types to register default options for.
+  :param dict options: A dict of scope -> (dict of option name -> value) representing option values
+                       explicitly set via the command line.
+  :param dict options_fingerprintable: A dict of scope -> (dict of option name -> option type)
+                                       representing the fingerprintable options
+                                       and the scopes they are registered for.
+  :param list passthru_args: A list of passthrough args (specified after `--` on the command line).
+  :returns: A fake `Options` object with defaults populated for the given `optionables` and any
+            explicitly set `options` overlayed.
+  """
+  all_options = defaultdict(dict)
+  fingerprintable_options = defaultdict(dict)
+  bootstrap_option_values = None
+
+  if options_fingerprintable:
+    for scope, opts in options_fingerprintable.items():
+      fingerprintable_options[scope].update(opts)
+
+  def register_func(on_scope):
+    scoped_options = all_options[on_scope]
+    scoped_fingerprintables = fingerprintable_options[on_scope]
+    register = _options_registration_function(scoped_options, scoped_fingerprintables)
+    # NB: reads the *current* bootstrap_option_values — None during bootstrap
+    # registration itself, the populated values for all subsequent calls.
+    register.bootstrap = bootstrap_option_values
+    register.scope = on_scope
+    return register
+
+  # TODO: This sequence is a bit repetitive of the real registration sequence.
+
+  # Register bootstrap options and grab their default values for use in subsequent registration.
+  GlobalOptionsRegistrar.register_bootstrap_options(register_func(GLOBAL_SCOPE))
+  bootstrap_option_values = _FakeOptionValues(all_options[GLOBAL_SCOPE].copy())
+
+  # Now register the full global scope options.
+  GlobalOptionsRegistrar.register_options(register_func(GLOBAL_SCOPE))
+
+  for optionable in optionables:
+    optionable.register_options(register_func(optionable.options_scope))
+
+  # Explicitly supplied values overwrite the registered defaults.
+  if options:
+    for scope, opts in options.items():
+      all_options[scope].update(opts)
+
+  return create_options(all_options,
+                        passthru_args=passthru_args,
+                        fingerprintable_options=fingerprintable_options)
diff --git a/src/python/pants/testutil/pants_run_integration_test.py b/src/python/pants/testutil/pants_run_integration_test.py
new file mode 100644
index 00000000000..bc68840f51f
--- /dev/null
+++ b/src/python/pants/testutil/pants_run_integration_test.py
@@ -0,0 +1,664 @@
+# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
+# Licensed under the Apache License, Version 2.0 (see LICENSE).
+
+import configparser
+import glob
+import os
+import re
+import shutil
+import subprocess
+import sys
+import unittest
+from contextlib import contextmanager
+from dataclasses import dataclass
+from operator import eq, ne
+from threading import Lock
+from typing import Any, Callable, List, Optional, Union
+
+from colors import strip_color
+
+from pants.base.build_environment import get_buildroot
+from pants.base.build_file import BuildFile
+from pants.base.exiter import PANTS_SUCCEEDED_EXIT_CODE
+from pants.fs.archive import ZIP
+from pants.subsystem.subsystem import Subsystem
+from pants.testutil.file_test_util import check_symlinks, contains_exact_files
+from pants.util.contextutil import environment_as, pushd, temporary_dir
+from pants.util.dirutil import fast_relpath, safe_mkdir, safe_mkdir_for, safe_open
+from pants.util.osutil import Pid
+from pants.util.process_handler import SubprocessProcessHandler
+from pants.util.strutil import ensure_binary
+
+
+# NB: If `shell=True`, it's a single `str`.
+Command = Union[str, List[str]]
+
+
+@dataclass(frozen=True)
+class PantsResult:
+  command: Command
+  returncode: int
+  stdout_data: str
+  stderr_data: str
+  workdir: str
+  pid: Pid
+
+
+@dataclass(frozen=True)
+class PantsJoinHandle:
+  command: Command
+  process: subprocess.Popen
+  workdir: str
+
+  def join(
+    self, stdin_data: Optional[Union[bytes, str]] = None, tee_output: bool = False
+  ) -> PantsResult:
+    """Wait for the pants process to complete, and return a PantsResult for it."""
+
+    communicate_fn = self.process.communicate
+    if tee_output:
+      communicate_fn = SubprocessProcessHandler(self.process).communicate_teeing_stdout_and_stderr
+    if stdin_data is not None:
+      stdin_data = ensure_binary(stdin_data)
+    (stdout_data, stderr_data) = communicate_fn(stdin_data)
+
+    if self.process.returncode != PANTS_SUCCEEDED_EXIT_CODE:
+      render_logs(self.workdir)
+
+    return PantsResult(
+      command=self.command,
+      returncode=self.process.returncode,
+      stdout_data=stdout_data.decode(),
+      stderr_data=stderr_data.decode(),
+      workdir=self.workdir,
+      pid=self.process.pid
+    )
+
+
+def ensure_cached(expected_num_artifacts=None):
+  """Decorator for asserting cache writes in an integration test.
+
+  :param expected_num_artifacts: Expected number of artifacts to be in the task's
+                                 cache after running the test. If unspecified, will
+                                 assert that the number of artifacts in the cache is
+                                 non-zero.
+  """
+  def decorator(test_fn):
+    def wrapper(self, *args, **kwargs):
+      with temporary_dir() as artifact_cache:
+        cache_args = f'--cache-write-to=["{artifact_cache}"]'
+
+        test_fn(self, *args + (cache_args,), **kwargs)
+
+        num_artifacts = 0
+        for (root, _, files) in os.walk(artifact_cache):
+          print(root, files)
+          num_artifacts += len(files)
+
+        if expected_num_artifacts is None:
+          self.assertNotEqual(num_artifacts, 0)
+        else:
+          self.assertEqual(num_artifacts, expected_num_artifacts)
+    return wrapper
+  return decorator
+
+
+def ensure_resolver(f):
+  """A decorator for running an integration test with ivy and coursier as the resolver."""
+  def wrapper(self, *args, **kwargs):
+    for env_var_value in ('ivy', 'coursier'):
+      with environment_as(HERMETIC_ENV='PANTS_RESOLVER_RESOLVER', PANTS_RESOLVER_RESOLVER=env_var_value):
+        f(self, *args, **kwargs)
+
+  return wrapper
+
+
+def ensure_daemon(f):
+  """A decorator for running an integration test with and without the daemon enabled."""
+  def wrapper(self, *args, **kwargs):
+    for enable_daemon in [False, True]:
+      with temporary_dir() as subprocess_dir:
+        enable_daemon_str = str(enable_daemon)
+        env = {
+            'HERMETIC_ENV': 'PANTS_ENABLE_PANTSD,PANTS_ENABLE_V2_ENGINE,PANTS_SUBPROCESSDIR',
+            'PANTS_ENABLE_PANTSD': enable_daemon_str,
+            'PANTS_ENABLE_V2_ENGINE': enable_daemon_str,
+            'PANTS_SUBPROCESSDIR': subprocess_dir,
+          }
+        with environment_as(**env):
+          try:
+            f(self, *args, **kwargs)
+            if enable_daemon:
+              self.assert_success(self.run_pants(['kill-pantsd']))
+          except Exception:
+            print(f'Test failed with enable-pantsd={enable_daemon}:')
+            if enable_daemon:
+              # If we are already raising, do not attempt to confirm that `kill-pantsd` succeeds.
+              self.run_pants(['kill-pantsd'])
+            else:
+              print('Skipping run with enable-pantsd=true because it already failed with enable-pantsd=false.')
+            raise
+  return wrapper
+
+
+def render_logs(workdir):
+  """Renders all potentially relevant logs from the given workdir to stdout."""
+  filenames = list(
+      glob.glob(os.path.join(workdir, 'logs/exceptions*log'))
+    ) + list(
+      glob.glob(os.path.join(workdir, 'pantsd/pantsd.log'))
+    )
+  for filename in filenames:
+    rel_filename = fast_relpath(filename, workdir)
+    print(f'{rel_filename} +++ ')
+    for line in _read_log(filename):
+      print(f'{rel_filename} >>> {line}')
+    print(f'{rel_filename} --- ')
+
+
+def read_pantsd_log(workdir):
+  """Yields all lines from the pantsd log under the given workdir."""
+  # Surface the pantsd log for easy viewing via pytest's `-s` (don't capture stdio) option.
+  for line in _read_log(f'{workdir}/pantsd/pantsd.log'):
+    yield line
+
+
+def _read_log(filename):
+  with open(filename, 'r') as f:
+    for line in f:
+      yield line.rstrip()
+
+
+class PantsRunIntegrationTest(unittest.TestCase):
+  """A base class useful for integration tests for targets in the same repo."""
+
+  class InvalidTestEnvironmentError(Exception):
+    """Raised when the external environment is not set up properly to run integration tests."""
+
+  @classmethod
+  def use_pantsd_env_var(cls):
+    """Subclasses may override to acknowledge that the tests cannot run when pantsd is enabled,
+    or they want to configure pantsd themselves.
+
+    In those cases, --enable-pantsd will not be added to their configuration.
+    This approach is coarsely grained, meaning we disable pantsd in some tests that actually run
+    when pantsd is enabled. However:
+      - The number of mislabeled tests is currently small (~20 tests).
+      - Those tests will still run, just with pantsd disabled.
+
+    N.B. Currently, this doesn't interact with test hermeticity.
+    This means that, if the test coordinator has set PANTS_ENABLE_PANTSD, and a test is not marked
+    as hermetic, it will run under pantsd regardless of the value of this function.
+    """
+    should_pantsd = os.getenv("USE_PANTSD_FOR_INTEGRATION_TESTS")
+    return should_pantsd in ["True", "true", "1"]
+
+  @classmethod
+  def hermetic(cls):
+    """Subclasses may override to acknowledge that they are hermetic.
+
+    That is, that they should run without reading the real pants.ini.
+    """
+    return False
+
+  @classmethod
+  def hermetic_env_whitelist(cls):
+    """A whitelist of environment variables to propagate to tests when hermetic=True."""
+    return [
+        # Used in the wrapper script to locate a rust install.
+        'HOME',
+        # Needed to find python interpreters and other binaries.
+        'PATH',
+        'PANTS_PROFILE',
+        # Ensure that the underlying ./pants invocation doesn't run from sources
+        # (and therefore bootstrap) if we don't want it to.
+        'RUN_PANTS_FROM_PEX',
+      ]
+
+  def setUp(self):
+    super().setUp()
+    # Some integration tests rely on clean subsystem state (e.g., to set up a DistributionLocator).
+    Subsystem.reset()
+
+  def temporary_workdir(self, cleanup=True):
+    # We can hard-code '.pants.d' here because we know that will always be its value
+    # in the pantsbuild/pants repo (e.g., that's what we .gitignore in that repo).
+    # Grabbing the pants_workdir config would require this pants's config object,
+    # which we don't have a reference to here.
+    root = os.path.join(get_buildroot(), '.pants.d', 'tmp')
+    safe_mkdir(root)
+    return temporary_dir(root_dir=root, cleanup=cleanup, suffix='.pants.d')
+
+  def temporary_cachedir(self):
+    return temporary_dir(suffix='__CACHEDIR')
+
+  def temporary_sourcedir(self):
+    return temporary_dir(root_dir=get_buildroot())
+
+  @contextmanager
+  def source_clone(self, source_dir):
+    with self.temporary_sourcedir() as clone_dir:
+      target_spec_dir = os.path.relpath(clone_dir)
+
+      for dir_path, dir_names, file_names in os.walk(source_dir):
+        clone_dir_path = os.path.join(clone_dir, os.path.relpath(dir_path, source_dir))
+        for dir_name in dir_names:
+          os.mkdir(os.path.join(clone_dir_path, dir_name))
+        for file_name in file_names:
+          with open(os.path.join(dir_path, file_name), 'r') as f:
+            content = f.read()
+          if BuildFile._is_buildfile_name(file_name):
+            content = content.replace(source_dir, target_spec_dir)
+          with open(os.path.join(clone_dir_path, file_name), 'w') as f:
+            f.write(content)
+
+      yield clone_dir
+
+  # Incremented each time we spawn a pants subprocess.
+  # Appended to PANTS_PROFILE in the called pants process, so that each subprocess
+  # writes to its own profile file, instead of all stomping on the parent process's profile.
+  _profile_disambiguator = 0
+  _profile_disambiguator_lock = Lock()
+
+  @classmethod
+  def _get_profile_disambiguator(cls):
+    with cls._profile_disambiguator_lock:
+      ret = cls._profile_disambiguator
+      cls._profile_disambiguator += 1
+      return ret
+
+  def get_cache_subdir(self, cache_dir, subdir_glob='*/', other_dirs=()):
+    """Check that there is only one entry of `cache_dir` which matches the glob
+    specified by `subdir_glob`, excluding `other_dirs`, and
+    return it.
+
+    :param str cache_dir: absolute path to some directory.
+    :param str subdir_glob: string specifying a glob for (one level down)
+                            subdirectories of `cache_dir`.
+    :param list other_dirs: absolute paths to subdirectories of `cache_dir`
+                            which must exist and match `subdir_glob`.
+    :return: Assert that there is a single remaining directory entry matching
+             `subdir_glob` after removing `other_dirs`, and return it.
+
+             This method does not check if its arguments or return values are
+             files or directories. If `subdir_glob` has a trailing slash, so
+             will the return value of this method.
+    """
+    subdirs = set(glob.glob(os.path.join(cache_dir, subdir_glob)))
+    other_dirs = set(other_dirs)
+    self.assertTrue(other_dirs.issubset(subdirs))
+    remaining_dirs = subdirs - other_dirs
+    self.assertEqual(len(remaining_dirs), 1)
+    return list(remaining_dirs)[0]
+
+  def run_pants_with_workdir_without_waiting(self, command, workdir, config=None, extra_env=None,
+                                             build_root=None, print_exception_stacktrace=True,
+                                             **kwargs):
+    args = [
+      '--no-pantsrc',
+      f'--pants-workdir={workdir}',
+      '--kill-nailguns',
+      f'--print-exception-stacktrace={print_exception_stacktrace}',
+    ]
+
+    if self.hermetic():
+      args.extend(['--pants-config-files=[]',
+                   # Turn off cache globally.  A hermetic integration test shouldn't rely on cache,
+                   # or we have no idea if it's actually testing anything.
+                   '--no-cache-read', '--no-cache-write',
+                   # Turn cache on just for tool bootstrapping, for performance.
+                   '--cache-bootstrap-read', '--cache-bootstrap-write'
+                   ])
+
+    if self.use_pantsd_env_var():
+      args.append("--enable-pantsd=True")
+      args.append("--no-shutdown-pantsd-after-run")
+
+    if config:
+      config_data = config.copy()
+      # TODO(#6071): RawConfigParser is legacy. Investigate updating to modern API.
+      ini = configparser.RawConfigParser(defaults=config_data.pop('DEFAULT', None))
+      for section, section_config in config_data.items():
+        ini.add_section(section)
+        for key, value in section_config.items():
+          ini.set(section, key, value)
+      ini_file_name = os.path.join(workdir, 'pants.ini')
+      with safe_open(ini_file_name, mode='w') as fp:
+        ini.write(fp)
+      args.append('--pants-config-files=' + ini_file_name)
+
+    pants_script = [sys.executable, '-m', 'pants']
+
+    # Permit usage of shell=True and string-based commands to allow e.g. `./pants | head`.
+    if kwargs.get('shell') is True:
+      assert not isinstance(command, list), 'must pass command as a string when using shell=True'
+      pants_command = ' '.join([*pants_script, ' '.join(args), command])
+    else:
+      pants_command = pants_script + args + command
+
+    # Only whitelisted entries will be included in the environment if hermetic=True.
+    if self.hermetic():
+      env = dict()
+      # With an empty environment, we would generally get the true underlying system default
+      # encoding, which is unlikely to be what we want (it's generally ASCII, still). So we
+      # explicitly set an encoding here.
+      env['LC_ALL'] = 'en_US.UTF-8'
+      for h in self.hermetic_env_whitelist():
+        value = os.getenv(h)
+        if value is not None:
+          env[h] = value
+      hermetic_env = os.getenv('HERMETIC_ENV')
+      if hermetic_env:
+        for h in hermetic_env.strip(',').split(','):
+          env[h] = os.getenv(h)
+    else:
+      env = os.environ.copy()
+    if extra_env:
+      env.update(extra_env)
+    env.update(PYTHONPATH=os.pathsep.join(sys.path))
+
+    # Don't overwrite the profile of this process in the called process.
+    # Instead, write the profile into a sibling file.
+    if env.get('PANTS_PROFILE'):
+      prof = f"{env['PANTS_PROFILE']}.{self._get_profile_disambiguator()}"
+      env['PANTS_PROFILE'] = prof
+      # Make a note of the subprocess command, so the user can correctly interpret the profile files.
+      with open(f'{prof}.cmd', 'w') as fp:
+        fp.write(' '.join(pants_command))
+
+    return PantsJoinHandle(
+        command=pants_command,
+        process=subprocess.Popen(
+          pants_command,
+          env=env,
+          stdin=subprocess.PIPE,
+          stdout=subprocess.PIPE,
+          stderr=subprocess.PIPE,
+          **kwargs
+        ),
+        workdir=workdir
+      )
+
+  def run_pants_with_workdir(
+    self, command, workdir, config=None, stdin_data=None, tee_output=False, **kwargs
+  ) -> PantsResult:
+    if config:
+      kwargs["config"] = config
+    handle = self.run_pants_with_workdir_without_waiting(command, workdir, **kwargs)
+    return handle.join(stdin_data=stdin_data, tee_output=tee_output)
+
+  def run_pants(
+    self, command, config=None, stdin_data=None, extra_env=None, cleanup_workdir=True, **kwargs
+  ) -> PantsResult:
+    """Runs pants in a subprocess.
+
+    :param list command: A list of command line arguments coming after `./pants`.
+    :param config: Optional data for a generated ini file. A map of <section-name> ->
+    map of key -> value. If order in the ini file matters, this should be an OrderedDict.
+    :param kwargs: Extra keyword args to pass to `subprocess.Popen`.
+    """
+    with self.temporary_workdir() as workdir:
+      return self.run_pants_with_workdir(
+        command,
+        workdir,
+        config,
+        stdin_data=stdin_data,
+        extra_env=extra_env,
+        **kwargs
+      )
+
+  @contextmanager
+  def pants_results(self, command, config=None, stdin_data=None, extra_env=None, **kwargs):
+    """Similar to run_pants in that it runs pants in a subprocess, but yields in order to give
+    callers a chance to do any necessary validations on the workdir.
+
+    :param list command: A list of command line arguments coming after `./pants`.
+    :param config: Optional data for a generated ini file. A map of <section-name> ->
+    map of key -> value. If order in the ini file matters, this should be an OrderedDict.
+    :param kwargs: Extra keyword args to pass to `subprocess.Popen`.
+    :returns: A PantsResult instance.
+    """
+    with self.temporary_workdir() as workdir:
+      yield self.run_pants_with_workdir(
+        command,
+        workdir,
+        config,
+        stdin_data=stdin_data,
+        extra_env=extra_env,
+        **kwargs
+      )
+
+  def bundle_and_run(self, target, bundle_name, bundle_jar_name=None, bundle_options=None,
+                     args=None,
+                     expected_bundle_jar_content=None,
+                     expected_bundle_content=None,
+                     library_jars_are_symlinks=True):
+    """Creates the bundle with pants, then does java -jar {bundle_name}.jar to execute the bundle.
+
+    :param target: target name to compile
+    :param bundle_name: resulting bundle filename (minus .zip extension)
+    :param bundle_jar_name: monolithic jar filename (minus .jar extension), if None will be the
+      same as bundle_name
+    :param bundle_options: additional options for bundle
+    :param args: optional arguments to pass to executable
+    :param expected_bundle_content: verify the bundle zip content
+    :param expected_bundle_jar_content: verify the bundle jar content
+    :param library_jars_are_symlinks: verify library jars are symlinks if True, and actual
+      files if False. Default `True` because we always create symlinks for both external and internal
+      dependencies, only exception is when shading is used.
+    :return: stdout as a string on success, raises an Exception on error
+    """
+    bundle_jar_name = bundle_jar_name or bundle_name
+    bundle_options = bundle_options or []
+    bundle_options = ['bundle.jvm'] + bundle_options + ['--archive=zip', target]
+    with self.pants_results(bundle_options) as pants_run:
+      self.assert_success(pants_run)
+
+      self.assertTrue(check_symlinks(f'dist/{bundle_name}-bundle/libs', library_jars_are_symlinks))
+      # TODO(John Sirois): We need a zip here to suck in external library classpath elements
+      # pointed to by symlinks in the run_pants ephemeral tmpdir.  Switch run_pants to be a
+      # contextmanager that yields its results while the tmpdir workdir is still active and change
+      # this test back to using an un-archived bundle.
+      with temporary_dir() as workdir:
+        ZIP.extract('dist/{bundle_name}.zip'.format(bundle_name=bundle_name), workdir)
+        if expected_bundle_content:
+          self.assertTrue(contains_exact_files(workdir, expected_bundle_content))
+        if expected_bundle_jar_content:
+          with temporary_dir() as check_bundle_jar_dir:
+            bundle_jar = os.path.join(workdir, f'{bundle_jar_name}.jar')
+            ZIP.extract(bundle_jar, check_bundle_jar_dir)
+            self.assertTrue(contains_exact_files(check_bundle_jar_dir, expected_bundle_jar_content))
+
+        optional_args = []
+        if args:
+          optional_args = args
+        java_run = subprocess.Popen(
+          ['java', '-jar', f'{bundle_jar_name}.jar'] + optional_args,
+          stdout=subprocess.PIPE,
+          cwd=workdir
+        )
+
+        stdout, _ = java_run.communicate()
+      java_returncode = java_run.returncode
+      self.assertEqual(java_returncode, 0)
+      return stdout.decode()
+
+  def assert_success(self, pants_run: PantsResult, msg=None):
+    self.assert_result(pants_run, PANTS_SUCCEEDED_EXIT_CODE, expected=True, msg=msg)
+
+  def assert_failure(self, pants_run: PantsResult, msg=None):
+    self.assert_result(pants_run, PANTS_SUCCEEDED_EXIT_CODE, expected=False, msg=msg)
+
+  def assert_result(self, pants_run: PantsResult, value, expected=True, msg=None):
+    check, assertion = (eq, self.assertEqual) if expected else (ne, self.assertNotEqual)
+    if check(pants_run.returncode, value):
+      return
+
+    details = [msg] if msg else []
+    details.append(' '.join(pants_run.command))
+    details.append(f'returncode: {pants_run.returncode}')
+
+    def indent(content):
+      return '\n\t'.join(content.splitlines())
+
+    details.append(f'stdout:\n\t{indent(pants_run.stdout_data)}')
+    details.append(f'stderr:\n\t{indent(pants_run.stderr_data)}')
+    error_msg = '\n'.join(details)
+
+    assertion(value, pants_run.returncode, error_msg)
+
+  def assert_run_contains_log(self, msg, level, module, pants_run: PantsResult):
+    """Asserts that the passed run's stderr contained the log message."""
+    self.assert_contains_log(msg, level, module, pants_run.stderr_data, pants_run.pid)
+
+  def assert_contains_log(self, msg, level, module, log, pid=None):
+    """
+    Asserts that the passed log contains the message logged by the module at the level.
+
+    If pid is specified, performs an exact match including the pid of the pants process.
+    Otherwise performs a regex match asserting that some pid is present.
+    """
+    prefix = f"[{level}] {module}:pid="
+    suffix = f": {msg}"
+    if pid is None:
+      self.assertRegex(log, re.escape(prefix) + r"\d+" + re.escape(suffix))
+    else:
+      self.assertIn(f"{prefix}{pid}{suffix}", log)
+
+  def assert_is_file(self, file_path):
+    self.assertTrue(os.path.isfile(file_path), f'file path {file_path} does not exist!')
+
+  def assert_is_not_file(self, file_path):
+    self.assertFalse(os.path.isfile(file_path), f'file path {file_path} exists!')
+
+  def normalize(self, s: str) -> str:
+    """Removes escape sequences (e.g. colored output) and all whitespace from string s."""
+    return ''.join(strip_color(s).split())
+
+  @contextmanager
+  def file_renamed(self, prefix, test_name, real_name):
+    real_path = os.path.join(prefix, real_name)
+    test_path = os.path.join(prefix, test_name)
+    try:
+      os.rename(test_path, real_path)
+      yield
+    finally:
+      os.rename(real_path, test_path)
+
+  @contextmanager
+  def temporary_file_content(self, path, content, binary_mode=True):
+    """Temporarily write content to a file for the purpose of an integration test."""
+    path = os.path.realpath(path)
+    assert path.startswith(
+      os.path.realpath(get_buildroot())), 'cannot write paths outside of the buildroot!'
+    assert not os.path.exists(path), 'refusing to overwrite an existing path!'
+    mode = 'wb' if binary_mode else 'w'
+    with open(path, mode) as fh:
+      fh.write(content)
+    try:
+      yield
+    finally:
+      os.unlink(path)
+
+  @contextmanager
+  def with_overwritten_file_content(self, file_path, temporary_content=None):
+    """A helper that resets a file after the method runs.
+
+     It will read a file, save the content, maybe write temporary_content to it, yield, then write the
+     original content to the file.
+
+    :param file_path: Absolute path to the file to be reset after the method runs.
+    :param temporary_content: Optional content to write into the file.
+    """
+    with open(file_path, 'r') as f:
+      file_original_content = f.read()
+
+    try:
+      if temporary_content is not None:
+        with open(file_path, 'w') as f:
+          f.write(temporary_content)
+      yield
+
+    finally:
+      with open(file_path, 'w') as f:
+        f.write(file_original_content)
+
+  @contextmanager
+  def mock_buildroot(self, dirs_to_copy=None):
+    """Construct a mock buildroot and return a helper object for interacting with it."""
+
+    @dataclass(frozen=True)
+    class Manager:
+      write_file: Callable[[str, str], None]
+      pushd: Any
+      new_buildroot: str
+
+    # N.B. BUILD.tools, contrib, 3rdparty needs to be copied vs symlinked to avoid
+    # symlink prefix check error in v1 and v2 engine.
+    files_to_copy = ('BUILD.tools',)
+    files_to_link = (
+      'BUILD_ROOT',
+      '.pants.d',
+      'build-support',
+      # NB: when running with --chroot or the V2 engine, `pants` refers to the source root-stripped
+      # directory src/python/pants, not the script `./pants`.
+      'pants',
+      'pants.pex',
+      'pants-plugins',
+      'pants.ini',
+      'pants.travis-ci.ini',
+      'pyproject.toml',
+      'rust-toolchain',
+      'src',
+    )
+    dirs_to_copy = ('3rdparty', 'contrib') + tuple(dirs_to_copy or [])
+
+    with self.temporary_workdir() as tmp_dir:
+      for filename in files_to_copy:
+        shutil.copy(os.path.join(get_buildroot(), filename), os.path.join(tmp_dir, filename))
+
+      for dirname in dirs_to_copy:
+        shutil.copytree(os.path.join(get_buildroot(), dirname), os.path.join(tmp_dir, dirname))
+
+      for filename in files_to_link:
+        link_target = os.path.join(get_buildroot(), filename)
+        if os.path.exists(link_target):
+          os.symlink(link_target, os.path.join(tmp_dir, filename))
+
+      def write_file(file_path, contents):
+        full_file_path = os.path.join(tmp_dir, *file_path.split(os.pathsep))
+        safe_mkdir_for(full_file_path)
+        with open(full_file_path, 'w') as fh:
+          fh.write(contents)
+
+      @contextmanager
+      def dir_context():
+        with pushd(tmp_dir):
+          yield
+
+      yield Manager(write_file, dir_context, tmp_dir)
+
+  def do_command(self, *args, **kwargs) -> PantsResult:
+    """Wrapper around run_pants method.
+
+    :param args: command line arguments used to run pants
+    """
+    cmd = list(args)
+    success = kwargs.pop('success', True)
+    pants_run = self.run_pants(cmd, **kwargs)
+    if success:
+      self.assert_success(pants_run)
+    else:
+      self.assert_failure(pants_run)
+    return pants_run
+
+  @contextmanager
+  def do_command_yielding_workdir(self, *args, **kwargs):
+    cmd = list(args)
+    success = kwargs.pop('success', True)
+    with self.pants_results(cmd, **kwargs) as pants_run:
+      if success:
+        self.assert_success(pants_run)
+      else:
+        self.assert_failure(pants_run)
+      yield pants_run
diff --git a/src/python/pants/testutil/pexrc_util.py b/src/python/pants/testutil/pexrc_util.py
new file mode 100644
index 00000000000..8c2d873f12d
--- /dev/null
+++ b/src/python/pants/testutil/pexrc_util.py
@@ -0,0 +1,33 @@
+# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
+# Licensed under the Apache License, Version 2.0 (see LICENSE).
+
+import os
+from contextlib import contextmanager
+
+from pants.base.build_environment import get_pants_cachedir
+from pants.util.contextutil import environment_as, temporary_dir
+from pants.util.dirutil import safe_mkdir_for
+
+
+@contextmanager
+def setup_pexrc_with_pex_python_path(interpreter_paths):
+  """A helper function for writing interpreter paths to a PEX_PYTHON_PATH variable in a .pexrc file.
+
+  NB: Mutates HOME and XDG_CACHE_HOME to ensure a `~/.pexrc` that won't trample any existing file
+  and will also be found.
+
+  :param list interpreter_paths: a list of paths to interpreter binaries to include on
+                                 PEX_PYTHON_PATH.
+  """
+  cache_dir = get_pants_cachedir()
+  with temporary_dir() as home:
+    xdg_cache_home = os.path.join(home, '.cache')
+    with environment_as(HOME=home, XDG_CACHE_HOME=xdg_cache_home):
+      target = os.path.join(xdg_cache_home, os.path.basename(cache_dir))
+      safe_mkdir_for(target)
+      os.symlink(cache_dir, target)
+
+      with open(os.path.join(home, '.pexrc'), 'w') as pexrc:
+        pexrc.write('PEX_PYTHON_PATH={}'.format(':'.join(interpreter_paths)))
+
+      yield
diff --git a/src/python/pants/testutil/process_test_util.py b/src/python/pants/testutil/process_test_util.py
new file mode 100644
index 00000000000..9a4b311b5d9
--- /dev/null
+++ b/src/python/pants/testutil/process_test_util.py
@@ -0,0 +1,58 @@
+# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
+# Licensed under the Apache License, Version 2.0 (see LICENSE).
+
+from contextlib import contextmanager
+from dataclasses import dataclass
+from typing import Any
+
+import psutil
+
+
+class ProcessStillRunning(AssertionError):
+  """Raised when a process shouldn't be running but is."""
+
+
+def _safe_iter_matching_processes(name):
+  for proc in psutil.process_iter():
+    try:
+      if name in ''.join(proc.cmdline()):
+        yield proc
+    except (psutil.NoSuchProcess, psutil.AccessDenied):
+      pass
+
+
+def _make_process_table(processes):
+  line_tmpl = '{0:>7} {1:>7} {2}'
+  proc_tuples = [(p.pid, p.ppid(), ''.join(p.cmdline())) for p in processes]
+  return '\n'.join(
+    [
+      line_tmpl.format('PID', 'PGID', 'CMDLINE')
+    ] + [
+      line_tmpl.format(*t) for t in sorted(proc_tuples)
+    ]
+  )
+
+
+@contextmanager
+def no_lingering_process_by_command(name):
+  """Asserts that no process exists for a given command with a helpful error, excluding
+  existing processes outside of the scope of the contextmanager."""
+  context = TrackedProcessesContext(name, set(_safe_iter_matching_processes(name)))
+  yield context
+  delta_processes = context.current_processes()
+  if delta_processes:
+    raise ProcessStillRunning(
+      '{} {} processes lingered after tests:\n{}'
+      .format(len(delta_processes), name, _make_process_table(delta_processes))
+    )
+
+
+@dataclass(frozen=True)
+class TrackedProcessesContext:
+  name: Any
+  before_processes: Any
+
+  def current_processes(self):
+    """Returns the current set of matching processes created since the context was entered."""
+    after_processes = set(_safe_iter_matching_processes(self.name))
+    return after_processes.difference(self.before_processes)
diff --git a/src/python/pants/testutil/subsystem/BUILD b/src/python/pants/testutil/subsystem/BUILD
new file mode 100644
index 00000000000..c9ba307e7aa
--- /dev/null
+++ b/src/python/pants/testutil/subsystem/BUILD
@@ -0,0 +1,9 @@
+# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
+# Licensed under the Apache License, Version 2.0 (see LICENSE).
+
+python_library(
+  dependencies=[
+    'src/python/pants/subsystem',
+    'src/python/pants/testutil/option',
+  ],
+)
diff --git a/src/python/pants/testutil/subsystem/__init__.py b/src/python/pants/testutil/subsystem/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/src/python/pants/testutil/subsystem/util.py b/src/python/pants/testutil/subsystem/util.py
new file mode 100644
index 00000000000..320cfb75c44
--- /dev/null
+++ b/src/python/pants/testutil/subsystem/util.py
@@ -0,0 +1,75 @@
+# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
+# Licensed under the Apache License, Version 2.0 (see LICENSE).
+
+from pants.subsystem.subsystem import Subsystem
+from pants.testutil.option.fakes import create_options_for_optionables
+
+
+def global_subsystem_instance(subsystem_type, options=None):
+  """Returns the global instance of a subsystem, for use in tests.
+
+  :API: public
+
+  :param type subsystem_type: The subclass of :class:`pants.subsystem.subsystem.Subsystem`
+                              to create.
+  :param options: dict of scope -> (dict of option name -> value).
+                  The scopes may be that of the global instance of the subsystem (i.e.,
+                  subsystem_type.options_scope) and/or the scopes of instances of the
+                  subsystems it transitively depends on.
+  """
+  init_subsystem(subsystem_type, options)
+  return subsystem_type.global_instance()
+
+
+def init_subsystems(subsystem_types, options=None):
+  """Initialize subsystems for use in tests.
+
+  Does not create an instance.  This function is for setting up subsystems that the code
+  under test creates.
+
+  Note that there is some redundancy between this function and TestBase.context(for_subsystems=...).
+  TODO: Fix that.
+
+  :API: public
+
+  :param list subsystem_types: The subclasses of :class:`pants.subsystem.subsystem.Subsystem`
+                               to create.
+  :param options: dict of scope -> (dict of option name -> value).
+                  The scopes may be those of the global instances of the subsystems (i.e.,
+                  subsystem_type.options_scope) and/or the scopes of instances of the
+                  subsystems they transitively depend on.
+  """
+  optionables = set()
+  for s in subsystem_types:
+    if not Subsystem.is_subsystem_type(s):
+      raise TypeError('{} is not a subclass of `Subsystem`'.format(s))
+    for si in s.known_scope_infos():
+      optionables.add(si.optionable_cls)
+  if options:
+    allowed_scopes = {o.options_scope for o in optionables}
+    for scope in options.keys():
+      if scope != '' and scope not in allowed_scopes:
+        raise ValueError('`{}` is not the scope of any of these subsystems: {}'.format(
+          scope, optionables))
+  # Don't trample existing subsystem options, in case a test has set up some
+  # other subsystems in some other way.
+  updated_options = dict(Subsystem._options.items()) if Subsystem._options else {}
+  if options:
+    updated_options.update(options)
+  Subsystem.set_options(create_options_for_optionables(optionables, options=updated_options))
+
+
+def init_subsystem(subsystem_type, options=None):
+  """
+  Singular form of :func:`pants.testutil.subsystem.util.init_subsystems`
+
+  :API: public
+
+  :param subsystem_type: The subclass of :class:`pants.subsystem.subsystem.Subsystem`
+                               to create.
+  :param options: dict of scope -> (dict of option name -> value).
+                  The scopes may be those of the global instance of the subsystem (i.e.,
+                  subsystem_type.options_scope) and/or the scopes of instances of the
+                  subsystems it transitively depends on.
+  """
+  init_subsystems([subsystem_type], options)
diff --git a/src/python/pants/testutil/task_test_base.py b/src/python/pants/testutil/task_test_base.py
new file mode 100644
index 00000000000..797f9c755ec
--- /dev/null
+++ b/src/python/pants/testutil/task_test_base.py
@@ -0,0 +1,391 @@
+# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
+# Licensed under the Apache License, Version 2.0 (see LICENSE).
+
+import glob
+import os
+import subprocess
+from abc import abstractmethod
+from contextlib import closing, contextmanager
+from dataclasses import dataclass
+from io import BytesIO
+from typing import Any, Tuple
+
+from pants.goal.goal import Goal
+from pants.ivy.bootstrapper import Bootstrapper
+from pants.task.console_task import ConsoleTask
+from pants.task.task import Task
+from pants.testutil.test_base import TestBase
+from pants.util.contextutil import temporary_dir
+from pants.util.memo import memoized_method
+from pants.util.meta import classproperty
+
+
+# TODO: Find a better home for this?
+def is_exe(name):
+  result = subprocess.call(['which', name], stdout=open(os.devnull, 'w'), stderr=subprocess.STDOUT)
+  return result == 0
+
+
+def ensure_cached(task_cls, expected_num_artifacts=None):
+  """Decorator for a task-executing unit test. Asserts that after running the
+  decorated test function, the cache for task_cls contains
+  expected_num_artifacts.
+
+  Uses a new temp dir for the artifact cache, and uses a glob based on the
+  task's synthesized subtype to find the cache directories within the new temp
+  dir which were generated by the actions performed within the test method.
+
+  :API: public
+
+  :param task_cls: Class of the task to check the artifact cache
+                   for (e.g. JarCreate).
+  :param expected_num_artifacts: Expected number of artifacts to be in the
+                                 task's cache after running the test. If
+                                 unspecified, will assert that the number of
+                                 artifacts in the cache is non-zero.
+  """
+  def decorator(test_fn):
+    def wrapper(self, *args, **kwargs):
+      with self.cache_check(expected_num_artifacts=expected_num_artifacts):
+        test_fn(self, *args, **kwargs)
+    return wrapper
+  return decorator
+
+
+class TaskTestBase(TestBase):
+  """A baseclass useful for testing a single Task type.
+
+  :API: public
+  """
+
+  options_scope = 'test_scope'
+
+  @classmethod
+  @abstractmethod
+  def task_type(cls):
+    """Subclasses must return the type of the Task subclass under test.
+
+    :API: public
+    """
+
+  def setUp(self):
+    """
+    :API: public
+    """
+    super().setUp()
+    self._testing_task_type = self.synthesize_task_subtype(self.task_type(), self.options_scope)
+    # We locate the workdir below the pants_workdir, which TestBase locates within the BuildRoot.
+    # TestBase cleans this up, so we don't need to.  We give it a stable name, so that we can
+    # use artifact caching to speed up tests.
+    self._test_workdir = os.path.join(self.pants_workdir, self.task_type().stable_name())
+    os.mkdir(self._test_workdir)
+    # TODO: Push this down to JVM-related tests only? Seems wrong to have an ivy-specific
+    # action in this non-JVM-specific, high-level base class.
+    Bootstrapper.reset_instance()
+
+  @property
+  def test_workdir(self):
+    """
+    :API: public
+    """
+    return self._test_workdir
+
+  def synthesize_task_subtype(self, task_type, options_scope):
+    """Creates a synthetic subclass of the task type.
+
+    Note that passing in a stable options scope will speed up some tests, as the scope may appear
+    in the paths of tools used by the task, and if these are stable, tests can get artifact
+    cache hits when bootstrapping these tools. This doesn't hurt test isolation, as we reset
+    class-level state between each test.
+
+    # TODO: Use the task type directly once we re-do the Task lifecycle.
+
+    :API: public
+
+    :param task_type: The task type to subtype.
+    :param options_scope: The scope to give options on the generated task type.
+    :return: The synthesized task subtype.
+    """
+    subclass_name = 'test_{0}_{1}'.format(task_type.__name__, options_scope)
+    return type(subclass_name, (task_type,), {'_stable_name': task_type._compute_stable_name(),
+                                              'options_scope': options_scope})
+
+  def set_options(self, **kwargs):
+    """
+    :API: public
+    """
+    self.set_options_for_scope(self.options_scope, **kwargs)
+
+  def context(self, for_task_types=None, **kwargs):
+    """
+    :API: public
+    """
+    # Add in our task type.
+    for_task_types = [self._testing_task_type] + (for_task_types or [])
+    return super().context(for_task_types=for_task_types, **kwargs)
+
+  def create_task(self, context, workdir=None):
+    """
+    :API: public
+    """
+    if workdir is None:
+      workdir = self.test_workdir
+    return self._testing_task_type(context, workdir)
+
+  @contextmanager
+  def cache_check(self, expected_num_artifacts=None):
+    """Sets up a temporary artifact cache and checks that the yielded-to code populates it.
+
+    :param expected_num_artifacts: Expected number of artifacts to be in the cache after yielding.
+                                   If unspecified, will assert that the number of artifacts in the
+                                   cache is non-zero.
+    """
+    with temporary_dir() as artifact_cache:
+      self.set_options_for_scope('cache.{}'.format(self.options_scope),
+                                 write_to=[artifact_cache])
+
+      yield
+
+      cache_subdir_glob_str = os.path.join(artifact_cache, '*/')
+      cache_subdirs = glob.glob(cache_subdir_glob_str)
+
+      if expected_num_artifacts == 0:
+        self.assertEqual(len(cache_subdirs), 0)
+        return
+
+      self.assertEqual(len(cache_subdirs), 1)
+      task_cache = cache_subdirs[0]
+
+      num_artifacts = 0
+      for (_, _, files) in os.walk(task_cache):
+        num_artifacts += len(files)
+
+      if expected_num_artifacts is None:
+        self.assertNotEqual(num_artifacts, 0)
+      else:
+        self.assertEqual(num_artifacts, expected_num_artifacts)
+
+
+class ConsoleTaskTestBase(TaskTestBase):
+  """A base class useful for testing ConsoleTasks.
+
+  :API: public
+  """
+
+  def setUp(self):
+    """
+    :API: public
+    """
+    Goal.clear()
+    super().setUp()
+
+    task_type = self.task_type()
+    assert issubclass(task_type, ConsoleTask), \
+        'task_type() must return a ConsoleTask subclass, got %s' % task_type
+
+  def execute_task(self, targets=None, options=None):
+    """Creates a new task and executes it with the given config, command line args and targets.
+
+    :API: public
+
+    :param targets: Optional list of Target objects passed on the command line.
+    Returns the text output of the task.
+    """
+    options = options or {}
+    with closing(BytesIO()) as output:
+      self.set_options(**options)
+      context = self.context(target_roots=targets, console_outstream=output)
+      task = self.create_task(context)
+      task.execute()
+      return output.getvalue().decode()
+
+  def execute_console_task(self, targets=None, extra_targets=None, options=None,
+                           passthru_args=None, workspace=None, scheduler=None):
+    """Creates a new task and executes it with the given config, command line args and targets.
+
+    :API: public
+
+    :param options: option values.
+    :param targets: optional list of Target objects passed on the command line.
+    :param extra_targets: optional list of extra targets in the context in addition to those
+                          passed on the command line.
+    :param passthru_args: optional list of passthru_args
+    :param workspace: optional Workspace to pass into the context.
+
+    Returns the list of items returned from invoking the console task's console_output method.
+    """
+    options = options or {}
+    self.set_options(**options)
+    context = self.context(
+      target_roots=targets,
+      passthru_args=passthru_args,
+      workspace=workspace,
+      scheduler=scheduler
+    )
+    return self.execute_console_task_given_context(context, extra_targets=extra_targets)
+
+  def execute_console_task_given_context(self, context, extra_targets=None):
+    """Creates a new task and executes it with the context and extra targets.
+
+    :API: public
+
+    :param context: The pants run context to use.
+    :param extra_targets: An optional list of extra targets in the context in addition to those
+                          passed on the command line.
+    :returns: The list of items returned from invoking the console task's console_output method.
+    :rtype: list of strings
+    """
+    task = self.create_task(context)
+    input_targets = task.get_targets() if task.act_transitively else context.target_roots
+    return list(task.console_output(list(input_targets) + list(extra_targets or ())))
+
+  def assert_entries(self, sep, *output, **kwargs):
+    """Verifies the expected output text is flushed by the console task under test.
+
+    NB: order of entries is not tested, just presence.
+
+    :API: public
+
+    sep:      the expected output separator.
+    *output:  the output entries expected between the separators
+    **kwargs: additional options passed to execute_task.
+    """
+    # We expect each output line to be suffixed with the separator, so for , and [1,2,3] we expect:
+    # '1,2,3,' - splitting this by the separator we should get ['1', '2', '3', ''] - always an extra
+    # empty string if the separator is properly always a suffix and not applied just between
+    # entries.
+    self.assertEqual(sorted(list(output) + ['']), sorted((self.execute_task(**kwargs)).split(sep)))
+
+  def assert_console_output(self, *output, **kwargs):
+    """Verifies the expected output entries are emitted by the console task under test.
+
+    NB: order of entries is not tested, just presence.
+
+    :API: public
+
+    *output:  the expected output entries
+    **kwargs: additional kwargs passed to execute_console_task.
+    """
+    self.assertEqual(sorted(output), sorted(self.execute_console_task(**kwargs)))
+
+  def assert_console_output_contains(self, output, **kwargs):
+    """Verifies the expected output string is emitted by the console task under test.
+
+    :API: public
+
+    output:  the expected output entry(ies)
+    **kwargs: additional kwargs passed to execute_console_task.
+    """
+    self.assertIn(output, self.execute_console_task(**kwargs))
+
+  def assert_console_output_ordered(self, *output, **kwargs):
+    """Verifies the expected output entries are emitted by the console task under test.
+
+    NB: order of entries is tested.
+
+    :API: public
+
+    *output:  the expected output entries in expected order
+    **kwargs: additional kwargs passed to execute_console_task.
+    """
+    self.assertEqual(list(output), self.execute_console_task(**kwargs))
+
+  def assert_console_raises(self, exception, **kwargs):
+    """Verifies the expected exception is raised by the console task under test.
+
+    :API: public
+
+    **kwargs: additional kwargs are passed to execute_console_task.
+    """
+    with self.assertRaises(exception):
+      self.execute_console_task(**kwargs)
+
+
+class DeclarativeTaskTestMixin:
+  """Experimental mixin for task tests allows specifying tasks to be run before or after the task.
+
+  Calling `self.invoke_tasks()` will create instances of and execute task types in
+  `self.run_before_task_types()`, then `task_type()`, then `self.run_after_task_types()`.
+  """
+
+  @classproperty
+  def run_before_task_types(cls):
+    return []
+
+  @classproperty
+  def run_after_task_types(cls):
+    return []
+
+  @memoized_method
+  def _synthesize_task_types(self, task_types=()):
+    return [
+      self.synthesize_task_subtype(tsk, '__tmp_{}'.format(tsk.__name__))
+      # TODO(#7127): make @memoized_method convert lists to tuples for hashing!
+      for tsk in task_types
+    ]
+
+  def _create_task(self, task_type, context):
+    """Helper method to instantiate tasks besides self._testing_task_type in the test workdir."""
+    return task_type(context, self.test_workdir)
+
+  @dataclass(frozen=True)
+  class TaskInvocationResult:
+    context: Any
+    before_tasks: Tuple[Task, ...]
+    this_task: Task
+    after_tasks: Tuple[Task, ...]
+
+  def invoke_tasks(self, target_closure=None, **context_kwargs) -> 'TaskInvocationResult':
+    """Create and execute the declaratively specified tasks in order.
+
+    Create instances of and execute task types in `self.run_before_task_types()`, then
+    `task_type()`, then `self.run_after_task_types()`.
+
+    :param Iterable target_closure: If not None, check that the build graph contains exactly these
+                                    targets before executing the tasks.
+    :param **context_kwargs: kwargs passed to `self.context()`. Note that this method already sets
+                                    `for_task_types`.
+    :return: A datatype containing the created context and the task instances which were executed.
+    :raises: If any exception is raised during task execution, the context will be attached to the
+             exception object as the attribute '_context' with setattr() before re-raising.
+    """
+    run_before_synthesized_task_types = self._synthesize_task_types(tuple(self.run_before_task_types))
+    run_after_synthesized_task_types = self._synthesize_task_types(tuple(self.run_after_task_types))
+    all_synthesized_task_types = run_before_synthesized_task_types + [
+      self._testing_task_type,
+    ] + run_after_synthesized_task_types
+
+    context = self.context(
+      for_task_types=all_synthesized_task_types,
+      **context_kwargs)
+    if target_closure is not None:
+      self.assertEqual(set(target_closure), set(context.build_graph.targets()))
+
+    run_before_task_instances = [
+      self._create_task(task_type, context)
+      for task_type in run_before_synthesized_task_types
+    ]
+    current_task_instance = self._create_task(
+      self._testing_task_type, context)
+    run_after_task_instances = [
+      self._create_task(task_type, context)
+      for task_type in run_after_synthesized_task_types
+    ]
+    all_task_instances = run_before_task_instances + [
+      current_task_instance
+    ] + run_after_task_instances
+
+    try:
+      for tsk in all_task_instances:
+        tsk.execute()
+    except Exception as e:
+      # TODO(#7644): Remove this hack before anything more starts relying on it!
+      setattr(e, '_context', context)
+      raise e
+
+    return self.TaskInvocationResult(
+      context=context,
+      before_tasks=tuple(run_before_task_instances),
+      this_task=current_task_instance,
+      after_tasks=tuple(run_after_task_instances),
+    )
diff --git a/src/python/pants/testutil/test_base.py b/src/python/pants/testutil/test_base.py
new file mode 100644
index 00000000000..d7ab87f14df
--- /dev/null
+++ b/src/python/pants/testutil/test_base.py
@@ -0,0 +1,781 @@
+# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
+# Licensed under the Apache License, Version 2.0 (see LICENSE).
+
+import itertools
+import logging
+import os
+import unittest
+import warnings
+from abc import ABC, ABCMeta, abstractmethod
+from collections import defaultdict
+from contextlib import contextmanager
+from tempfile import mkdtemp
+from textwrap import dedent
+
+from pants.base.build_root import BuildRoot
+from pants.base.cmd_line_spec_parser import CmdLineSpecParser
+from pants.base.exceptions import TaskError
+from pants.base.target_roots import TargetRoots
+from pants.build_graph.address import Address
+from pants.build_graph.build_configuration import BuildConfiguration
+from pants.build_graph.build_file_aliases import BuildFileAliases
+from pants.build_graph.target import Target
+from pants.engine.fs import PathGlobs, PathGlobsAndRoot
+from pants.engine.legacy.graph import HydratedField
+from pants.engine.legacy.structs import SourcesField
+from pants.engine.rules import RootRule
+from pants.init.engine_initializer import EngineInitializer
+from pants.init.util import clean_global_runtime_state
+from pants.option.options_bootstrapper import OptionsBootstrapper
+from pants.source.source_root import SourceRootConfig
+from pants.subsystem.subsystem import Subsystem
+from pants.task.goal_options_mixin import GoalOptionsMixin
+from pants.testutil.base.context_utils import create_context_from_options
+from pants.testutil.engine.util import init_native
+from pants.testutil.option.fakes import create_options_for_optionables
+from pants.testutil.subsystem import util as subsystem_util
+from pants.util.collections import assert_single_element
+from pants.util.contextutil import temporary_dir
+from pants.util.dirutil import (
+  recursive_dirname,
+  relative_symlink,
+  safe_file_dump,
+  safe_mkdir,
+  safe_mkdtemp,
+  safe_open,
+  safe_rmtree,
+)
+from pants.util.memo import memoized_method
+from pants.util.meta import classproperty
+
+
+class AbstractTestGenerator(ABC):
+  """A mixin that facilitates test generation at runtime."""
+
+  @classmethod
+  @abstractmethod
+  def generate_tests(cls):
+    """Generate tests for a given class.
+
+    This should be called against the composing class in its defining module, e.g.
+
+      class ThingTest(TestGenerator):
+        ...
+
+      ThingTest.generate_tests()
+
+    """
+
+  @classmethod
+  def add_test(cls, method_name, method):
+    """A classmethod that adds dynamic test methods to a given class.
+
+    :param string method_name: The name of the test method (e.g. `test_thing_x`).
+    :param callable method: A callable representing the method. This should take a 'self' argument
+                            as its first parameter for instance method binding.
+    """
+    assert not hasattr(cls, method_name), (
+      'a test with name `{}` already exists on `{}`!'.format(method_name, cls.__name__)
+    )
+    assert method_name.startswith('test_'), '{} is not a valid test name!'.format(method_name)
+    setattr(cls, method_name, method)
+
+
+class TestBase(unittest.TestCase, metaclass=ABCMeta):
+  """A baseclass useful for tests requiring a temporary buildroot.
+
+  :API: public
+  """
+
+  _scheduler = None
+  _local_store_dir = None
+  _build_graph = None
+  _address_mapper = None
+
+  def build_path(self, relpath):
+    """Returns the canonical BUILD file path for the given relative build path.
+
+    :API: public
+    """
+    if os.path.basename(relpath).startswith('BUILD'):
+      return relpath
+    else:
+      return os.path.join(relpath, 'BUILD')
+
+  def create_dir(self, relpath):
+    """Creates a directory under the buildroot.
+
+    :API: public
+
+    relpath: The relative path to the directory from the build root.
+    """
+    path = os.path.join(self.build_root, relpath)
+    safe_mkdir(path)
+    self.invalidate_for(relpath)
+    return path
+
+  def create_workdir_dir(self, relpath):
+    """Creates a directory under the work directory.
+
+    :API: public
+
+    relpath: The relative path to the directory from the work directory.
+    """
+    path = os.path.join(self.pants_workdir, relpath)
+    safe_mkdir(path)
+    self.invalidate_for(relpath)
+    return path
+
+  def invalidate_for(self, *relpaths):
+    """Invalidates all files from the relpath, recursively up to the root.
+
+    Many python operations implicitly create parent directories, so we assume that touching a
+    file located below directories that do not currently exist will result in their creation.
+    """
+    if self._scheduler is None:
+      return
+    files = {f for relpath in relpaths for f in recursive_dirname(relpath)}
+    return self._scheduler.invalidate_files(files)
+
+  def create_link(self, relsrc, reldst):
+    """Creates a symlink within the buildroot.
+
+    :API: public
+
+    relsrc: A relative path for the source of the link.
+    reldst: A relative path for the destination of the link.
+    """
+    src = os.path.join(self.build_root, relsrc)
+    dst = os.path.join(self.build_root, reldst)
+    relative_symlink(src, dst)
+    self.invalidate_for(reldst)
+
+  def create_file(self, relpath, contents='', mode='w'):
+    """Writes to a file under the buildroot.
+
+    :API: public
+
+    relpath:  The relative path to the file from the build root.
+    contents: A string containing the contents of the file - '' by default.
+    mode:     The mode to write to the file in - over-write by default.
+    """
+    path = os.path.join(self.build_root, relpath)
+    with safe_open(path, mode=mode) as fp:
+      fp.write(contents)
+    self.invalidate_for(relpath)
+    return path
+
+  def create_files(self, path, files):
+    """Writes to a file under the buildroot with contents same as file name.
+
+    :API: public
+
+     path:  The relative path to the file from the build root.
+     files: List of file names.
+    """
+    for f in files:
+      self.create_file(os.path.join(path, f), contents=f)
+
+  def create_workdir_file(self, relpath, contents='', mode='w'):
+    """Writes to a file under the work directory.
+
+    :API: public
+
+    relpath:  The relative path to the file from the work directory.
+    contents: A string containing the contents of the file - '' by default.
+    mode:     The mode to write to the file in - over-write by default.
+    """
+    path = os.path.join(self.pants_workdir, relpath)
+    with safe_open(path, mode=mode) as fp:
+      fp.write(contents)
+    return path
+
+  def add_to_build_file(self, relpath, target):
+    """Adds the given target specification to the BUILD file at relpath.
+
+    :API: public
+
+    relpath: The relative path to the BUILD file from the build root.
+    target:  A string containing the target definition as it would appear in a BUILD file.
+    """
+    self.create_file(self.build_path(relpath), target, mode='a')
+
+  def make_target(self,
+                  spec='',
+                  target_type=Target,
+                  dependencies=None,
+                  derived_from=None,
+                  synthetic=False,
+                  make_missing_sources=True,
+                  **kwargs):
+    """Creates a target and injects it into the test's build graph.
+
+    :API: public
+
+    :param string spec: The target address spec that locates this target.
+    :param type target_type: The concrete target subclass to create this new target from.
+    :param list dependencies: A list of target instances this new target depends on.
+    :param derived_from: The target this new target was derived from.
+    :type derived_from: :class:`pants.build_graph.target.Target`
+    """
+    self._init_target_subsystem()
+
+    address = Address.parse(spec)
+
+    if make_missing_sources and 'sources' in kwargs:
+      for source in kwargs['sources']:
+        if '*' not in source:
+          self.create_file(os.path.join(address.spec_path, source), mode='a', contents='')
+      kwargs['sources'] = self.sources_for(kwargs['sources'], address.spec_path)
+
+    target = target_type(name=address.target_name,
+                         address=address,
+                         build_graph=self.build_graph,
+                         **kwargs)
+    dependencies = dependencies or []
+
+    self.build_graph.apply_injectables([target])
+    self.build_graph.inject_target(target,
+                                   dependencies=[dep.address for dep in dependencies],
+                                   derived_from=derived_from,
+                                   synthetic=synthetic)
+
+    # TODO(John Sirois): This re-creates a little bit too much work done by the BuildGraph.
+    # Fixup the BuildGraph to deal with non BuildFileAddresses better and just leverage it.
+    traversables = [target.compute_dependency_specs(payload=target.payload)]
+
+    for dependency_spec in itertools.chain(*traversables):
+      dependency_address = Address.parse(dependency_spec, relative_to=address.spec_path)
+      dependency_target = self.build_graph.get_target(dependency_address)
+      if not dependency_target:
+        raise ValueError('Tests must make targets for dependency specs ahead of them '
+                         'being traversed, {} tried to traverse {} which does not exist.'
+                         .format(target, dependency_address))
+      if dependency_target not in target.dependencies:
+        self.build_graph.inject_dependency(dependent=target.address,
+                                           dependency=dependency_address)
+        target.mark_transitive_invalidation_hash_dirty()
+
+    return target
+
+  def sources_for(self, package_relative_path_globs, package_dir=''):
+    sources_field = SourcesField(
+      Address.parse('{}:_bogus_target_for_test'.format(package_dir)),
+      'sources',
+      {'globs': package_relative_path_globs},
+      None,
+      PathGlobs(tuple(os.path.join(package_dir, path) for path in package_relative_path_globs)),
+      lambda _: True,
+    )
+    field = self.scheduler.product_request(HydratedField, [sources_field])[0]
+    return field.value
+
+  @classmethod
+  def alias_groups(cls):
+    """
+    :API: public
+    """
+    return BuildFileAliases(targets={'target': Target})
+
+  @classmethod
+  def rules(cls):
+    # Required for sources_for:
+    return [RootRule(SourcesField)]
+
+  @classmethod
+  def build_config(cls):
+    build_config = BuildConfiguration()
+    build_config.register_aliases(cls.alias_groups())
+    build_config.register_rules(cls.rules())
+    return build_config
+
+  def setUp(self):
+    """
+    :API: public
+    """
+    super().setUp()
+    # Avoid resetting the Runtracker here, as that is specific to fork'd process cleanup.
+    clean_global_runtime_state(reset_subsystem=True)
+
+    self.addCleanup(self._reset_engine)
+
+    safe_mkdir(self.build_root, clean=True)
+    safe_mkdir(self.pants_workdir)
+    self.addCleanup(safe_rmtree, self.build_root)
+
+    BuildRoot().path = self.build_root
+    self.addCleanup(BuildRoot().reset)
+
+    self.subprocess_dir = os.path.join(self.build_root, '.pids')
+
+    self.options = defaultdict(dict)  # scope -> key-value mapping.
+    self.options[''] = {
+      'pants_workdir': self.pants_workdir,
+      'pants_supportdir': os.path.join(self.build_root, 'build-support'),
+      'pants_distdir': os.path.join(self.build_root, 'dist'),
+      'pants_configdir': os.path.join(self.build_root, 'config'),
+      'pants_subprocessdir': self.subprocess_dir,
+      'cache_key_gen_version': '0-test',
+    }
+    self.options['cache'] = {
+      'read_from': [],
+      'write_to': [],
+    }
+
+    self._build_configuration = self.build_config()
+    self._inited_target = False
+    subsystem_util.init_subsystem(Target.TagAssignments)
+
+  def buildroot_files(self, relpath=None):
+    """Returns the set of all files under the test build root.
+
+    :API: public
+
+    :param string relpath: If supplied, only collect files from this subtree.
+    :returns: All file paths found.
+    :rtype: set
+    """
+    def scan():
+      for root, dirs, files in os.walk(os.path.join(self.build_root, relpath or '')):
+        for f in files:
+          yield os.path.relpath(os.path.join(root, f), self.build_root)
+    return set(scan())
+
+  def _reset_engine(self):
+    if self._scheduler is not None:
+      self._build_graph.reset()
+      self._scheduler.invalidate_all_files()
+
+  @classmethod
+  def aggressively_reset_scheduler(cls):
+    cls._scheduler = None
+    if cls._local_store_dir is not None:
+      safe_rmtree(cls._local_store_dir)
+
+  @classmethod
+  @contextmanager
+  def isolated_local_store(cls):
+    cls.aggressively_reset_scheduler()
+    cls._init_engine()
+    try:
+      yield
+    finally:
+      cls.aggressively_reset_scheduler()
+
+  @property
+  def build_root(self):
+    return self._build_root()
+
+  @property
+  def pants_workdir(self):
+    return self._pants_workdir()
+
+  @classmethod
+  @memoized_method
+  def _build_root(cls):
+    return os.path.realpath(mkdtemp(suffix='_BUILD_ROOT'))
+
+  @classmethod
+  @memoized_method
+  def _pants_workdir(cls):
+    return os.path.join(cls._build_root(), '.pants.d')
+
+  @classmethod
+  def _init_engine(cls):
+    if cls._scheduler is not None:
+      return
+
+    cls._local_store_dir = os.path.realpath(safe_mkdtemp())
+    safe_mkdir(cls._local_store_dir)
+
+    # NB: This uses the long form of initialization because it needs to directly specify
+    # `cls.alias_groups` rather than having them be provided by bootstrap options.
+    graph_session = EngineInitializer.setup_legacy_graph_extended(
+      pants_ignore_patterns=None,
+      local_store_dir=cls._local_store_dir,
+      build_file_imports_behavior='allow',
+      native=init_native(),
+      options_bootstrapper=OptionsBootstrapper.create(args=['--pants-config-files=[]']),
+      build_configuration=cls.build_config(),
+      build_ignore_patterns=None,
+    ).new_session(zipkin_trace_v2=False, build_id="buildid_for_test")
+    cls._scheduler = graph_session.scheduler_session
+    cls._build_graph, cls._address_mapper = graph_session.create_build_graph(
+        TargetRoots([]), cls._build_root()
+      )
+
+  @property
+  def scheduler(self):
+    if self._scheduler is None:
+      self._init_engine()
+      self.post_scheduler_init()
+    return self._scheduler
+
+  def post_scheduler_init(self):
+    """Run after initializing the Scheduler; it will have the same lifetime."""
+    pass
+
+  @property
+  def address_mapper(self):
+    if self._address_mapper is None:
+      self._init_engine()
+    return self._address_mapper
+
+  @property
+  def build_graph(self):
+    if self._build_graph is None:
+      self._init_engine()
+    return self._build_graph
+
+  def reset_build_graph(self, reset_build_files=False, delete_build_files=False):
+    """Start over with a fresh build graph with no targets in it."""
+    if delete_build_files or reset_build_files:
+      files = [f for f in self.buildroot_files() if os.path.basename(f) == 'BUILD']
+      if delete_build_files:
+        for f in files:
+          os.remove(os.path.join(self.build_root, f))
+      self.invalidate_for(*files)
+    if self._build_graph is not None:
+      self._build_graph.reset()
+
+  def set_options_for_scope(self, scope, **kwargs):
+    self.options[scope].update(kwargs)
+
+  def context(self, for_task_types=None, for_subsystems=None, options=None,
+              target_roots=None, console_outstream=None, workspace=None,
+              scheduler=None, address_mapper=None, **kwargs):
+    """
+    :API: public
+
+    :param dict **kwargs: keyword arguments passed in to `create_options_for_optionables`.
+    """
+    # Many tests use source root functionality via the SourceRootConfig.global_instance().
+    # (typically accessed via Target.target_base), so we always set it up, for convenience.
+    for_subsystems = set(for_subsystems or ())
+    for subsystem in for_subsystems:
+      if subsystem.options_scope is None:
+        raise TaskError('You must set a scope on your subsystem type before using it in tests.')
+
+    optionables = {SourceRootConfig} | self._build_configuration.optionables() | for_subsystems
+
+    for_task_types = for_task_types or ()
+    for task_type in for_task_types:
+      scope = task_type.options_scope
+      if scope is None:
+        raise TaskError('You must set a scope on your task type before using it in tests.')
+      optionables.add(task_type)
+      # If task is expected to inherit goal-level options, register those directly on the task,
+      # by subclassing the goal options registrar and setting its scope to the task scope.
+      if issubclass(task_type, GoalOptionsMixin):
+        subclass_name = 'test_{}_{}_{}'.format(
+          task_type.__name__, task_type.goal_options_registrar_cls.options_scope,
+          task_type.options_scope)
+        optionables.add(type(subclass_name, (task_type.goal_options_registrar_cls, ),
+                             {'options_scope': task_type.options_scope}))
+
+    # Now expand to all deps.
+    all_optionables = set()
+    for optionable in optionables:
+      all_optionables.update(si.optionable_cls for si in optionable.known_scope_infos())
+
+    # Now default the option values and override with any caller-specified values.
+    # TODO(benjy): Get rid of the options arg, and require tests to call set_options.
+    options = options.copy() if options else {}
+    for s, opts in self.options.items():
+      scoped_opts = options.setdefault(s, {})
+      scoped_opts.update(opts)
+
+    fake_options = create_options_for_optionables(
+      all_optionables, options=options, **kwargs)
+
+    Subsystem.reset(reset_options=True)
+    Subsystem.set_options(fake_options)
+
+    scheduler = scheduler or self.scheduler
+
+    address_mapper = address_mapper or self.address_mapper
+
+    context = create_context_from_options(fake_options,
+                                          target_roots=target_roots,
+                                          build_graph=self.build_graph,
+                                          build_configuration=self._build_configuration,
+                                          address_mapper=address_mapper,
+                                          console_outstream=console_outstream,
+                                          workspace=workspace,
+                                          scheduler=scheduler)
+    return context
+
+  def tearDown(self):
+    """
+    :API: public
+    """
+    super().tearDown()
+    Subsystem.reset()
+
+  @classproperty
+  def subsystems(cls):
+    """Initialize these subsystems when running your test.
+
+    If your test instantiates a target type that depends on any subsystems, those subsystems need to
+    be initialized in your test. You can override this property to return the necessary subsystem
+    classes.
+
+    :rtype: list of type objects, all subclasses of Subsystem
+    """
+    return Target.subsystems()
+
+  def _init_target_subsystem(self):
+    if not self._inited_target:
+      subsystem_util.init_subsystems(self.subsystems)
+      self._inited_target = True
+
+  def target(self, spec):
+    """Resolves the given target address to a Target object.
+
+    :API: public
+
+    address: The BUILD target address to resolve.
+
+    Returns the corresponding Target or else None if the address does not point to a defined Target.
+    """
+    self._init_target_subsystem()
+
+    address = Address.parse(spec)
+    self.build_graph.inject_address_closure(address)
+    return self.build_graph.get_target(address)
+
+  def targets(self, spec):
+    """Resolves a target spec to one or more Target objects.
+
+    :API: public
+
+    spec: Either BUILD target address or else a target glob using the siblings ':' or
+          descendants '::' suffixes.
+
+    Returns the set of all Targets found.
+    """
+
+    spec = CmdLineSpecParser(self.build_root).parse_spec(spec)
+    targets = []
+    for address in self.build_graph.inject_specs_closure([spec]):
+      targets.append(self.build_graph.get_target(address))
+    return targets
+
+  def create_library(self, path, target_type, name, sources=None, **kwargs):
+    """Creates a library target of the given type at the BUILD file at path with sources
+
+    :API: public
+
+     path: The relative path to the BUILD file from the build root.
+     target_type: valid pants target type.
+     name: Name of the library target.
+     sources: List of source files at the path relative to path.
+     **kwargs: Optional attributes that can be set for any library target.
+       Currently it includes support for resources, java_sources, provides
+       and dependencies.
+    """
+    if sources:
+      self.create_files(path, sources)
+    self.add_to_build_file(path, dedent('''
+          %(target_type)s(name='%(name)s',
+            %(sources)s
+            %(java_sources)s
+            %(provides)s
+            %(dependencies)s
+          )
+        ''' % dict(target_type=target_type,
+                   name=name,
+                   sources=('sources=%s,' % repr(sources)
+                              if sources else ''),
+                   java_sources=('java_sources=[%s],'
+                                 % ','.join('"%s"' % str_target for str_target in kwargs.get('java_sources'))
+                                 if 'java_sources' in kwargs else ''),
+                   provides=('provides=%s,' % kwargs.get('provides')
+                              if 'provides' in kwargs else ''),
+                   dependencies=('dependencies=%s,' % kwargs.get('dependencies')
+                              if 'dependencies' in kwargs else ''),
+                   )))
+    return self.target('%s:%s' % (path, name))
+
+  def create_resources(self, path, name, *sources):
+    """
+    :API: public
+    """
+    return self.create_library(path, 'resources', name, sources)
+
+  def assertUnorderedPrefixEqual(self, expected, actual_iter):
+    """Consumes len(expected) items from the given iter, and asserts that they match, unordered.
+
+    :API: public
+    """
+    actual = list(itertools.islice(actual_iter, len(expected)))
+    self.assertEqual(sorted(expected), sorted(actual))
+
+  def assertPrefixEqual(self, expected, actual_iter):
+    """Consumes len(expected) items from the given iter, and asserts that they match, in order.
+
+    :API: public
+    """
+    self.assertEqual(expected, list(itertools.islice(actual_iter, len(expected))))
+
+  def assertInFile(self, string, file_path):
+    """Verifies that a string appears in a file
+
+    :API: public
+    """
+
+    with open(file_path, 'r') as f:
+      content = f.read()
+      self.assertIn(string, content, '"{}" is not in the file {}:\n{}'.format(string, f.name, content))
+
+  @contextmanager
+  def assertRaisesWithMessage(self, exception_type, error_text):
+    """Verifies that an exception message is equal to `error_text`.
+
+    :param type exception_type: The exception type which is expected to be raised within the body.
+    :param str error_text: Text that the exception message should match exactly with
+                           `self.assertEqual()`.
+    :API: public
+    """
+    with self.assertRaises(exception_type) as cm:
+      yield cm
+    self.assertEqual(error_text, str(cm.exception))
+
+  @contextmanager
+  def assertRaisesWithMessageContaining(self, exception_type, error_text):
+    """Verifies that the string `error_text` appears in an exception message.
+
+    :param type exception_type: The exception type which is expected to be raised within the body.
+    :param str error_text: Text that the exception message should contain with `self.assertIn()`.
+    :API: public
+    """
+    with self.assertRaises(exception_type) as cm:
+      yield cm
+    self.assertIn(error_text, str(cm.exception))
+
+  def get_bootstrap_options(self, cli_options=()):
+    """Retrieves bootstrap options.
+
+    :param cli_options: An iterable of CLI flags to pass as arguments to `OptionsBootstrapper`.
+    """
+    args = tuple(['--pants-config-files=[]']) + tuple(cli_options)
+    return OptionsBootstrapper.create(args=args).bootstrap_options.for_global_scope()
+
+  def make_snapshot(self, files):
+    """Makes a snapshot from a collection of files.
+
+    :param files: a dictionary, where key=filename, value=file_content where both are of type String.
+    :return: a Snapshot.
+    """
+    with temporary_dir() as temp_dir:
+      for file_name, content in files.items():
+        safe_file_dump(os.path.join(temp_dir, file_name), content)
+      return self.scheduler.capture_snapshots((
+        PathGlobsAndRoot(PathGlobs(('**',)), temp_dir),
+      ))[0]
+
+  class LoggingRecorder:
+    """Simple logging handler to record warnings."""
+
+    def __init__(self):
+      self._records = []
+      self.level = logging.DEBUG
+
+    def handle(self, record):
+      self._records.append(record)
+
+    def _messages_for_level(self, levelname):
+      return ['{}: {}'.format(record.name, record.getMessage())
+              for record in self._records if record.levelname == levelname]
+
+    def infos(self):
+      return self._messages_for_level('INFO')
+
+    def warnings(self):
+      return self._messages_for_level('WARNING')
+
+    def errors(self):
+      return self._messages_for_level('ERROR')
+
+  @contextmanager
+  def captured_logging(self, level=None):
+    root_logger = logging.getLogger()
+
+    old_level = root_logger.level
+    root_logger.setLevel(level or logging.NOTSET)
+
+    handler = self.LoggingRecorder()
+    root_logger.addHandler(handler)
+    try:
+      yield handler
+    finally:
+      root_logger.setLevel(old_level)
+      root_logger.removeHandler(handler)
+
+  @contextmanager
+  def warnings_catcher(self):
+    with warnings.catch_warnings(record=True) as w:
+      warnings.simplefilter('always')
+      yield w
+
+  def assertWarning(self, w, category, warning_text):
+    single_warning = assert_single_element(w)
+    self.assertEqual(single_warning.category, category)
+    warning_message = single_warning.message
+    self.assertEqual(warning_text, str(warning_message))
+
+  def retrieve_single_product_at_target_base(self, product_mapping, target):
+    mapping_for_target = product_mapping.get(target)
+    single_base_dir = assert_single_element(list(mapping_for_target.keys()))
+    single_product = assert_single_element(mapping_for_target[single_base_dir])
+    return single_product
+
+  def populate_target_dict(self, target_map):
+    """Return a dict containing targets with files generated according to `target_map`.
+
+    The keys of `target_map` are target address strings, while the values of `target_map` should be
+    a dict which contains keyword arguments fed into `self.make_target()`, along with a few special
+    keys. Special keys are:
+    - 'key': used to access the target in the returned dict. Defaults to the target address spec.
+    - 'filemap': creates files at the specified relative paths to the target.
+
+    An `OrderedDict` of 2-tuples must be used with the targets topologically ordered, if
+    they have dependencies on each other. Note that dependency cycles are not currently supported
+    with this method.
+
+    :param target_map: Dict mapping each target address to generate -> kwargs for
+                       `self.make_target()`, along with a 'key' and optionally a 'filemap' argument.
+    :return: Dict mapping the required 'key' argument -> target instance for each element of
+             `target_map`.
+    :rtype: dict
+    """
+    target_dict = {}
+
+    # Create a target from each specification and insert it into `target_dict`.
+    for target_spec, target_kwargs in target_map.items():
+      unprocessed_kwargs = target_kwargs.copy()
+
+      target_base = Address.parse(target_spec).spec_path
+
+      # Populate the target's owned files from the specification.
+      filemap = unprocessed_kwargs.pop('filemap', {})
+      for rel_path, content in filemap.items():
+        buildroot_path = os.path.join(target_base, rel_path)
+        self.create_file(buildroot_path, content)
+
+      # Ensure any dependencies exist in the target dict (`target_map` must then be an
+      # OrderedDict).
+      # The 'key' is used to access the target in `target_dict`, and defaults to `target_spec`.
+      target_address = Address.parse(target_spec)
+      key = unprocessed_kwargs.pop('key', target_address.target_name)
+      dep_targets = []
+      for dep_spec in unprocessed_kwargs.pop('dependencies', []):
+        existing_tgt_key = target_map[dep_spec]['key']
+        dep_targets.append(target_dict[existing_tgt_key])
+
+      # Register the generated target.
+      generated_target = self.make_target(
+        spec=target_spec, dependencies=dep_targets, **unprocessed_kwargs)
+      target_dict[key] = generated_target
+
+    return target_dict
diff --git a/tests/python/pants_test/BUILD b/tests/python/pants_test/BUILD
index f024912a53a..6b4d4fcf5f6 100644
--- a/tests/python/pants_test/BUILD
+++ b/tests/python/pants_test/BUILD
@@ -4,46 +4,33 @@
 python_library(
   name='test_infra',
   dependencies=[
-    'tests/python/pants_test:int-test-for-export',
-    'tests/python/pants_test:test_base',
+    ':int-test-for-export',
+    ':test_base',
     'tests/python/pants_test/jvm:jar_task_test_base',
     'tests/python/pants_test/jvm:nailgun_task_test_base',
     'tests/python/pants_test/jvm:jvm_tool_task_test_base',
     'tests/python/pants_test/engine:engine_test_base',
+    'tests/python/pants_test/engine:util',
     'tests/python/pants_test/subsystem:subsystem_utils',
+    'tests/python/pants_test/base:context_utils',
+    'tests/python/pants_test/option/util',
+    'tests/python/pants_test/testutils:file_test_util',
   ],
   provides=pants_setup_py(
     name='pantsbuild.pants.testinfra',
-    description='Test support for writing pants plugins.',
+    description='Test support for writing Pants plugins.',
     namespace_packages=['pants_test'],
     additional_classifiers=[
       'Topic :: Software Development :: Testing',
-    ]
-  )
+    ],
+  ),
 )
 
 python_library(
   name = 'int-test-for-export',
-  sources = [
-    'pants_run_integration_test.py'
-  ],
+  sources = ['pants_run_integration_test.py'],
   dependencies = [
-    '//:build_root',
-    '//:pants_pex',
-    '3rdparty/python:ansicolors',
-    '3rdparty/python:dataclasses',
-    'src/python/pants/base:build_environment',
-    'src/python/pants/base:build_file',
-    'src/python/pants/base:exiter',
-    'src/python/pants/fs',
-    'src/python/pants/subsystem',
-    'src/python/pants/util:contextutil',
-    'src/python/pants/util:dirutil',
-    'src/python/pants/util:osutil',
-    'src/python/pants/util:process_handler',
-    'src/python/pants/util:strutil',
-    'src/python/pants:entry_point',
-    'tests/python/pants_test/testutils:file_test_util',
+    'src/python/pants/testutil:int-test-for-export',
   ]
 )
 
@@ -51,13 +38,7 @@ target(
   name = 'int-test',
   dependencies=[
     ':int-test-for-export',
-    # NB: 'pants_run_integration_test.py' runs ./pants in a subprocess, so test results will depend
-    # on the pants binary and all of its transitive dependencies. Adding the dependencies below is
-    # our best proxy for ensuring that any test target depending on this target will be invalidated
-    # on changes to those undeclared dependencies.
-    'src/python/pants/bin:pants_local_binary',
-    'src/rust/engine',
-    '//:pyproject',
+    'src/python/pants/testutil:int-test',
   ],
 )
 
@@ -65,20 +46,7 @@ python_library(
   name = 'test_base',
   sources = ['test_base.py'],
   dependencies = [
-    'src/python/pants/base:build_root',
-    'src/python/pants/base:cmd_line_spec_parser',
-    'src/python/pants/base:exceptions',
-    'src/python/pants/build_graph',
-    'src/python/pants/init',
-    'src/python/pants/source',
-    'src/python/pants/subsystem',
-    'src/python/pants/task',
-    'src/python/pants/util:dirutil',
-    'src/python/pants/util:memo',
-    'src/python/pants/util:meta',
-    'tests/python/pants_test/base:context_utils',
-    'tests/python/pants_test/engine:util',
-    'tests/python/pants_test/option/util',
+    'src/python/pants/testutil:test_base',
   ]
 )
 
@@ -86,9 +54,7 @@ python_library(
   name = 'console_rule_test_base',
   sources = ['console_rule_test_base.py'],
   dependencies = [
-    ':test_base',
-    'src/python/pants/bin',
-    'src/python/pants/init',
+    'src/python/pants/testutil:console_rule_test_base',
   ]
 )
 
@@ -97,14 +63,15 @@ python_library(
   name = 'task_test_base',
   sources = ['task_test_base.py'],
   dependencies = [
-    '3rdparty/python:dataclasses',
-    'src/python/pants/goal:context',
-    'src/python/pants/ivy',
-    'src/python/pants/task',
-    'src/python/pants/util:contextutil',
-    'src/python/pants/util:memo',
-    'src/python/pants/util:meta',
-    'tests/python/pants_test:test_base',
+    'src/python/pants/testutil:task_test_base',
+  ]
+)
+
+python_library(
+  name='interpreter_selection_utils',
+  sources=['interpreter_selection_utils.py'],
+  dependencies = [
+    'src/python/pants/testutil:interpreter_selection_utils',
   ]
 )
 
@@ -119,8 +86,3 @@ python_tests(
     'tests/python/pants_test/subsystem:subsystem_utils',
   ]
 )
-
-python_library(
-  name='interpreter_selection_utils',
-  sources=['interpreter_selection_utils.py'],
-)
diff --git a/tests/python/pants_test/backend/codegen/wire/java/BUILD b/tests/python/pants_test/backend/codegen/wire/java/BUILD
index 7d3cb68aac0..4d029fe63c2 100644
--- a/tests/python/pants_test/backend/codegen/wire/java/BUILD
+++ b/tests/python/pants_test/backend/codegen/wire/java/BUILD
@@ -26,7 +26,9 @@ python_tests(
   sources = globs('*_integration.py'),
   dependencies = [
     'src/python/pants/base:build_environment',
+    'src/python/pants/util:contextutil',
     'tests/python/pants_test:int-test',
+    'tests/python/pants_test/testutils:file_test_util',
     'examples/src/java/org/pantsbuild/example:wire_directory',
   ],
   tags = {'integration'},
diff --git a/tests/python/pants_test/backend/jvm/subsystems/BUILD b/tests/python/pants_test/backend/jvm/subsystems/BUILD
index 08b28a320e0..db0efc44d21 100644
--- a/tests/python/pants_test/backend/jvm/subsystems/BUILD
+++ b/tests/python/pants_test/backend/jvm/subsystems/BUILD
@@ -42,7 +42,7 @@ python_tests(
     'src/python/pants/backend/jvm/targets:scala',
     'src/python/pants/backend/jvm/tasks:scalastyle',
     'src/python/pants/backend/jvm/tasks/jvm_compile/zinc',
-    'tests/python/pants_test/jvm:nailgun_task_test_base',
+    'tests/python/pants_test:test_base',
     'tests/python/pants_test/subsystem:subsystem_utils',
   ]
 )
diff --git a/tests/python/pants_test/backend/jvm/tasks/BUILD b/tests/python/pants_test/backend/jvm/tasks/BUILD
index c0585a7071a..b4dc8a8b790 100644
--- a/tests/python/pants_test/backend/jvm/tasks/BUILD
+++ b/tests/python/pants_test/backend/jvm/tasks/BUILD
@@ -37,6 +37,7 @@ python_tests(
     'src/python/pants/task',
     'src/python/pants/util:contextutil',
     'tests/python/pants_test/jvm:jvm_tool_task_test_base',
+    'tests/python/pants_test/subsystem:subsystem_utils',
   ]
 )
 
@@ -276,7 +277,7 @@ python_tests(
     'tests/python/pants_test/jvm:jvm_tool_task_test_base',
     'tests/python/pants_test/jvm:nailgun_task_test_base',
     'tests/python/pants_test/subsystem:subsystem_utils',
-    'tests/python/pants_test:test_base',
+    'tests/python/pants_test:task_test_base',
   ]
 )
 
@@ -298,7 +299,7 @@ python_tests(
     'tests/python/pants_test/jvm:jvm_tool_task_test_base',
     'tests/python/pants_test/jvm:nailgun_task_test_base',
     'tests/python/pants_test/subsystem:subsystem_utils',
-    'tests/python/pants_test:test_base',
+    'tests/python/pants_test:task_test_base',
   ]
 )
 
@@ -362,6 +363,7 @@ python_tests(
     'tests/python/pants_test/base:context_utils',
     'tests/python/pants_test/jvm:jar_task_test_base',
     'tests/python/pants_test:task_test_base',
+    'tests/python/pants_test/subsystem:subsystem_utils',
   ],
 )
 
@@ -570,7 +572,7 @@ python_tests(
     'src/python/pants/backend/jvm/targets:jvm',
     'src/python/pants/backend/jvm/tasks:run_jvm_prep_command',
     'src/python/pants/util:contextutil',
-    'tests/python/pants_test/jvm:jvm_task_test_base',
+    'tests/python/pants_test:task_test_base',
   ]
 )
 
diff --git a/tests/python/pants_test/backend/jvm/tasks/coverage/BUILD b/tests/python/pants_test/backend/jvm/tasks/coverage/BUILD
index b782fd47711..b8fc134ae3c 100644
--- a/tests/python/pants_test/backend/jvm/tasks/coverage/BUILD
+++ b/tests/python/pants_test/backend/jvm/tasks/coverage/BUILD
@@ -6,6 +6,6 @@ python_tests(
     'src/python/pants/backend/jvm/targets:java',
     'src/python/pants/backend/jvm/tasks/coverage',
     'src/python/pants/java/jar',
-    'tests/python/pants_test/backend/jvm/tasks:jvm_binary_task_test_base',
+    'tests/python/pants_test:test_base',
   ]
 )
diff --git a/tests/python/pants_test/backend/jvm/tasks/jvm_compile/rsc/BUILD b/tests/python/pants_test/backend/jvm/tasks/jvm_compile/rsc/BUILD
index 5312f4dbaf3..8a853cd4c53 100644
--- a/tests/python/pants_test/backend/jvm/tasks/jvm_compile/rsc/BUILD
+++ b/tests/python/pants_test/backend/jvm/tasks/jvm_compile/rsc/BUILD
@@ -48,5 +48,6 @@ python_tests(
     'src/python/pants/backend/jvm/tasks/jvm_compile/rsc',
     'src/python/pants/backend/jvm/tasks:classpath_products',
     'tests/python/pants_test/jvm:nailgun_task_test_base',
+    'tests/python/pants_test/subsystem:subsystem_utils',
   ],
 )
diff --git a/tests/python/pants_test/backend/project_info/tasks/BUILD b/tests/python/pants_test/backend/project_info/tasks/BUILD
index 7b738f8b460..70ccbc8d75d 100644
--- a/tests/python/pants_test/backend/project_info/tasks/BUILD
+++ b/tests/python/pants_test/backend/project_info/tasks/BUILD
@@ -35,6 +35,7 @@ python_tests(
     'src/python/pants/backend/python/targets',
     'src/python/pants/build_graph',
     'src/python/pants/subsystem',
+    'tests/python/pants_test/subsystem:subsystem_utils',
     'tests/python/pants_test:task_test_base',
   ]
 )
@@ -102,6 +103,7 @@ python_tests(
     'src/python/pants/backend/jvm/targets:java',
     'src/python/pants/backend/project_info/tasks:filedeps',
     'src/python/pants/build_graph',
+    'tests/python/pants_test:test_base',
     'tests/python/pants_test:task_test_base',
   ],
 )
diff --git a/tests/python/pants_test/backend/python/tasks/BUILD b/tests/python/pants_test/backend/python/tasks/BUILD
index e514b70f55b..d7a38f455f5 100644
--- a/tests/python/pants_test/backend/python/tasks/BUILD
+++ b/tests/python/pants_test/backend/python/tasks/BUILD
@@ -9,7 +9,8 @@ python_library(
     'src/python/pants/backend/python/targets',
     'src/python/pants/build_graph',
     'src/python/pants/subsystem',
-    'tests/python/pants_test:task_test_base'
+    'tests/python/pants_test/subsystem:subsystem_utils',
+    'tests/python/pants_test:task_test_base',
   ]
 )
 
diff --git a/tests/python/pants_test/base/BUILD b/tests/python/pants_test/base/BUILD
index ce52c17825c..4f774976a70 100644
--- a/tests/python/pants_test/base/BUILD
+++ b/tests/python/pants_test/base/BUILD
@@ -49,12 +49,7 @@ python_library(
   name = 'context_utils',
   sources = ['context_utils.py'],
   dependencies = [
-    '3rdparty/python/twitter/commons:twitter.common.collections',
-    'src/python/pants/base:workunit',
-    'src/python/pants/build_graph',
-    'src/python/pants/goal:context',
-    'src/python/pants/goal:run_tracker',
-    'tests/python/pants_test/option/util',
+    'src/python/pants/testutil/base:context_utils',
   ]
 )
 
diff --git a/tests/python/pants_test/base/context_utils.py b/tests/python/pants_test/base/context_utils.py
index d482bfe6c5d..c16ef436cfa 100644
--- a/tests/python/pants_test/base/context_utils.py
+++ b/tests/python/pants_test/base/context_utils.py
@@ -1,131 +1,7 @@
 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
 # Licensed under the Apache License, Version 2.0 (see LICENSE).
 
-import logging
-import sys
-from contextlib import contextmanager
-
-from twitter.common.collections import maybe_list
-
-from pants.base.workunit import WorkUnit
-from pants.build_graph.target import Target
-from pants.goal.context import Context
-from pants.goal.run_tracker import RunTrackerLogger
-
-
-class TestContext(Context):
-  """A Context to use during unittesting.
-
-  :API: public
-
-  Stubs out various dependencies that we don't want to introduce in unit tests.
-
-  TODO: Instead of extending the runtime Context class, create a Context interface and have
-  TestContext and a runtime Context implementation extend that. This will also allow us to
-  isolate the parts of the interface that a Task is allowed to use vs. the parts that the
-  task-running machinery is allowed to use.
-  """
-  class DummyWorkUnit:
-    """A workunit stand-in that sends all output to stderr.
-
-   These outputs are typically only used by subprocesses spawned by code under test, not
-   the code under test itself, and would otherwise go into some reporting black hole.  The
-   testing framework will only display the stderr output when a test fails.
-
-   Provides no other tracking/labeling/reporting functionality. Does not require "opening"
-   or "closing".
-   """
-
-    def output(self, name):
-      return sys.stderr
-
-    def set_outcome(self, outcome):
-      return sys.stderr.write('\nWorkUnit outcome: {}\n'.format(WorkUnit.outcome_string(outcome)))
-
-  class DummyRunTracker:
-    """A runtracker stand-in that does no actual tracking."""
-
-    def __init__(self):
-      self.logger = RunTrackerLogger(self)
-
-    class DummyArtifactCacheStats:
-      def add_hits(self, cache_name, targets): pass
-
-      def add_misses(self, cache_name, targets, causes): pass
-
-    artifact_cache_stats = DummyArtifactCacheStats()
-
-    def report_target_info(self, scope, target, keys, val): pass
-
-
-  class TestLogger(logging.getLoggerClass()):
-    """A logger that converts our structured records into flat ones.
-
-    This is so we can use a regular logger in tests instead of our reporting machinery.
-    """
-
-    def makeRecord(self, name, lvl, fn, lno, msg, args, exc_info, *pos_args, **kwargs):
-      # Python 2 and Python 3 have different arguments for makeRecord().
-      # For cross-compatibility, we are unpacking arguments.
-      # See https://stackoverflow.com/questions/44329421/logging-makerecord-takes-8-positional-arguments-but-11-were-given.
-      msg = ''.join([msg] + [a[0] if isinstance(a, (list, tuple)) else a for a in args])
-      args = []
-      return super(TestContext.TestLogger, self).makeRecord(
-        name, lvl, fn, lno, msg, args, exc_info, *pos_args, **kwargs)
-
-  def __init__(self, *args, **kwargs):
-    super().__init__(*args, **kwargs)
-    logger_cls = logging.getLoggerClass()
-    try:
-      logging.setLoggerClass(self.TestLogger)
-      self._logger = logging.getLogger('test')
-    finally:
-      logging.setLoggerClass(logger_cls)
-
-  @contextmanager
-  def new_workunit(self, name, labels=None, cmd='', log_config=None):
-    """
-    :API: public
-    """
-    sys.stderr.write('\nStarting workunit {}\n'.format(name))
-    yield TestContext.DummyWorkUnit()
-
-  @property
-  def log(self):
-    """
-    :API: public
-    """
-    return self._logger
-
-  def submit_background_work_chain(self, work_chain, parent_workunit_name=None):
-    """
-    :API: public
-    """
-    # Just do the work synchronously, so we don't need a run tracker, background workers and so on.
-    for work in work_chain:
-      for args_tuple in work.args_tuples:
-        work.func(*args_tuple)
-
-  def subproc_map(self, f, items):
-    """
-    :API: public
-    """
-    # Just execute in-process.
-    return list(map(f, items))
-
-
-def create_context_from_options(options, target_roots=None, build_graph=None,
-                                build_configuration=None, address_mapper=None,
-                                console_outstream=None, workspace=None, scheduler=None):
-  """Creates a ``Context`` with the given options and no targets by default.
-
-  :param options: An :class:`pants.option.options.Option`-alike object that supports read methods.
-
-  Other params are as for ``Context``.
-  """
-  run_tracker = TestContext.DummyRunTracker()
-  target_roots = maybe_list(target_roots, Target) if target_roots else []
-  return TestContext(options=options, run_tracker=run_tracker, target_roots=target_roots,
-                     build_graph=build_graph, build_configuration=build_configuration,
-                     address_mapper=address_mapper, console_outstream=console_outstream,
-                     workspace=workspace, scheduler=scheduler)
+from pants.testutil.base.context_utils import TestContext as TestContext  # noqa
+from pants.testutil.base.context_utils import (
+  create_context_from_options as create_context_from_options,
+)  # noqa
diff --git a/tests/python/pants_test/cache/BUILD b/tests/python/pants_test/cache/BUILD
index 2ddb831b755..074af1505f2 100644
--- a/tests/python/pants_test/cache/BUILD
+++ b/tests/python/pants_test/cache/BUILD
@@ -35,6 +35,7 @@ python_tests(
     'src/python/pants/util:contextutil',
     'src/python/pants/task',
     'tests/python/pants_test:test_base',
+    'tests/python/pants_test/option/util',
     'tests/python/pants_test/testutils:mock_logger',
   ]
 )
diff --git a/tests/python/pants_test/console_rule_test_base.py b/tests/python/pants_test/console_rule_test_base.py
index 0561480a450..f3b072a6bf2 100644
--- a/tests/python/pants_test/console_rule_test_base.py
+++ b/tests/python/pants_test/console_rule_test_base.py
@@ -1,135 +1,4 @@
 # Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
 # Licensed under the Apache License, Version 2.0 (see LICENSE).
 
-from io import StringIO
-from typing import Any, List
-
-from pants.engine.console import Console
-from pants.engine.fs import Workspace
-from pants.engine.goal import Goal
-from pants.engine.selectors import Params
-from pants.init.options_initializer import BuildConfigInitializer
-from pants.init.target_roots_calculator import TargetRootsCalculator
-from pants.option.options_bootstrapper import OptionsBootstrapper
-from pants.util.meta import classproperty
-from pants_test.test_base import TestBase
-
-
-class ConsoleRuleTestBase(TestBase):
-  """A baseclass useful for testing a Goal defined as a @console_rule.
-
-  :API: public
-  """
-
-  _implicit_args = tuple(['--pants-config-files=[]'])
-
-  @classproperty
-  def goal_cls(cls):
-    """Subclasses must return the Goal type to test.
-
-    :API: public
-    """
-    raise NotImplementedError()
-
-  def setUp(self):
-    super().setUp()
-
-    if not issubclass(self.goal_cls, Goal):
-      raise AssertionError('goal_cls() must return a Goal subclass, got {}'.format(self.goal_cls))
-
-  def execute_rule(self, args=tuple(), env=tuple(), exit_code=0, additional_params: List[Any]=[]):
-    """Executes the @console_rule for this test class.
-
-    :API: public
-
-    Returns the text output of the task.
-    """
-    # Create an OptionsBootstrapper for these args/env, and a captured Console instance.
-    args = self._implicit_args + (self.goal_cls.name,) + tuple(args)
-    env = dict(env)
-    options_bootstrapper = OptionsBootstrapper.create(args=args, env=env)
-    BuildConfigInitializer.get(options_bootstrapper)
-    full_options = options_bootstrapper.get_full_options(list(self.goal_cls.Options.known_scope_infos()))
-    stdout, stderr = StringIO(), StringIO()
-    console = Console(stdout=stdout, stderr=stderr)
-    scheduler = self.scheduler
-    workspace = Workspace(scheduler)
-
-    # Run for the target specs parsed from the args.
-    specs = TargetRootsCalculator.parse_specs(full_options.target_specs, self.build_root)
-    params = Params(specs, console, options_bootstrapper, workspace, *additional_params)
-    actual_exit_code = self.scheduler.run_console_rule(self.goal_cls, params)
-
-    # Flush and capture console output.
-    console.flush()
-    stdout = stdout.getvalue()
-    stderr = stderr.getvalue()
-
-    self.assertEqual(
-        exit_code,
-        actual_exit_code,
-        "Exited with {} (expected {}):\nstdout:\n{}\nstderr:\n{}".format(actual_exit_code, exit_code, stdout, stderr)
-      )
-
-    return stdout
-
-  def assert_entries(self, sep, *output, **kwargs):
-    """Verifies the expected output text is flushed by the console task under test.
-
-    NB: order of entries is not tested, just presence.
-
-    :API: public
-
-    sep:      the expected output separator.
-    *output:  the output entries expected between the separators
-    **kwargs: additional kwargs passed to execute_rule.
-    """
-    # We expect each output line to be suffixed with the separator, so for , and [1,2,3] we expect:
-    # '1,2,3,' - splitting this by the separator we should get ['1', '2', '3', ''] - always an extra
-    # empty string if the separator is properly always a suffix and not applied just between
-    # entries.
-    self.assertEqual(sorted(list(output) + ['']), sorted((self.execute_rule(**kwargs)).split(sep)))
-
-  def assert_console_output(self, *output, **kwargs):
-    """Verifies the expected output entries are emitted by the console task under test.
-
-    NB: order of entries is not tested, just presence.
-
-    :API: public
-
-    *output:  the expected output entries
-    **kwargs: additional kwargs passed to execute_rule.
-    """
-    self.assertEqual(sorted(output), sorted(self.execute_rule(**kwargs).splitlines()))
-
-  def assert_console_output_contains(self, output, **kwargs):
-    """Verifies the expected output string is emitted by the console task under test.
-
-    :API: public
-
-    output:  the expected output entry(ies)
-    **kwargs: additional kwargs passed to execute_rule.
-    """
-    self.assertIn(output, self.execute_rule(**kwargs))
-
-  def assert_console_output_ordered(self, *output, **kwargs):
-    """Verifies the expected output entries are emitted by the console task under test.
-
-    NB: order of entries is tested.
-
-    :API: public
-
-    *output:  the expected output entries in expected order
-    **kwargs: additional kwargs passed to execute_rule.
-    """
-    self.assertEqual(list(output), self.execute_rule(**kwargs).splitlines())
-
-  def assert_console_raises(self, exception, **kwargs):
-    """Verifies the expected exception is raised by the console task under test.
-
-    :API: public
-
-    **kwargs: additional kwargs are passed to execute_rule.
-    """
-    with self.assertRaises(exception):
-      self.execute_rule(**kwargs)
+from pants.testutil.console_rule_test_base import ConsoleRuleTestBase as ConsoleRuleTestBase  # noqa
diff --git a/tests/python/pants_test/core_tasks/BUILD b/tests/python/pants_test/core_tasks/BUILD
index ddab8f515e3..c210e54bb48 100644
--- a/tests/python/pants_test/core_tasks/BUILD
+++ b/tests/python/pants_test/core_tasks/BUILD
@@ -55,6 +55,7 @@ python_tests(
     'src/python/pants/util:contextutil',
     'src/python/pants/util:dirutil',
     'tests/python/pants_test:task_test_base',
+    'tests/python/pants_test/subsystem:subsystem_utils',
   ]
 )
 
diff --git a/tests/python/pants_test/engine/BUILD b/tests/python/pants_test/engine/BUILD
index 2008373ff16..a760b89bd3a 100644
--- a/tests/python/pants_test/engine/BUILD
+++ b/tests/python/pants_test/engine/BUILD
@@ -5,17 +5,7 @@ python_library(
   name = 'util',
   sources = ['util.py'],
   dependencies = [
-    '3rdparty/python:ansicolors',
-    'src/python/pants/binaries',
-    'src/python/pants/engine:addressable',
-    'src/python/pants/engine:native',
-    'src/python/pants/engine:parser',
-    'src/python/pants/engine:rules',
-    'src/python/pants/engine:scheduler',
-    'src/python/pants/engine:struct',
-    'src/python/pants/util:objects',
-    'tests/python/pants_test/option/util',
-    'tests/python/pants_test/subsystem:subsystem_utils',
+    'src/python/pants/testutil/engine:util',
   ],
 )
 
@@ -23,9 +13,7 @@ python_library(
   name = 'engine_test_base',
   sources = ['base_engine_test.py'],
   dependencies = [
-    'src/python/pants/goal',
-    'src/python/pants/goal:task_registrar',
-    'tests/python/pants_test:test_base',
+    'src/python/pants/testutil/engine:engine_test_base',
   ]
 )
 
diff --git a/tests/python/pants_test/engine/base_engine_test.py b/tests/python/pants_test/engine/base_engine_test.py
index 83cb4bdc7e1..9263dbbb280 100644
--- a/tests/python/pants_test/engine/base_engine_test.py
+++ b/tests/python/pants_test/engine/base_engine_test.py
@@ -1,64 +1,4 @@
 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
 # Licensed under the Apache License, Version 2.0 (see LICENSE).
 
-from pants.goal.goal import Goal
-from pants.goal.task_registrar import TaskRegistrar
-from pants_test.test_base import TestBase
-
-
-class EngineTestBase(TestBase):
-  """
-  :API: public
-  """
-
-  @classmethod
-  def as_goal(cls, goal_name):
-    """Returns a ``Goal`` object of the given name
-
-    :API: public
-    """
-    return Goal.by_name(goal_name)
-
-  @classmethod
-  def as_goals(cls, *goal_names):
-    """Converts the given goal names to a list of ``Goal`` objects.
-
-    :API: public
-    """
-    return [cls.as_goal(goal_name) for goal_name in goal_names]
-
-  @classmethod
-  def install_task(cls, name, action=None, dependencies=None, goal=None):
-    """Creates and installs a task with the given name.
-
-    :API: public
-
-    :param string name: The task name.
-    :param action: The task's action.
-    :param list dependencies: The list of goal names the task depends on, if any.
-    :param string goal: The name of the goal to install the task in, if different from the task
-                        name.
-    :returns The ``Goal`` object with task installed.
-    """
-    return TaskRegistrar(name,
-                         action=action or (lambda: None),
-                         dependencies=dependencies or []).install(goal if goal is not None else None)
-
-  def setUp(self):
-    """
-    :API: public
-    """
-    super().setUp()
-
-    # TODO(John Sirois): Now that the BuildFileParser controls goal registration by iterating
-    # over plugin callbacks a GoalRegistry can be constructed by it and handed to all these
-    # callbacks in place of having a global Goal registry.  Remove the Goal static cling.
-    Goal.clear()
-
-  def tearDown(self):
-    """
-    :API: public
-    """
-    Goal.clear()
-
-    super().tearDown()
+from pants.testutil.engine.base_engine_test import EngineTestBase as EngineTestBase  # noqa
diff --git a/tests/python/pants_test/engine/util.py b/tests/python/pants_test/engine/util.py
index 5adf216b223..c701c2a0720 100644
--- a/tests/python/pants_test/engine/util.py
+++ b/tests/python/pants_test/engine/util.py
@@ -1,171 +1,13 @@
 # Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
 # Licensed under the Apache License, Version 2.0 (see LICENSE).
 
-import os
-import re
-from io import StringIO
-from types import GeneratorType
-
-from colors import blue, green, red
-
-from pants.base.file_system_project_tree import FileSystemProjectTree
-from pants.engine.addressable import addressable_list
-from pants.engine.native import Native
-from pants.engine.parser import SymbolTable
-from pants.engine.scheduler import Scheduler
-from pants.engine.selectors import Get
-from pants.engine.struct import Struct
-from pants.option.global_options import DEFAULT_EXECUTION_OPTIONS
-from pants.util.objects import SubclassesOf
-
-
-def run_rule(rule, *args):
-  """A test helper function that runs an @rule with a set of arguments and Get providers.
-
-  An @rule named `my_rule` that takes one argument and makes no `Get` requests can be invoked
-  like so (although you could also just invoke it directly):
-  ```
-  return_value = run_rule(my_rule, arg1)
-  ```
-
-  In the case of an @rule that makes Get requests, things get more interesting: an extra argument
-  is required that represents a dict mapping (product, subject) type pairs to one argument functions
-  that take a subject value and return a product value.
-
-  So in the case of an @rule named `my_co_rule` that takes one argument and makes Get requests
-  for product and subject types (Listing, Dir), the invoke might look like:
-  ```
-  return_value = run_rule(my_co_rule, arg1, {(Listing, Dir): lambda x: Listing(..)})
-  ```
-
-  :returns: The return value of the completed @rule.
-  """
-
-  task_rule = getattr(rule, 'rule', None)
-  if task_rule is None:
-    raise TypeError('Expected to receive a decorated `@rule`; got: {}'.format(rule))
-
-  gets_len = len(task_rule.input_gets)
-
-  if len(args) != len(task_rule.input_selectors) + (1 if gets_len else 0):
-    raise ValueError('Rule expected to receive arguments of the form: {}; got: {}'.format(
-      task_rule.input_selectors, args))
-
-  args, get_providers = (args[:-1], args[-1]) if gets_len > 0 else (args, {})
-  if gets_len != len(get_providers):
-    raise ValueError('Rule expected to receive Get providers for {}; got: {}'.format(
-      task_rule.input_gets, get_providers))
-
-  res = rule(*args)
-  if not isinstance(res, GeneratorType):
-    return res
-
-  def get(product, subject):
-    provider = get_providers.get((product, type(subject)))
-    if provider is None:
-      raise AssertionError('Rule requested: Get{}, which cannot be satisfied.'.format(
-        (product, type(subject), subject)))
-    return provider(subject)
-
-  rule_coroutine = res
-  rule_input = None
-  while True:
-    res = rule_coroutine.send(rule_input)
-    if isinstance(res, Get):
-      rule_input = get(res.product, res.subject)
-    elif type(res) in (tuple, list):
-      rule_input = [get(g.product, g.subject) for g in res]
-    else:
-      return res
-
-
-def init_native():
-  """Return the `Native` instance."""
-  return Native()
-
-
-def create_scheduler(rules, union_rules=None, validate=True, native=None):
-  """Create a Scheduler."""
-  native = native or init_native()
-  return Scheduler(
-    native,
-    FileSystemProjectTree(os.getcwd()),
-    './.pants.d',
-    rules,
-    union_rules,
-    execution_options=DEFAULT_EXECUTION_OPTIONS,
-    validate=validate,
-  )
-
-
-class Target(Struct):
-  def __init__(self, name=None, configurations=None, **kwargs):
-    super().__init__(name=name, **kwargs)
-    self.configurations = configurations
-
-  @addressable_list(SubclassesOf(Struct))
-  def configurations(self):
-    pass
-
-
-TARGET_TABLE = SymbolTable({'struct': Struct, 'target': Target})
-
-
-def assert_equal_with_printing(test_case, expected, actual):
-  """Asserts equality, but also prints the values so they can be compared on failure.
-
-  Usage:
-
-     class FooTest(unittest.TestCase):
-       assert_equal_with_printing = assert_equal_with_printing
-
-       def test_foo(self):
-         self.assert_equal_with_printing("a", "b")
-  """
-  str_actual = str(actual)
-  print('Expected:')
-  print(expected)
-  print('Actual:')
-  print(str_actual)
-  test_case.assertEqual(expected, str_actual)
-
-
-def remove_locations_from_traceback(trace):
-  location_pattern = re.compile('"/.*", line \d+')
-  address_pattern = re.compile('0x[0-9a-f]+')
-  new_trace = location_pattern.sub('LOCATION-INFO', trace)
-  new_trace = address_pattern.sub('0xEEEEEEEEE', new_trace)
-  return new_trace
-
-
-class MockConsole:
-  """An implementation of pants.engine.console.Console which captures output."""
-
-  def __init__(self, use_colors=True):
-    self.stdout = StringIO()
-    self.stderr = StringIO()
-    self._use_colors = use_colors
-
-  def write_stdout(self, payload):
-    self.stdout.write(payload)
-
-  def write_stderr(self, payload):
-    self.stderr.write(payload)
-
-  def print_stdout(self, payload):
-    print(payload, file=self.stdout)
-
-  def print_stderr(self, payload):
-    print(payload, file=self.stderr)
-
-  def _safe_color(self, text, color):
-    return color(text) if self._use_colors else text
-
-  def blue(self, text):
-    return self._safe_color(text, blue)
-
-  def green(self, text):
-    return self._safe_color(text, green)
-
-  def red(self, text):
-    return self._safe_color(text, red)
+from pants.testutil.engine.util import TARGET_TABLE as TARGET_TABLE  # noqa
+from pants.testutil.engine.util import MockConsole as MockConsole  # noqa
+from pants.testutil.engine.util import Target as Target  # noqa
+from pants.testutil.engine.util import assert_equal_with_printing as assert_equal_with_printing  # noqa
+from pants.testutil.engine.util import create_scheduler as create_scheduler  # noqa
+from pants.testutil.engine.util import init_native as init_native  # noqa
+from pants.testutil.engine.util import (
+  remove_locations_from_traceback as remove_locations_from_traceback,
+)  # noqa
+from pants.testutil.engine.util import run_rule as run_rule  # noqa
diff --git a/tests/python/pants_test/interpreter_selection_utils.py b/tests/python/pants_test/interpreter_selection_utils.py
index 4fa560502a9..1432b0f6979 100644
--- a/tests/python/pants_test/interpreter_selection_utils.py
+++ b/tests/python/pants_test/interpreter_selection_utils.py
@@ -1,85 +1,33 @@
 # Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
 # Licensed under the Apache License, Version 2.0 (see LICENSE).
 
-import os
-import subprocess
-from unittest import skipIf
-
-
-PY_2 = '2'
-PY_3 = '3'
-
-PY_27 = '2.7'
-PY_36 = '3.6'
-PY_37 = '3.7'
-
-
-def has_python_version(version):
-  """Returns `True` if the current system has the specified version of python.
-
-  :param string version: A python version string, such as 2.7, 3.
-  """
-  # TODO: Tests that skip unless a python interpreter is present often need the path to that
-  # interpreter, and so end up calling python_interpreter_path again. Find a way to streamline this.
-  return python_interpreter_path(version) is not None
-
-
-def python_interpreter_path(version):
-  """Returns the interpreter path if the current system has the specified version of python.
-
-  :param string version: A python version string, such as 2.7, 3.
-  :returns: the normalized path to the interpreter binary if found; otherwise `None`
-  :rtype: string
-  """
-  try:
-    command = ['python{}'.format(version), '-c', 'import sys; print(sys.executable)']
-    py_path = subprocess.check_output(command).decode().strip()
-    return os.path.realpath(py_path)
-  except (subprocess.CalledProcessError, FileNotFoundError):
-    return None
-
-
-def skip_unless_all_pythons_present(*versions):
-  """A decorator that only runs the decorated test method if all of the specified pythons are present.
-
-  :param string *versions: Python version strings, such as 2.7, 3.
-  """
-  missing_versions = [v for v in versions if not has_python_version(v)]
-  if len(missing_versions) == 1:
-    return skipIf(True, 'Could not find python {} on system. Skipping.'.format(missing_versions[0]))
-  elif len(missing_versions) > 1:
-    return skipIf(True,
-                  'Skipping due to the following missing required pythons: {}'
-                  .format(', '.join(missing_versions)))
-  else:
-    return skipIf(False, 'All required pythons present, continuing with test!')
-
-
-def skip_unless_python27_present(func):
-  """A test skip decorator that only runs a test method if python2.7 is present."""
-  return skip_unless_all_pythons_present(PY_27)(func)
-
-
-def skip_unless_python3_present(func):
-  """A test skip decorator that only runs a test method if python3 is present."""
-  return skip_unless_all_pythons_present(PY_3)(func)
-
-
-def skip_unless_python36_present(func):
-  """A test skip decorator that only runs a test method if python3.6 is present."""
-  return skip_unless_all_pythons_present(PY_36)(func)
-
-
-def skip_unless_python27_and_python3_present(func):
-  """A test skip decorator that only runs a test method if python2.7 and python3 are present."""
-  return skip_unless_all_pythons_present(PY_27, PY_3)(func)
-
-
-def skip_unless_python27_and_python36_present(func):
-  """A test skip decorator that only runs a test method if python2.7 and python3.6 are present."""
-  return skip_unless_all_pythons_present(PY_27, PY_36)(func)
-
-
-def skip_unless_python36_and_python37_present(func):
-  """A test skip decorator that only runs a test method if python3.6 and python3.7 are present."""
-  return skip_unless_all_pythons_present(PY_36, PY_37)(func)
+from pants.testutil.interpreter_selection_utils import PY_2 as PY_2  # noqa
+from pants.testutil.interpreter_selection_utils import PY_3 as PY_3  # noqa
+from pants.testutil.interpreter_selection_utils import PY_27 as PY_27  # noqa
+from pants.testutil.interpreter_selection_utils import PY_36 as PY_36  # noqa
+from pants.testutil.interpreter_selection_utils import PY_37 as PY_37  # noqa
+from pants.testutil.interpreter_selection_utils import has_python_version as has_python_version  # noqa
+from pants.testutil.interpreter_selection_utils import (
+  python_interpreter_path as python_interpreter_path,
+)  # noqa
+from pants.testutil.interpreter_selection_utils import (
+  skip_unless_all_pythons_present as skip_unless_all_pythons_present,
+)  # noqa
+from pants.testutil.interpreter_selection_utils import (
+  skip_unless_python3_present as skip_unless_python3_present,
+)  # noqa
+from pants.testutil.interpreter_selection_utils import (
+  skip_unless_python27_and_python3_present as skip_unless_python27_and_python3_present,
+)  # noqa
+from pants.testutil.interpreter_selection_utils import (
+  skip_unless_python27_and_python36_present as skip_unless_python27_and_python36_present,
+)  # noqa
+from pants.testutil.interpreter_selection_utils import (
+  skip_unless_python27_present as skip_unless_python27_present,
+)  # noqa
+from pants.testutil.interpreter_selection_utils import (
+  skip_unless_python36_and_python37_present as skip_unless_python36_and_python37_present,
+)  # noqa
+from pants.testutil.interpreter_selection_utils import (
+  skip_unless_python36_present as skip_unless_python36_present,
+)  # noqa
diff --git a/tests/python/pants_test/invalidation/BUILD b/tests/python/pants_test/invalidation/BUILD
index cd2233740b3..5d3782d62ce 100644
--- a/tests/python/pants_test/invalidation/BUILD
+++ b/tests/python/pants_test/invalidation/BUILD
@@ -18,8 +18,7 @@ python_tests(
   dependencies = [
     'src/python/pants/invalidation',
     'src/python/pants/util:dirutil',
-    'tests/python/pants_test/testutils:mock_logger',
-    'tests/python/pants_test:task_test_base',
+    'tests/python/pants_test:test_base',
   ]
 )
 
diff --git a/tests/python/pants_test/jvm/BUILD b/tests/python/pants_test/jvm/BUILD
index e255abe64c3..90d5705a5a2 100644
--- a/tests/python/pants_test/jvm/BUILD
+++ b/tests/python/pants_test/jvm/BUILD
@@ -23,19 +23,7 @@ python_library(
   name='jvm_tool_task_test_base',
   sources=['jvm_tool_task_test_base.py'],
   dependencies=[
-    '//:build_tools',
-    '//:3rdparty_directory',
-    ':jvm_task_test_base',
-    'src/python/pants/backend/jvm/subsystems:jvm_tool_mixin',
-    'src/python/pants/backend/jvm/targets:jvm',
-    'src/python/pants/backend/jvm/targets:scala',
-    'src/python/pants/backend/jvm/tasks:bootstrap_jvm_tools',
-    'src/python/pants/backend/jvm/tasks:nailgun_task',
-    'src/python/pants/base:build_environment',
-    'src/python/pants/build_graph',
-    'src/python/pants/ivy',
-    'src/python/pants/java/jar',
-    'src/python/pants/util:dirutil',
+    'src/python/pants/testutil/jvm:jvm_tool_task_test_base',
   ]
 )
 
@@ -43,8 +31,7 @@ python_library(
   name='nailgun_task_test_base',
   sources=['nailgun_task_test_base.py'],
   dependencies=[
-    ':jvm_tool_task_test_base',
-    'src/python/pants/backend/jvm/tasks:nailgun_task',
+    'src/python/pants/testutil/jvm:nailgun_task_test_base',
   ]
 )
 
@@ -52,7 +39,7 @@ python_library(
   name='jar_task_test_base',
   sources=['jar_task_test_base.py'],
   dependencies=[
-    ':nailgun_task_test_base',
+    'src/python/pants/testutil/jvm:jar_task_test_base',
   ]
 )
 
@@ -60,8 +47,6 @@ python_library(
   name='jvm_task_test_base',
   sources=['jvm_task_test_base.py'],
   dependencies=[
-    'src/python/pants/backend/jvm/tasks:classpath_products',
-    'src/python/pants/util:dirutil',
-    'tests/python/pants_test:task_test_base',
+    'src/python/pants/testutil/jvm:jvm_task_test_base',
   ]
 )
diff --git a/tests/python/pants_test/jvm/jar_task_test_base.py b/tests/python/pants_test/jvm/jar_task_test_base.py
index 7e81689985c..855c2b6dc04 100644
--- a/tests/python/pants_test/jvm/jar_task_test_base.py
+++ b/tests/python/pants_test/jvm/jar_task_test_base.py
@@ -1,11 +1,4 @@
 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
 # Licensed under the Apache License, Version 2.0 (see LICENSE).
 
-from pants_test.jvm.nailgun_task_test_base import NailgunTaskTestBase
-
-
-class JarTaskTestBase(NailgunTaskTestBase):
-  """Prepares an ephemeral test build root that supports jar tasks.
-
-  :API: public
-  """
+from pants.testutil.jvm.jar_task_test_base import JarTaskTestBase as JarTaskTestBase  # noqa
diff --git a/tests/python/pants_test/jvm/jvm_task_test_base.py b/tests/python/pants_test/jvm/jvm_task_test_base.py
index 72f1bb9827c..664ad99817b 100644
--- a/tests/python/pants_test/jvm/jvm_task_test_base.py
+++ b/tests/python/pants_test/jvm/jvm_task_test_base.py
@@ -1,61 +1,4 @@
 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
 # Licensed under the Apache License, Version 2.0 (see LICENSE).
 
-import os
-
-from pants.backend.jvm.subsystems.resolve_subsystem import JvmResolveSubsystem
-from pants.backend.jvm.tasks.classpath_products import ClasspathProducts
-from pants.util.dirutil import safe_file_dump, safe_mkdir, safe_mkdtemp
-from pants_test.subsystem.subsystem_util import init_subsystem
-from pants_test.task_test_base import TaskTestBase
-
-
-class JvmTaskTestBase(TaskTestBase):
-  """
-  :API: public
-  """
-
-  def setUp(self):
-    """
-    :API: public
-    """
-    super().setUp()
-    init_subsystem(JvmResolveSubsystem)
-    self.set_options_for_scope('resolver', resolver='ivy')
-
-  def populate_runtime_classpath(self, context, classpath=None):
-    """
-    Helps actual test cases to populate the 'runtime_classpath' products data mapping
-    in the context, which holds the classpath value for targets.
-
-    :API: public
-
-    :param context: The execution context where the products data mapping lives.
-    :param classpath: a list of classpath strings. If not specified,
-                      [os.path.join(self.buildroot, 'none')] will be used.
-    """
-    classpath = classpath or []
-    runtime_classpath = self.get_runtime_classpath(context)
-    runtime_classpath.add_for_targets(context.targets(),
-                                      [('default', entry) for entry in classpath])
-
-  def add_to_runtime_classpath(self, context, tgt, files_dict):
-    """Creates and adds the given files to the classpath for the given target under a temp path.
-
-    :API: public
-    """
-    runtime_classpath = self.get_runtime_classpath(context)
-    # Create a temporary directory under the target id, then dump all files.
-    target_dir = os.path.join(self.test_workdir, tgt.id)
-    safe_mkdir(target_dir)
-    classpath_dir = safe_mkdtemp(dir=target_dir)
-    for rel_path, content in files_dict.items():
-      safe_file_dump(os.path.join(classpath_dir, rel_path), content)
-    # Add to the classpath.
-    runtime_classpath.add_for_target(tgt, [('default', classpath_dir)])
-
-  def get_runtime_classpath(self, context):
-    """
-    :API: public
-    """
-    return context.products.get_data('runtime_classpath', init_func=ClasspathProducts.init_func(self.pants_workdir))
+from pants.testutil.jvm.jvm_task_test_base import JvmTaskTestBase as JvmTaskTestBase  # noqa
diff --git a/tests/python/pants_test/jvm/jvm_tool_task_test_base.py b/tests/python/pants_test/jvm/jvm_tool_task_test_base.py
index 07cb52a0bf7..0f25dde0450 100644
--- a/tests/python/pants_test/jvm/jvm_tool_task_test_base.py
+++ b/tests/python/pants_test/jvm/jvm_tool_task_test_base.py
@@ -1,122 +1,4 @@
 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
 # Licensed under the Apache License, Version 2.0 (see LICENSE).
 
-import os
-import shutil
-
-from pants.backend.jvm.register import build_file_aliases
-from pants.backend.jvm.subsystems.jvm_tool_mixin import JvmToolMixin
-from pants.backend.jvm.tasks.bootstrap_jvm_tools import BootstrapJvmTools
-from pants.backend.jvm.tasks.nailgun_task import NailgunTask
-from pants.base.build_environment import get_pants_cachedir
-from pants.build_graph.build_file_aliases import BuildFileAliases
-from pants.build_graph.target import Target
-from pants.ivy.bootstrapper import Bootstrapper
-from pants.util.dirutil import safe_mkdir
-from pants_test.jvm.jvm_task_test_base import JvmTaskTestBase
-
-
-class JvmToolTaskTestBase(JvmTaskTestBase):
-  """Prepares an ephemeral test build root that supports tasks that use jvm tool bootstrapping.
-
-  :API: public
-  """
-
-  @classmethod
-  def alias_groups(cls):
-    """
-    :API: public
-    """
-    # Aliases appearing in our real BUILD.tools.
-    return build_file_aliases().merge(BuildFileAliases(targets={'target': Target}))
-
-  def setUp(self):
-    """
-    :API: public
-    """
-    super().setUp()
-
-    # Use a synthetic subclass for proper isolation when bootstrapping within the test.
-    bootstrap_scope = 'bootstrap_scope'
-    self.bootstrap_task_type = self.synthesize_task_subtype(BootstrapJvmTools, bootstrap_scope)
-    JvmToolMixin.reset_registered_tools()
-
-    # Set some options:
-
-    # 1. Cap BootstrapJvmTools memory usage in tests.  The Xmx was empirically arrived upon using
-    #    -Xloggc and verifying no full gcs for a test using the full gamut of resolving a multi-jar
-    #    tool, constructing a fat jar and then shading that fat jar.
-    #
-    # 2. Allow tests to read/write tool jars from the real artifact cache, so they don't
-    #    each have to resolve and shade them every single time, which is a huge slowdown.
-    #    Note that local artifact cache writes are atomic, so it's fine for multiple concurrent
-    #    tests to write to it.
-    #
-    # Note that we don't have access to the invoking pants instance's options, so we assume that
-    # its artifact cache is in the standard location.  If it isn't, worst case the tests will
-    # populate a second cache at the standard location, which is no big deal.
-    # TODO: We really need a straightforward way for pants's own tests to get to the enclosing
-    # pants instance's options values.
-    artifact_caches = [os.path.join(get_pants_cachedir(), 'artifact_cache')]
-    self.set_options_for_scope(bootstrap_scope,
-                               execution_strategy=NailgunTask.ExecutionStrategy.subprocess,
-                               jvm_options=['-Xmx128m'])
-    self.set_options_for_scope('cache.{}'.format(bootstrap_scope),
-                               read_from=artifact_caches,
-                               write_to=artifact_caches)
-
-    # Copy into synthetic build-root
-    shutil.copy('BUILD.tools', self.build_root)
-    build_root_third_party = os.path.join(self.build_root, '3rdparty')
-    safe_mkdir(build_root_third_party)
-    shutil.copy(os.path.join('3rdparty', 'BUILD'), build_root_third_party)
-
-    Bootstrapper.reset_instance()
-
-  def context(self, for_task_types=None, **kwargs):
-    """
-    :API: public
-    """
-    # Add in the bootstrapper task type, so its options get registered and set.
-    for_task_types = [self.bootstrap_task_type] + (for_task_types or [])
-    return super().context(for_task_types=for_task_types, **kwargs)
-
-  def prepare_execute(self, context):
-    """Prepares a jvm tool-using task for execution, first bootstrapping any required jvm tools.
-
-    Note: Other task pre-requisites will not be ensured and tests must instead setup their own
-          product requirements if any.
-
-    :API: public
-
-    :returns: The prepared Task instance.
-    """
-    # test_workdir is an @property
-    workdir = self.test_workdir
-
-    # Bootstrap the tools needed by the task under test.
-    # We need the bootstrap task's workdir to be under the test's .pants.d, so that it can
-    # use artifact caching.  Making it a sibling of the main task's workdir achieves this.
-    self.bootstrap_task_type.get_alternate_target_roots(context.options,
-                                                        self.address_mapper,
-                                                        self.build_graph)
-    bootstrap_workdir = os.path.join(os.path.dirname(workdir), 'bootstrap_jvm_tools')
-    self.bootstrap_task_type(context, bootstrap_workdir).execute()
-
-    task = self.create_task(context, workdir)
-    return task
-
-  def execute(self, context):
-    """Executes a jvm tool-using task, first bootstrapping any required jvm tools.
-
-    Note: Other task pre-requisites will not be ensured and tests must instead setup their own
-          product requirements if any.
-
-    :API: public
-
-    :returns: The Task instance that was executed.
-    """
-    task = self.prepare_execute(context)
-    if not task.skip_execution:
-      task.execute()
-    return task
+from pants.testutil.jvm.jvm_tool_task_test_base import JvmToolTaskTestBase as JvmToolTaskTestBase  # noqa
diff --git a/tests/python/pants_test/jvm/nailgun_task_test_base.py b/tests/python/pants_test/jvm/nailgun_task_test_base.py
index 31e5fc1d63b..7feb07cb975 100644
--- a/tests/python/pants_test/jvm/nailgun_task_test_base.py
+++ b/tests/python/pants_test/jvm/nailgun_task_test_base.py
@@ -1,23 +1,4 @@
 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
 # Licensed under the Apache License, Version 2.0 (see LICENSE).
 
-from pants.backend.jvm.tasks.nailgun_task import NailgunTask
-from pants_test.jvm.jvm_tool_task_test_base import JvmToolTaskTestBase
-
-
-class NailgunTaskTestBase(JvmToolTaskTestBase):
-  """Ensures `NailgunTask` tests use subprocess mode to stably test the task under test.
-
-  For subclasses of NailgunTask the nailgun behavior is irrelevant to the code under test and can
-  cause problems in CI environments. As such, disabling nailgunning ensures the test focus is where
-  it needs to be to test the unit.
-
-  :API: public
-  """
-
-  def setUp(self):
-    """
-    :API: public
-    """
-    super().setUp()
-    self.set_options(execution_strategy=NailgunTask.ExecutionStrategy.subprocess)
+from pants.testutil.jvm.nailgun_task_test_base import NailgunTaskTestBase as NailgunTaskTestBase  # noqa
diff --git a/tests/python/pants_test/option/BUILD b/tests/python/pants_test/option/BUILD
index 8e7f3d9455f..deb7975b73c 100644
--- a/tests/python/pants_test/option/BUILD
+++ b/tests/python/pants_test/option/BUILD
@@ -10,6 +10,7 @@ python_tests(
     'src/python/pants/base:deprecated',
     'src/python/pants/option',
     'src/python/pants/util:contextutil',
+    'tests/python/pants_test/option/util',
     'tests/python/pants_test:test_base',
   ],
   timeout=30,
diff --git a/tests/python/pants_test/option/util/BUILD b/tests/python/pants_test/option/util/BUILD
index 2ac6c29035b..2808f47dfc1 100644
--- a/tests/python/pants_test/option/util/BUILD
+++ b/tests/python/pants_test/option/util/BUILD
@@ -3,8 +3,6 @@
 
 python_library(
   dependencies=[
-    '//:build_root',
-    '//:pants_ini',
-    'src/python/pants/option',
+    'src/python/pants/testutil/option'
   ],
 )
diff --git a/tests/python/pants_test/option/util/fakes.py b/tests/python/pants_test/option/util/fakes.py
index 202fefdbcbd..c645441487b 100644
--- a/tests/python/pants_test/option/util/fakes.py
+++ b/tests/python/pants_test/option/util/fakes.py
@@ -1,198 +1,11 @@
 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
 # Licensed under the Apache License, Version 2.0 (see LICENSE).
 
-from collections import defaultdict
-
-from pants.option.global_options import GlobalOptionsRegistrar
-from pants.option.option_util import is_list_option
-from pants.option.parser import Parser
-from pants.option.parser_hierarchy import enclosing_scope
-from pants.option.ranked_value import RankedValue
-from pants.option.scope import GLOBAL_SCOPE
-
-
-class _FakeOptionValues(object):
-  def __init__(self, option_values):
-    self._option_values = option_values
-
-  def __iter__(self):
-    return iter(self._option_values.keys())
-
-  def __getitem__(self, key):
-    return getattr(self, key)
-
-  def get(self, key, default=None):
-    if hasattr(self, key):
-      return getattr(self, key, default)
-    return default
-
-  def __getattr__(self, key):
-    try:
-      value = self._option_values[key]
-    except KeyError:
-      # Instead of letting KeyError raise here, re-raise an AttributeError to not break getattr().
-      raise AttributeError(key)
-    return value.value if isinstance(value, RankedValue) else value
-
-  def get_rank(self, key):
-    value = self._option_values[key]
-    return value.rank if isinstance(value, RankedValue) else RankedValue.FLAG
-
-  def is_flagged(self, key):
-    return self.get_rank(key) == RankedValue.FLAG
-
-  def is_default(self, key):
-    return self.get_rank(key) in (RankedValue.NONE, RankedValue.HARDCODED)
-
-  @property
-  def option_values(self):
-    return self._option_values
-
-
-def _options_registration_function(defaults, fingerprintables):
-  def register(*args, **kwargs):
-    option_name = Parser.parse_dest(*args, **kwargs)
-
-    default = kwargs.get('default')
-    if default is None:
-      if kwargs.get('type') == bool:
-        default = False
-      if kwargs.get('type') == list:
-        default = []
-    defaults[option_name] = RankedValue(RankedValue.HARDCODED, default)
-
-    fingerprint = kwargs.get('fingerprint', False)
-    if fingerprint:
-      if is_list_option(kwargs):
-        val_type = kwargs.get('member_type', str)
-      else:
-        val_type = kwargs.get('type', str)
-      fingerprintables[option_name] = val_type
-
-  return register
-
-
-def create_options(options, passthru_args=None, fingerprintable_options=None):
-  """Create a fake Options object for testing.
-
-  Note that the returned object only provides access to the provided options values. There is
-  no registration mechanism on this object. Code under test shouldn't care about resolving
-  cmd-line flags vs. config vs. env vars etc. etc.
-
-  :param dict options: A dict of scope -> (dict of option name -> value).
-  :param list passthru_args: A list of passthrough command line argument values.
-  :param dict fingerprintable_options: A dict of scope -> (dict of option name -> option type).
-                                       This registry should contain entries for any of the
-                                       `options` that are expected to contribute to fingerprinting.
-  :returns: An fake `Options` object encapsulating the given scoped options.
-  """
-  fingerprintable = fingerprintable_options or defaultdict(dict)
-
-  class FakeOptions:
-    def for_scope(self, scope):
-      # TODO(John Sirois): Some users pass in A dict of scope -> _FakeOptionValues instead of a
-      # dict of scope -> (dict of option name -> value).  Clean up these usages and kill this
-      # accommodation.
-      options_for_this_scope = options.get(scope) or {}
-      if isinstance(options_for_this_scope, _FakeOptionValues):
-        options_for_this_scope = options_for_this_scope.option_values
-
-      scoped_options = {}
-      if scope:
-        scoped_options.update(self.for_scope(enclosing_scope(scope)).option_values)
-      scoped_options.update(options_for_this_scope)
-      return _FakeOptionValues(scoped_options)
-
-    def for_global_scope(self):
-      return self.for_scope(GLOBAL_SCOPE)
-
-    def passthru_args_for_scope(self, scope):
-      return passthru_args or []
-
-    def items(self):
-      return list(options.items())
-
-    @property
-    def scope_to_flags(self):
-      return {}
-
-    def get_fingerprintable_for_scope(self, bottom_scope, include_passthru=False):
-      """Returns a list of fingerprintable (option type, option value) pairs for
-      the given scope.
-
-      Note that this method only collects values for a single scope, NOT from
-      all enclosing scopes as in the Options class!
-
-      :param str bottom_scope: The scope to gather fingerprintable options for.
-      :param bool include_passthru: Whether to include passthru args captured by `bottom_scope` in the
-                                    fingerprintable options.
-      """
-      pairs = []
-      if include_passthru:
-        pu_args = self.passthru_args_for_scope(bottom_scope)
-        pairs.extend((str, arg) for arg in pu_args)
-
-      option_values = self.for_scope(bottom_scope)
-      for option_name, option_type in fingerprintable[bottom_scope].items():
-        pairs.append((option_type, option_values[option_name]))
-      return pairs
-
-    def __getitem__(self, scope):
-      return self.for_scope(scope)
-
-  return FakeOptions()
-
-
-def create_options_for_optionables(optionables,
-                                   options=None,
-                                   options_fingerprintable=None,
-                                   passthru_args=None):
-  """Create a fake Options object for testing with appropriate defaults for the given optionables.
-
-  Any scoped `options` provided will override defaults, behaving as-if set on the command line.
-
-  :param iterable optionables: A series of `Optionable` types to register default options for.
-  :param dict options: A dict of scope -> (dict of option name -> value) representing option values
-                       explicitly set via the command line.
-  :param dict options_fingerprintable: A dict of scope -> (dict of option name -> option type)
-                                       representing the fingerprintable options
-                                       and the scopes they are registered for.
-  :param list passthru_args: A list of passthrough args (specified after `--` on the command line).
-  :returns: A fake `Options` object with defaults populated for the given `optionables` and any
-            explicitly set `options` overlayed.
-  """
-  all_options = defaultdict(dict)
-  fingerprintable_options = defaultdict(dict)
-  bootstrap_option_values = None
-
-  if options_fingerprintable:
-    for scope, opts in options_fingerprintable.items():
-      fingerprintable_options[scope].update(opts)
-
-  def register_func(on_scope):
-    scoped_options = all_options[on_scope]
-    scoped_fingerprintables = fingerprintable_options[on_scope]
-    register = _options_registration_function(scoped_options, scoped_fingerprintables)
-    register.bootstrap = bootstrap_option_values
-    register.scope = on_scope
-    return register
-
-  # TODO: This sequence is a bit repetitive of the real registration sequence.
-
-  # Register bootstrap options and grab their default values for use in subsequent registration.
-  GlobalOptionsRegistrar.register_bootstrap_options(register_func(GLOBAL_SCOPE))
-  bootstrap_option_values = _FakeOptionValues(all_options[GLOBAL_SCOPE].copy())
-
-  # Now register the full global scope options.
-  GlobalOptionsRegistrar.register_options(register_func(GLOBAL_SCOPE))
-
-  for optionable in optionables:
-    optionable.register_options(register_func(optionable.options_scope))
-
-  if options:
-    for scope, opts in options.items():
-      all_options[scope].update(opts)
-
-  return create_options(all_options,
-                        passthru_args=passthru_args,
-                        fingerprintable_options=fingerprintable_options)
+from pants.testutil.option.fakes import _FakeOptionValues as _FakeOptionValues  # noqa
+from pants.testutil.option.fakes import (
+  _options_registration_function as _options_registration_function,
+)  # noqa
+from pants.testutil.option.fakes import create_options as create_options  # noqa
+from pants.testutil.option.fakes import (
+  create_options_for_optionables as create_options_for_optionables,
+)  # noqa
diff --git a/tests/python/pants_test/pants_run_integration_test.py b/tests/python/pants_test/pants_run_integration_test.py
index 849ec7cfb89..0edfa7de92c 100644
--- a/tests/python/pants_test/pants_run_integration_test.py
+++ b/tests/python/pants_test/pants_run_integration_test.py
@@ -1,664 +1,13 @@
 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
 # Licensed under the Apache License, Version 2.0 (see LICENSE).
 
-import configparser
-import glob
-import os
-import re
-import shutil
-import subprocess
-import sys
-import unittest
-from contextlib import contextmanager
-from dataclasses import dataclass
-from operator import eq, ne
-from threading import Lock
-from typing import Any, Callable, List, Optional, Union
-
-from colors import strip_color
-
-from pants.base.build_environment import get_buildroot
-from pants.base.build_file import BuildFile
-from pants.base.exiter import PANTS_SUCCEEDED_EXIT_CODE
-from pants.fs.archive import ZIP
-from pants.subsystem.subsystem import Subsystem
-from pants.util.contextutil import environment_as, pushd, temporary_dir
-from pants.util.dirutil import fast_relpath, safe_mkdir, safe_mkdir_for, safe_open
-from pants.util.osutil import Pid
-from pants.util.process_handler import SubprocessProcessHandler
-from pants.util.strutil import ensure_binary
-from pants_test.testutils.file_test_util import check_symlinks, contains_exact_files
-
-
-# NB: If `shell=True`, it's a single `str`.
-Command = Union[str, List[str]]
-
-
-@dataclass(frozen=True)
-class PantsResult:
-  command: Command
-  returncode: int
-  stdout_data: str
-  stderr_data: str
-  workdir: str
-  pid: Pid
-
-
-@dataclass(frozen=True)
-class PantsJoinHandle:
-  command: Command
-  process: subprocess.Popen
-  workdir: str
-
-  def join(
-    self, stdin_data: Optional[Union[bytes, str]] = None, tee_output: bool = False
-  ) -> PantsResult:
-    """Wait for the pants process to complete, and return a PantsResult for it."""
-
-    communicate_fn = self.process.communicate
-    if tee_output:
-      communicate_fn = SubprocessProcessHandler(self.process).communicate_teeing_stdout_and_stderr
-    if stdin_data is not None:
-      stdin_data = ensure_binary(stdin_data)
-    (stdout_data, stderr_data) = communicate_fn(stdin_data)
-
-    if self.process.returncode != PANTS_SUCCEEDED_EXIT_CODE:
-      render_logs(self.workdir)
-
-    return PantsResult(
-      command=self.command,
-      returncode=self.process.returncode,
-      stdout_data=stdout_data.decode(),
-      stderr_data=stderr_data.decode(),
-      workdir=self.workdir,
-      pid=self.process.pid
-    )
-
-
-def ensure_cached(expected_num_artifacts=None):
-  """Decorator for asserting cache writes in an integration test.
-
-  :param expected_num_artifacts: Expected number of artifacts to be in the task's
-                                 cache after running the test. If unspecified, will
-                                 assert that the number of artifacts in the cache is
-                                 non-zero.
-  """
-  def decorator(test_fn):
-    def wrapper(self, *args, **kwargs):
-      with temporary_dir() as artifact_cache:
-        cache_args = f'--cache-write-to=["{artifact_cache}"]'
-
-        test_fn(self, *args + (cache_args,), **kwargs)
-
-        num_artifacts = 0
-        for (root, _, files) in os.walk(artifact_cache):
-          print(root, files)
-          num_artifacts += len(files)
-
-        if expected_num_artifacts is None:
-          self.assertNotEqual(num_artifacts, 0)
-        else:
-          self.assertEqual(num_artifacts, expected_num_artifacts)
-    return wrapper
-  return decorator
-
-
-def ensure_resolver(f):
-  """A decorator for running an integration test with ivy and coursier as the resolver."""
-  def wrapper(self, *args, **kwargs):
-    for env_var_value in ('ivy', 'coursier'):
-      with environment_as(HERMETIC_ENV='PANTS_RESOLVER_RESOLVER', PANTS_RESOLVER_RESOLVER=env_var_value):
-        f(self, *args, **kwargs)
-
-  return wrapper
-
-
-def ensure_daemon(f):
-  """A decorator for running an integration test with and without the daemon enabled."""
-  def wrapper(self, *args, **kwargs):
-    for enable_daemon in [False, True]:
-      with temporary_dir() as subprocess_dir:
-        enable_daemon_str = str(enable_daemon)
-        env = {
-            'HERMETIC_ENV': 'PANTS_ENABLE_PANTSD,PANTS_ENABLE_V2_ENGINE,PANTS_SUBPROCESSDIR',
-            'PANTS_ENABLE_PANTSD': enable_daemon_str,
-            'PANTS_ENABLE_V2_ENGINE': enable_daemon_str,
-            'PANTS_SUBPROCESSDIR': subprocess_dir,
-          }
-        with environment_as(**env):
-          try:
-            f(self, *args, **kwargs)
-            if enable_daemon:
-              self.assert_success(self.run_pants(['kill-pantsd']))
-          except Exception:
-            print(f'Test failed with enable-pantsd={enable_daemon}:')
-            if enable_daemon:
-              # If we are already raising, do not attempt to confirm that `kill-pantsd` succeeds.
-              self.run_pants(['kill-pantsd'])
-            else:
-              print('Skipping run with enable-pantsd=true because it already failed with enable-pantsd=false.')
-            raise
-  return wrapper
-
-
-def render_logs(workdir):
-  """Renders all potentially relevant logs from the given workdir to stdout."""
-  filenames = list(
-      glob.glob(os.path.join(workdir, 'logs/exceptions*log'))
-    ) + list(
-      glob.glob(os.path.join(workdir, 'pantsd/pantsd.log'))
-    )
-  for filename in filenames:
-    rel_filename = fast_relpath(filename, workdir)
-    print(f'{rel_filename} +++ ')
-    for line in _read_log(filename):
-      print(f'{rel_filename} >>> {line}')
-    print(f'{rel_filename} --- ')
-
-
-def read_pantsd_log(workdir):
-  """Yields all lines from the pantsd log under the given workdir."""
-  # Surface the pantsd log for easy viewing via pytest's `-s` (don't capture stdio) option.
-  for line in _read_log(f'{workdir}/pantsd/pantsd.log'):
-    yield line
-
-
-def _read_log(filename):
-  with open(filename, 'r') as f:
-    for line in f:
-      yield line.rstrip()
-
-
-class PantsRunIntegrationTest(unittest.TestCase):
-  """A base class useful for integration tests for targets in the same repo."""
-
-  class InvalidTestEnvironmentError(Exception):
-    """Raised when the external environment is not set up properly to run integration tests."""
-
-  @classmethod
-  def use_pantsd_env_var(cls):
-    """Subclasses may override to acknowledge that the tests cannot run when pantsd is enabled,
-    or they want to configure pantsd themselves.
-
-    In those cases, --enable-pantsd will not be added to their configuration.
-    This approach is coarsely grained, meaning we disable pantsd in some tests that actually run
-    when pantsd is enabled. However:
-      - The number of mislabeled tests is currently small (~20 tests).
-      - Those tests will still run, just with pantsd disabled.
-
-    N.B. Currently, this doesn't interact with test hermeticity.
-    This means that, if the test coordinator has set PANTS_ENABLE_PANTSD, and a test is not marked
-    as hermetic, it will run under pantsd regardless of the value of this function.
-    """
-    should_pantsd = os.getenv("USE_PANTSD_FOR_INTEGRATION_TESTS")
-    return should_pantsd in ["True", "true", "1"]
-
-  @classmethod
-  def hermetic(cls):
-    """Subclasses may override to acknowledge that they are hermetic.
-
-    That is, that they should run without reading the real pants.ini.
-    """
-    return False
-
-  @classmethod
-  def hermetic_env_whitelist(cls):
-    """A whitelist of environment variables to propagate to tests when hermetic=True."""
-    return [
-        # Used in the wrapper script to locate a rust install.
-        'HOME',
-        # Needed to find python interpreters and other binaries.
-        'PATH',
-        'PANTS_PROFILE',
-        # Ensure that the underlying ./pants invocation doesn't run from sources
-        # (and therefore bootstrap) if we don't want it to.
-        'RUN_PANTS_FROM_PEX',
-      ]
-
-  def setUp(self):
-    super().setUp()
-    # Some integration tests rely on clean subsystem state (e.g., to set up a DistributionLocator).
-    Subsystem.reset()
-
-  def temporary_workdir(self, cleanup=True):
-    # We can hard-code '.pants.d' here because we know that will always be its value
-    # in the pantsbuild/pants repo (e.g., that's what we .gitignore in that repo).
-    # Grabbing the pants_workdir config would require this pants's config object,
-    # which we don't have a reference to here.
-    root = os.path.join(get_buildroot(), '.pants.d', 'tmp')
-    safe_mkdir(root)
-    return temporary_dir(root_dir=root, cleanup=cleanup, suffix='.pants.d')
-
-  def temporary_cachedir(self):
-    return temporary_dir(suffix='__CACHEDIR')
-
-  def temporary_sourcedir(self):
-    return temporary_dir(root_dir=get_buildroot())
-
-  @contextmanager
-  def source_clone(self, source_dir):
-    with self.temporary_sourcedir() as clone_dir:
-      target_spec_dir = os.path.relpath(clone_dir)
-
-      for dir_path, dir_names, file_names in os.walk(source_dir):
-        clone_dir_path = os.path.join(clone_dir, os.path.relpath(dir_path, source_dir))
-        for dir_name in dir_names:
-          os.mkdir(os.path.join(clone_dir_path, dir_name))
-        for file_name in file_names:
-          with open(os.path.join(dir_path, file_name), 'r') as f:
-            content = f.read()
-          if BuildFile._is_buildfile_name(file_name):
-            content = content.replace(source_dir, target_spec_dir)
-          with open(os.path.join(clone_dir_path, file_name), 'w') as f:
-            f.write(content)
-
-      yield clone_dir
-
-  # Incremented each time we spawn a pants subprocess.
-  # Appended to PANTS_PROFILE in the called pants process, so that each subprocess
-  # writes to its own profile file, instead of all stomping on the parent process's profile.
-  _profile_disambiguator = 0
-  _profile_disambiguator_lock = Lock()
-
-  @classmethod
-  def _get_profile_disambiguator(cls):
-    with cls._profile_disambiguator_lock:
-      ret = cls._profile_disambiguator
-      cls._profile_disambiguator += 1
-      return ret
-
-  def get_cache_subdir(self, cache_dir, subdir_glob='*/', other_dirs=()):
-    """Check that there is only one entry of `cache_dir` which matches the glob
-    specified by `subdir_glob`, excluding `other_dirs`, and
-    return it.
-
-    :param str cache_dir: absolute path to some directory.
-    :param str subdir_glob: string specifying a glob for (one level down)
-                            subdirectories of `cache_dir`.
-    :param list other_dirs: absolute paths to subdirectories of `cache_dir`
-                            which must exist and match `subdir_glob`.
-    :return: Assert that there is a single remaining directory entry matching
-             `subdir_glob` after removing `other_dirs`, and return it.
-
-             This method oes not check if its arguments or return values are
-             files or directories. If `subdir_glob` has a trailing slash, so
-             will the return value of this method.
-    """
-    subdirs = set(glob.glob(os.path.join(cache_dir, subdir_glob)))
-    other_dirs = set(other_dirs)
-    self.assertTrue(other_dirs.issubset(subdirs))
-    remaining_dirs = subdirs - other_dirs
-    self.assertEqual(len(remaining_dirs), 1)
-    return list(remaining_dirs)[0]
-
-  def run_pants_with_workdir_without_waiting(self, command, workdir, config=None, extra_env=None,
-                                             build_root=None, print_exception_stacktrace=True,
-                                             **kwargs):
-    args = [
-      '--no-pantsrc',
-      f'--pants-workdir={workdir}',
-      '--kill-nailguns',
-      f'--print-exception-stacktrace={print_exception_stacktrace}',
-    ]
-
-    if self.hermetic():
-      args.extend(['--pants-config-files=[]',
-                   # Turn off cache globally.  A hermetic integration test shouldn't rely on cache,
-                   # or we have no idea if it's actually testing anything.
-                   '--no-cache-read', '--no-cache-write',
-                   # Turn cache on just for tool bootstrapping, for performance.
-                   '--cache-bootstrap-read', '--cache-bootstrap-write'
-                   ])
-
-    if self.use_pantsd_env_var():
-      args.append("--enable-pantsd=True")
-      args.append("--no-shutdown-pantsd-after-run")
-
-    if config:
-      config_data = config.copy()
-      # TODO(#6071): RawConfigParser is legacy. Investigate updating to modern API.
-      ini = configparser.RawConfigParser(defaults=config_data.pop('DEFAULT', None))
-      for section, section_config in config_data.items():
-        ini.add_section(section)
-        for key, value in section_config.items():
-          ini.set(section, key, value)
-      ini_file_name = os.path.join(workdir, 'pants.ini')
-      with safe_open(ini_file_name, mode='w') as fp:
-        ini.write(fp)
-      args.append('--pants-config-files=' + ini_file_name)
-
-    pants_script = [sys.executable, '-m', 'pants']
-
-    # Permit usage of shell=True and string-based commands to allow e.g. `./pants | head`.
-    if kwargs.get('shell') is True:
-      assert not isinstance(command, list), 'must pass command as a string when using shell=True'
-      pants_command = ' '.join([*pants_script, ' '.join(args), command])
-    else:
-      pants_command = pants_script + args + command
-
-    # Only whitelisted entries will be included in the environment if hermetic=True.
-    if self.hermetic():
-      env = dict()
-      # With an empty environment, we would generally get the true underlying system default
-      # encoding, which is unlikely to be what we want (it's generally ASCII, still). So we
-      # explicitly set an encoding here.
-      env['LC_ALL'] = 'en_US.UTF-8'
-      for h in self.hermetic_env_whitelist():
-        value = os.getenv(h)
-        if value is not None:
-          env[h] = value
-      hermetic_env = os.getenv('HERMETIC_ENV')
-      if hermetic_env:
-        for h in hermetic_env.strip(',').split(','):
-          env[h] = os.getenv(h)
-    else:
-      env = os.environ.copy()
-    if extra_env:
-      env.update(extra_env)
-    env.update(PYTHONPATH=os.pathsep.join(sys.path))
-
-    # Don't overwrite the profile of this process in the called process.
-    # Instead, write the profile into a sibling file.
-    if env.get('PANTS_PROFILE'):
-      prof = f"{env['PANTS_PROFILE']}.{self._get_profile_disambiguator()}"
-      env['PANTS_PROFILE'] = prof
-      # Make a note the subprocess command, so the user can correctly interpret the profile files.
-      with open(f'{prof}.cmd', 'w') as fp:
-        fp.write(' '.join(pants_command))
-
-    return PantsJoinHandle(
-        command=pants_command,
-        process=subprocess.Popen(
-          pants_command,
-          env=env,
-          stdin=subprocess.PIPE,
-          stdout=subprocess.PIPE,
-          stderr=subprocess.PIPE,
-          **kwargs
-        ),
-        workdir=workdir
-      )
-
-  def run_pants_with_workdir(
-    self, command, workdir, config=None, stdin_data=None, tee_output=False, **kwargs
-  ) -> PantsResult:
-    if config:
-      kwargs["config"] = config
-    handle = self.run_pants_with_workdir_without_waiting(command, workdir, **kwargs)
-    return handle.join(stdin_data=stdin_data, tee_output=tee_output)
-
-  def run_pants(
-    self, command, config=None, stdin_data=None, extra_env=None, cleanup_workdir=True, **kwargs
-  ) -> PantsResult:
-    """Runs pants in a subprocess.
-
-    :param list command: A list of command line arguments coming after `./pants`.
-    :param config: Optional data for a generated ini file. A map of <section-name> ->
-    map of key -> value. If order in the ini file matters, this should be an OrderedDict.
-    :param kwargs: Extra keyword args to pass to `subprocess.Popen`.
-    """
-    with self.temporary_workdir() as workdir:
-      return self.run_pants_with_workdir(
-        command,
-        workdir,
-        config,
-        stdin_data=stdin_data,
-        extra_env=extra_env,
-        **kwargs
-      )
-
-  @contextmanager
-  def pants_results(self, command, config=None, stdin_data=None, extra_env=None, **kwargs):
-    """Similar to run_pants in that it runs pants in a subprocess, but yields in order to give
-    callers a chance to do any necessary validations on the workdir.
-
-    :param list command: A list of command line arguments coming after `./pants`.
-    :param config: Optional data for a generated ini file. A map of <section-name> ->
-    map of key -> value. If order in the ini file matters, this should be an OrderedDict.
-    :param kwargs: Extra keyword args to pass to `subprocess.Popen`.
-    :returns a PantsResult instance.
-    """
-    with self.temporary_workdir() as workdir:
-      yield self.run_pants_with_workdir(
-        command,
-        workdir,
-        config,
-        stdin_data=stdin_data,
-        extra_env=extra_env,
-        **kwargs
-      )
-
-  def bundle_and_run(self, target, bundle_name, bundle_jar_name=None, bundle_options=None,
-                     args=None,
-                     expected_bundle_jar_content=None,
-                     expected_bundle_content=None,
-                     library_jars_are_symlinks=True):
-    """Creates the bundle with pants, then does java -jar {bundle_name}.jar to execute the bundle.
-
-    :param target: target name to compile
-    :param bundle_name: resulting bundle filename (minus .zip extension)
-    :param bundle_jar_name: monolithic jar filename (minus .jar extension), if None will be the
-      same as bundle_name
-    :param bundle_options: additional options for bundle
-    :param args: optional arguments to pass to executable
-    :param expected_bundle_content: verify the bundle zip content
-    :param expected_bundle_jar_content: verify the bundle jar content
-    :param library_jars_are_symlinks: verify library jars are symlinks if True, and actual
-      files if False. Default `True` because we always create symlinks for both external and internal
-      dependencies, only exception is when shading is used.
-    :return: stdout as a string on success, raises an Exception on error
-    """
-    bundle_jar_name = bundle_jar_name or bundle_name
-    bundle_options = bundle_options or []
-    bundle_options = ['bundle.jvm'] + bundle_options + ['--archive=zip', target]
-    with self.pants_results(bundle_options) as pants_run:
-      self.assert_success(pants_run)
-
-      self.assertTrue(check_symlinks(f'dist/{bundle_name}-bundle/libs', library_jars_are_symlinks))
-      # TODO(John Sirois): We need a zip here to suck in external library classpath elements
-      # pointed to by symlinks in the run_pants ephemeral tmpdir.  Switch run_pants to be a
-      # contextmanager that yields its results while the tmpdir workdir is still active and change
-      # this test back to using an un-archived bundle.
-      with temporary_dir() as workdir:
-        ZIP.extract('dist/{bundle_name}.zip'.format(bundle_name=bundle_name), workdir)
-        if expected_bundle_content:
-          self.assertTrue(contains_exact_files(workdir, expected_bundle_content))
-        if expected_bundle_jar_content:
-          with temporary_dir() as check_bundle_jar_dir:
-            bundle_jar = os.path.join(workdir, f'{bundle_jar_name}.jar')
-            ZIP.extract(bundle_jar, check_bundle_jar_dir)
-            self.assertTrue(contains_exact_files(check_bundle_jar_dir, expected_bundle_jar_content))
-
-        optional_args = []
-        if args:
-          optional_args = args
-        java_run = subprocess.Popen(
-          ['java', '-jar', f'{bundle_jar_name}.jar'] + optional_args,
-          stdout=subprocess.PIPE,
-          cwd=workdir
-        )
-
-        stdout, _ = java_run.communicate()
-      java_returncode = java_run.returncode
-      self.assertEqual(java_returncode, 0)
-      return stdout.decode()
-
-  def assert_success(self, pants_run: PantsResult, msg=None):
-    self.assert_result(pants_run, PANTS_SUCCEEDED_EXIT_CODE, expected=True, msg=msg)
-
-  def assert_failure(self, pants_run: PantsResult, msg=None):
-    self.assert_result(pants_run, PANTS_SUCCEEDED_EXIT_CODE, expected=False, msg=msg)
-
-  def assert_result(self, pants_run: PantsResult, value, expected=True, msg=None):
-    check, assertion = (eq, self.assertEqual) if expected else (ne, self.assertNotEqual)
-    if check(pants_run.returncode, value):
-      return
-
-    details = [msg] if msg else []
-    details.append(' '.join(pants_run.command))
-    details.append(f'returncode: {pants_run.returncode}')
-
-    def indent(content):
-      return '\n\t'.join(content.splitlines())
-
-    details.append(f'stdout:\n\t{indent(pants_run.stdout_data)}')
-    details.append(f'stderr:\n\t{indent(pants_run.stderr_data)}')
-    error_msg = '\n'.join(details)
-
-    assertion(value, pants_run.returncode, error_msg)
-
-  def assert_run_contains_log(self, msg, level, module, pants_run: PantsResult):
-    """Asserts that the passed run's stderr contained the log message."""
-    self.assert_contains_log(msg, level, module, pants_run.stderr_data, pants_run.pid)
-
-  def assert_contains_log(self, msg, level, module, log, pid=None):
-    """
-    Asserts that the passed log contains the message logged by the module at the level.
-
-    If pid is specified, performs an exact match including the pid of the pants process.
-    Otherwise performs a regex match asserting that some pid is present.
-    """
-    prefix = f"[{level}] {module}:pid="
-    suffix = f": {msg}"
-    if pid is None:
-      self.assertRegex(log, re.escape(prefix) + r"\d+" + re.escape(suffix))
-    else:
-      self.assertIn(f"{prefix}{pid}{suffix}", log)
-
-  def assert_is_file(self, file_path):
-    self.assertTrue(os.path.isfile(file_path), f'file path {file_path} does not exist!')
-
-  def assert_is_not_file(self, file_path):
-    self.assertFalse(os.path.isfile(file_path), f'file path {file_path} exists!')
-
-  def normalize(self, s: str) -> str:
-    """Removes escape sequences (e.g. colored output) and all whitespace from string s."""
-    return ''.join(strip_color(s).split())
-
-  @contextmanager
-  def file_renamed(self, prefix, test_name, real_name):
-    real_path = os.path.join(prefix, real_name)
-    test_path = os.path.join(prefix, test_name)
-    try:
-      os.rename(test_path, real_path)
-      yield
-    finally:
-      os.rename(real_path, test_path)
-
-  @contextmanager
-  def temporary_file_content(self, path, content, binary_mode=True):
-    """Temporarily write content to a file for the purpose of an integration test."""
-    path = os.path.realpath(path)
-    assert path.startswith(
-      os.path.realpath(get_buildroot())), 'cannot write paths outside of the buildroot!'
-    assert not os.path.exists(path), 'refusing to overwrite an existing path!'
-    mode = 'wb' if binary_mode else 'w'
-    with open(path, mode) as fh:
-      fh.write(content)
-    try:
-      yield
-    finally:
-      os.unlink(path)
-
-  @contextmanager
-  def with_overwritten_file_content(self, file_path, temporary_content=None):
-    """A helper that resets a file after the method runs.
-
-     It will read a file, save the content, maybe write temporary_content to it, yield, then write the
-     original content to the file.
-
-    :param file_path: Absolute path to the file to be reset after the method runs.
-    :param temporary_content: Optional content to write into the file.
-    """
-    with open(file_path, 'r') as f:
-      file_original_content = f.read()
-
-    try:
-      if temporary_content is not None:
-        with open(file_path, 'w') as f:
-          f.write(temporary_content)
-      yield
-
-    finally:
-      with open(file_path, 'w') as f:
-        f.write(file_original_content)
-
-  @contextmanager
-  def mock_buildroot(self, dirs_to_copy=None):
-    """Construct a mock buildroot and return a helper object for interacting with it."""
-
-    @dataclass(frozen=True)
-    class Manager:
-      write_file: Callable[[str, str], None]
-      pushd: Any
-      new_buildroot: str
-
-    # N.B. BUILD.tools, contrib, 3rdparty needs to be copied vs symlinked to avoid
-    # symlink prefix check error in v1 and v2 engine.
-    files_to_copy = ('BUILD.tools',)
-    files_to_link = (
-      'BUILD_ROOT',
-      '.pants.d',
-      'build-support',
-      # NB: when running with --chroot or the V2 engine, `pants` refers to the source root-stripped
-      # directory src/python/pants, not the script `./pants`.
-      'pants',
-      'pants.pex',
-      'pants-plugins',
-      'pants.ini',
-      'pants.travis-ci.ini',
-      'pyproject.toml',
-      'rust-toolchain',
-      'src',
-    )
-    dirs_to_copy = ('3rdparty', 'contrib') + tuple(dirs_to_copy or [])
-
-    with self.temporary_workdir() as tmp_dir:
-      for filename in files_to_copy:
-        shutil.copy(os.path.join(get_buildroot(), filename), os.path.join(tmp_dir, filename))
-
-      for dirname in dirs_to_copy:
-        shutil.copytree(os.path.join(get_buildroot(), dirname), os.path.join(tmp_dir, dirname))
-
-      for filename in files_to_link:
-        link_target = os.path.join(get_buildroot(), filename)
-        if os.path.exists(link_target):
-          os.symlink(link_target, os.path.join(tmp_dir, filename))
-
-      def write_file(file_path, contents):
-        full_file_path = os.path.join(tmp_dir, *file_path.split(os.pathsep))
-        safe_mkdir_for(full_file_path)
-        with open(full_file_path, 'w') as fh:
-          fh.write(contents)
-
-      @contextmanager
-      def dir_context():
-        with pushd(tmp_dir):
-          yield
-
-      yield Manager(write_file, dir_context, tmp_dir)
-
-  def do_command(self, *args, **kwargs) -> PantsResult:
-    """Wrapper around run_pants method.
-
-    :param args: command line arguments used to run pants
-    """
-    cmd = list(args)
-    success = kwargs.pop('success', True)
-    pants_run = self.run_pants(cmd, **kwargs)
-    if success:
-      self.assert_success(pants_run)
-    else:
-      self.assert_failure(pants_run)
-    return pants_run
-
-  @contextmanager
-  def do_command_yielding_workdir(self, *args, **kwargs):
-    cmd = list(args)
-    success = kwargs.pop('success', True)
-    with self.pants_results(cmd, **kwargs) as pants_run:
-      if success:
-        self.assert_success(pants_run)
-      else:
-        self.assert_failure(pants_run)
-      yield pants_run
+from pants.testutil.pants_run_integration_test import PantsJoinHandle as PantsJoinHandle  # noqa
+from pants.testutil.pants_run_integration_test import PantsResult as PantsResult  # noqa
+from pants.testutil.pants_run_integration_test import (
+  PantsRunIntegrationTest as PantsRunIntegrationTest,
+)  # noqa
+from pants.testutil.pants_run_integration_test import ensure_cached as ensure_cached  # noqa
+from pants.testutil.pants_run_integration_test import ensure_daemon as ensure_daemon  # noqa
+from pants.testutil.pants_run_integration_test import ensure_resolver as ensure_resolver  # noqa
+from pants.testutil.pants_run_integration_test import read_pantsd_log as read_pantsd_log  # noqa
+from pants.testutil.pants_run_integration_test import render_logs as render_logs  # noqa
diff --git a/tests/python/pants_test/subsystem/BUILD b/tests/python/pants_test/subsystem/BUILD
index 9c9f97167bc..b1b7fe392be 100644
--- a/tests/python/pants_test/subsystem/BUILD
+++ b/tests/python/pants_test/subsystem/BUILD
@@ -13,9 +13,6 @@ python_library(
   name='subsystem_utils',
   sources=['subsystem_util.py'],
   dependencies=[
-    'src/python/pants/base:deprecated',
-    'src/python/pants/build_graph',
-    'src/python/pants/subsystem',
-    'tests/python/pants_test/option/util',
+    'src/python/pants/testutil/subsystem',
   ],
 )
diff --git a/tests/python/pants_test/subsystem/subsystem_util.py b/tests/python/pants_test/subsystem/subsystem_util.py
index a57e4cd7793..6eb422a9b78 100644
--- a/tests/python/pants_test/subsystem/subsystem_util.py
+++ b/tests/python/pants_test/subsystem/subsystem_util.py
@@ -1,75 +1,6 @@
 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
 # Licensed under the Apache License, Version 2.0 (see LICENSE).
 
-from pants.subsystem.subsystem import Subsystem
-from pants_test.option.util.fakes import create_options_for_optionables
-
-
-def global_subsystem_instance(subsystem_type, options=None):
-  """Returns the global instance of a subsystem, for use in tests.
-
-  :API: public
-
-  :param type subsystem_type: The subclass of :class:`pants.subsystem.subsystem.Subsystem`
-                              to create.
-  :param options: dict of scope -> (dict of option name -> value).
-                  The scopes may be that of the global instance of the subsystem (i.e.,
-                  subsystem_type.options_scope) and/or the scopes of instances of the
-                  subsystems it transitively depends on.
-  """
-  init_subsystem(subsystem_type, options)
-  return subsystem_type.global_instance()
-
-
-def init_subsystems(subsystem_types, options=None):
-  """Initialize subsystems for use in tests.
-
-  Does not create an instance.  This function is for setting up subsystems that the code
-  under test creates.
-
-  Note that there is some redundancy between this function and BaseTest.context(for_subsystems=...).
-  TODO: Fix that.
-
-  :API: public
-
-  :param list subsystem_types: The subclasses of :class:`pants.subsystem.subsystem.Subsystem`
-                               to create.
-  :param options: dict of scope -> (dict of option name -> value).
-                  The scopes may be those of the global instances of the subsystems (i.e.,
-                  subsystem_type.options_scope) and/or the scopes of instances of the
-                  subsystems they transitively depend on.
-  """
-  optionables = set()
-  for s in subsystem_types:
-    if not Subsystem.is_subsystem_type(s):
-      raise TypeError('{} is not a subclass of `Subsystem`'.format(s))
-    for si in s.known_scope_infos():
-      optionables.add(si.optionable_cls)
-  if options:
-    allowed_scopes = {o.options_scope for o in optionables}
-    for scope in options.keys():
-      if scope != '' and scope not in allowed_scopes:
-        raise ValueError('`{}` is not the scope of any of these subsystems: {}'.format(
-          scope, optionables))
-  # Don't trample existing subsystem options, in case a test has set up some
-  # other subsystems in some other way.
-  updated_options = dict(Subsystem._options.items()) if Subsystem._options else {}
-  if options:
-    updated_options.update(options)
-  Subsystem.set_options(create_options_for_optionables(optionables, options=updated_options))
-
-
-def init_subsystem(subsystem_type, options=None):
-  """
-  Singular form of :func:`pants_test.subsystem.subsystem_util.init_subsystems`
-
-  :API: public
-
-  :param subsystem_type: The subclass of :class:`pants.subsystem.subsystem.Subsystem`
-                               to create.
-  :param options: dict of scope -> (dict of option name -> value).
-                  The scopes may be those of the global instance of the subsystem (i.e.,
-                  subsystem_type.options_scope) and/or the scopes of instance of the
-                  subsystem it transitively depends on.
-  """
-  init_subsystems([subsystem_type], options)
+from pants.testutil.subsystem.util import global_subsystem_instance as global_subsystem_instance  # noqa
+from pants.testutil.subsystem.util import init_subsystem as init_subsystem  # noqa
+from pants.testutil.subsystem.util import init_subsystems as init_subsystems  # noqa
diff --git a/tests/python/pants_test/task_test_base.py b/tests/python/pants_test/task_test_base.py
index 79da5267d5e..968cd94c1f5 100644
--- a/tests/python/pants_test/task_test_base.py
+++ b/tests/python/pants_test/task_test_base.py
@@ -1,391 +1,8 @@
 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
 # Licensed under the Apache License, Version 2.0 (see LICENSE).
 
-import glob
-import os
-import subprocess
-from abc import abstractmethod
-from contextlib import closing, contextmanager
-from dataclasses import dataclass
-from io import BytesIO
-from typing import Any, Tuple
-
-from pants.goal.goal import Goal
-from pants.ivy.bootstrapper import Bootstrapper
-from pants.task.console_task import ConsoleTask
-from pants.task.task import Task
-from pants.util.contextutil import temporary_dir
-from pants.util.memo import memoized_method
-from pants.util.meta import classproperty
-from pants_test.test_base import TestBase
-
-
-# TODO: Find a better home for this?
-def is_exe(name):
-  result = subprocess.call(['which', name], stdout=open(os.devnull, 'w'), stderr=subprocess.STDOUT)
-  return result == 0
-
-
-def ensure_cached(task_cls, expected_num_artifacts=None):
-  """Decorator for a task-executing unit test. Asserts that after running the
-  decorated test function, the cache for task_cls contains
-  expected_num_artifacts.
-
-  Uses a new temp dir for the artifact cache, and uses a glob based on the
-  task's synthesized subtype to find the cache directories within the new temp
-  dir which were generated by the actions performed within the test method.
-
-  :API: public
-
-  :param task_cls: Class of the task to check the artifact cache
-                   for (e.g. JarCreate).
-  :param expected_num_artifacts: Expected number of artifacts to be in the
-                                 task's cache after running the test. If
-                                 unspecified, will assert that the number of
-                                 artifacts in the cache is non-zero.
-  """
-  def decorator(test_fn):
-    def wrapper(self, *args, **kwargs):
-      with self.cache_check(expected_num_artifacts=expected_num_artifacts):
-        test_fn(self, *args, **kwargs)
-    return wrapper
-  return decorator
-
-
-class TaskTestBase(TestBase):
-  """A baseclass useful for testing a single Task type.
-
-  :API: public
-  """
-
-  options_scope = 'test_scope'
-
-  @classmethod
-  @abstractmethod
-  def task_type(cls):
-    """Subclasses must return the type of the Task subclass under test.
-
-    :API: public
-    """
-
-  def setUp(self):
-    """
-    :API: public
-    """
-    super().setUp()
-    self._testing_task_type = self.synthesize_task_subtype(self.task_type(), self.options_scope)
-    # We locate the workdir below the pants_workdir, which BaseTest locates within the BuildRoot.
-    # BaseTest cleans this up, so we don't need to.  We give it a stable name, so that we can
-    # use artifact caching to speed up tests.
-    self._test_workdir = os.path.join(self.pants_workdir, self.task_type().stable_name())
-    os.mkdir(self._test_workdir)
-    # TODO: Push this down to JVM-related tests only? Seems wrong to have an ivy-specific
-    # action in this non-JVM-specific, high-level base class.
-    Bootstrapper.reset_instance()
-
-  @property
-  def test_workdir(self):
-    """
-    :API: public
-    """
-    return self._test_workdir
-
-  def synthesize_task_subtype(self, task_type, options_scope):
-    """Creates a synthetic subclass of the task type.
-
-    Note that passing in a stable options scope will speed up some tests, as the scope may appear
-    in the paths of tools used by the task, and if these are stable, tests can get artifact
-    cache hits when bootstrapping these tools. This doesn't hurt test isolation, as we reset
-    class-level state between each test.
-
-    # TODO: Use the task type directly once we re-do the Task lifecycle.
-
-    :API: public
-
-    :param task_type: The task type to subtype.
-    :param options_scope: The scope to give options on the generated task type.
-    :return: A pair (type, options_scope)
-    """
-    subclass_name = 'test_{0}_{1}'.format(task_type.__name__, options_scope)
-    return type(subclass_name, (task_type,), {'_stable_name': task_type._compute_stable_name(),
-                                              'options_scope': options_scope})
-
-  def set_options(self, **kwargs):
-    """
-    :API: public
-    """
-    self.set_options_for_scope(self.options_scope, **kwargs)
-
-  def context(self, for_task_types=None, **kwargs):
-    """
-    :API: public
-    """
-    # Add in our task type.
-    for_task_types = [self._testing_task_type] + (for_task_types or [])
-    return super().context(for_task_types=for_task_types, **kwargs)
-
-  def create_task(self, context, workdir=None):
-    """
-    :API: public
-    """
-    if workdir is None:
-      workdir = self.test_workdir
-    return self._testing_task_type(context, workdir)
-
-  @contextmanager
-  def cache_check(self, expected_num_artifacts=None):
-    """Sets up a temporary artifact cache and checks that the yielded-to code populates it.
-
-    :param expected_num_artifacts: Expected number of artifacts to be in the cache after yielding.
-                                   If unspecified, will assert that the number of artifacts in the
-                                   cache is non-zero.
-    """
-    with temporary_dir() as artifact_cache:
-      self.set_options_for_scope('cache.{}'.format(self.options_scope),
-                                 write_to=[artifact_cache])
-
-      yield
-
-      cache_subdir_glob_str = os.path.join(artifact_cache, '*/')
-      cache_subdirs = glob.glob(cache_subdir_glob_str)
-
-      if expected_num_artifacts == 0:
-        self.assertEqual(len(cache_subdirs), 0)
-        return
-
-      self.assertEqual(len(cache_subdirs), 1)
-      task_cache = cache_subdirs[0]
-
-      num_artifacts = 0
-      for (_, _, files) in os.walk(task_cache):
-        num_artifacts += len(files)
-
-      if expected_num_artifacts is None:
-        self.assertNotEqual(num_artifacts, 0)
-      else:
-        self.assertEqual(num_artifacts, expected_num_artifacts)
-
-
-class ConsoleTaskTestBase(TaskTestBase):
-  """A base class useful for testing ConsoleTasks.
-
-  :API: public
-  """
-
-  def setUp(self):
-    """
-    :API: public
-    """
-    Goal.clear()
-    super().setUp()
-
-    task_type = self.task_type()
-    assert issubclass(task_type, ConsoleTask), \
-        'task_type() must return a ConsoleTask subclass, got %s' % task_type
-
-  def execute_task(self, targets=None, options=None):
-    """Creates a new task and executes it with the given config, command line args and targets.
-
-    :API: public
-
-    :param targets: Optional list of Target objects passed on the command line.
-    Returns the text output of the task.
-    """
-    options = options or {}
-    with closing(BytesIO()) as output:
-      self.set_options(**options)
-      context = self.context(target_roots=targets, console_outstream=output)
-      task = self.create_task(context)
-      task.execute()
-      return output.getvalue().decode()
-
-  def execute_console_task(self, targets=None, extra_targets=None, options=None,
-                           passthru_args=None, workspace=None, scheduler=None):
-    """Creates a new task and executes it with the given config, command line args and targets.
-
-    :API: public
-
-    :param options: option values.
-    :param targets: optional list of Target objects passed on the command line.
-    :param extra_targets: optional list of extra targets in the context in addition to those
-                          passed on the command line.
-    :param passthru_args: optional list of passthru_args
-    :param workspace: optional Workspace to pass into the context.
-
-    Returns the list of items returned from invoking the console task's console_output method.
-    """
-    options = options or {}
-    self.set_options(**options)
-    context = self.context(
-      target_roots=targets,
-      passthru_args=passthru_args,
-      workspace=workspace,
-      scheduler=scheduler
-    )
-    return self.execute_console_task_given_context(context, extra_targets=extra_targets)
-
-  def execute_console_task_given_context(self, context, extra_targets=None):
-    """Creates a new task and executes it with the context and extra targets.
-
-    :API: public
-
-    :param context: The pants run context to use.
-    :param extra_targets: An optional list of extra targets in the context in addition to those
-                          passed on the command line.
-    :returns: The list of items returned from invoking the console task's console_output method.
-    :rtype: list of strings
-    """
-    task = self.create_task(context)
-    input_targets = task.get_targets() if task.act_transitively else context.target_roots
-    return list(task.console_output(list(input_targets) + list(extra_targets or ())))
-
-  def assert_entries(self, sep, *output, **kwargs):
-    """Verifies the expected output text is flushed by the console task under test.
-
-    NB: order of entries is not tested, just presence.
-
-    :API: public
-
-    sep:      the expected output separator.
-    *output:  the output entries expected between the separators
-    **options: additional options passed to execute_task.
-    """
-    # We expect each output line to be suffixed with the separator, so for , and [1,2,3] we expect:
-    # '1,2,3,' - splitting this by the separator we should get ['1', '2', '3', ''] - always an extra
-    # empty string if the separator is properly always a suffix and not applied just between
-    # entries.
-    self.assertEqual(sorted(list(output) + ['']), sorted((self.execute_task(**kwargs)).split(sep)))
-
-  def assert_console_output(self, *output, **kwargs):
-    """Verifies the expected output entries are emitted by the console task under test.
-
-    NB: order of entries is not tested, just presence.
-
-    :API: public
-
-    *output:  the expected output entries
-    **kwargs: additional kwargs passed to execute_console_task.
-    """
-    self.assertEqual(sorted(output), sorted(self.execute_console_task(**kwargs)))
-
-  def assert_console_output_contains(self, output, **kwargs):
-    """Verifies the expected output string is emitted by the console task under test.
-
-    :API: public
-
-    output:  the expected output entry(ies)
-    **kwargs: additional kwargs passed to execute_console_task.
-    """
-    self.assertIn(output, self.execute_console_task(**kwargs))
-
-  def assert_console_output_ordered(self, *output, **kwargs):
-    """Verifies the expected output entries are emitted by the console task under test.
-
-    NB: order of entries is tested.
-
-    :API: public
-
-    *output:  the expected output entries in expected order
-    **kwargs: additional kwargs passed to execute_console_task.
-    """
-    self.assertEqual(list(output), self.execute_console_task(**kwargs))
-
-  def assert_console_raises(self, exception, **kwargs):
-    """Verifies the expected exception is raised by the console task under test.
-
-    :API: public
-
-    **kwargs: additional kwargs are passed to execute_console_task.
-    """
-    with self.assertRaises(exception):
-      self.execute_console_task(**kwargs)
-
-
-class DeclarativeTaskTestMixin:
-  """Experimental mixin for task tests allows specifying tasks to be run before or after the task.
-
-  Calling `self.invoke_tasks()` will create instances of and execute task types in
-  `self.run_before_task_types()`, then `task_type()`, then `self.run_after_task_types()`.
-  """
-
-  @classproperty
-  def run_before_task_types(cls):
-    return []
-
-  @classproperty
-  def run_after_task_types(cls):
-    return []
-
-  @memoized_method
-  def _synthesize_task_types(self, task_types=()):
-    return [
-      self.synthesize_task_subtype(tsk, '__tmp_{}'.format(tsk.__name__))
-      # TODO(#7127): make @memoized_method convert lists to tuples for hashing!
-      for tsk in task_types
-    ]
-
-  def _create_task(self, task_type, context):
-    """Helper method to instantiate tasks besides self._testing_task_type in the test workdir."""
-    return task_type(context, self.test_workdir)
-
-  @dataclass(frozen=True)
-  class TaskInvocationResult:
-    context: Any
-    before_tasks: Tuple[Task, ...]
-    this_task: Task
-    after_tasks: Tuple[Task, ...]
-
-  def invoke_tasks(self, target_closure=None, **context_kwargs) -> 'TaskInvocationResult':
-    """Create and execute the declaratively specified tasks in order.
-
-    Create instances of and execute task types in `self.run_before_task_types()`, then
-    `task_type()`, then `self.run_after_task_types()`.
-
-    :param Iterable target_closure: If not None, check that the build graph contains exactly these
-                                    targets before executing the tasks.
-    :param **context_kwargs: kwargs passed to `self.context()`. Note that this method already sets
-                                    `for_task_types`.
-    :return: A datatype containing the created context and the task instances which were executed.
-    :raises: If any exception is raised during task execution, the context will be attached to the
-             exception object as the attribute '_context' with setattr() before re-raising.
-    """
-    run_before_synthesized_task_types = self._synthesize_task_types(tuple(self.run_before_task_types))
-    run_after_synthesized_task_types = self._synthesize_task_types(tuple(self.run_after_task_types))
-    all_synthesized_task_types = run_before_synthesized_task_types + [
-      self._testing_task_type,
-    ] + run_after_synthesized_task_types
-
-    context = self.context(
-      for_task_types=all_synthesized_task_types,
-      **context_kwargs)
-    if target_closure is not None:
-      self.assertEqual(set(target_closure), set(context.build_graph.targets()))
-
-    run_before_task_instances = [
-      self._create_task(task_type, context)
-      for task_type in run_before_synthesized_task_types
-    ]
-    current_task_instance = self._create_task(
-      self._testing_task_type, context)
-    run_after_task_instances = [
-      self._create_task(task_type, context)
-      for task_type in run_after_synthesized_task_types
-    ]
-    all_task_instances = run_before_task_instances + [
-      current_task_instance
-    ] + run_after_task_instances
-
-    try:
-      for tsk in all_task_instances:
-        tsk.execute()
-    except Exception as e:
-      # TODO(#7644): Remove this hack before anything more starts relying on it!
-      setattr(e, '_context', context)
-      raise e
-
-    return self.TaskInvocationResult(
-      context=context,
-      before_tasks=tuple(run_before_task_instances),
-      this_task=current_task_instance,
-      after_tasks=tuple(run_after_task_instances),
-    )
+from pants.testutil.task_test_base import ConsoleTaskTestBase as ConsoleTaskTestBase  # noqa
+from pants.testutil.task_test_base import DeclarativeTaskTestMixin as DeclarativeTaskTestMixin  # noqa
+from pants.testutil.task_test_base import TaskTestBase as TaskTestBase  # noqa
+from pants.testutil.task_test_base import ensure_cached as ensure_cached  # noqa
+from pants.testutil.task_test_base import is_exe as is_exe  # noqa
diff --git a/tests/python/pants_test/test_base.py b/tests/python/pants_test/test_base.py
index d8b9450d73f..090457a5e6c 100644
--- a/tests/python/pants_test/test_base.py
+++ b/tests/python/pants_test/test_base.py
@@ -1,781 +1,5 @@
 # Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
 # Licensed under the Apache License, Version 2.0 (see LICENSE).
 
-import itertools
-import logging
-import os
-import unittest
-import warnings
-from abc import ABC, ABCMeta, abstractmethod
-from collections import defaultdict
-from contextlib import contextmanager
-from tempfile import mkdtemp
-from textwrap import dedent
-
-from pants.base.build_root import BuildRoot
-from pants.base.cmd_line_spec_parser import CmdLineSpecParser
-from pants.base.exceptions import TaskError
-from pants.base.target_roots import TargetRoots
-from pants.build_graph.address import Address
-from pants.build_graph.build_configuration import BuildConfiguration
-from pants.build_graph.build_file_aliases import BuildFileAliases
-from pants.build_graph.target import Target
-from pants.engine.fs import PathGlobs, PathGlobsAndRoot
-from pants.engine.legacy.graph import HydratedField
-from pants.engine.legacy.structs import SourcesField
-from pants.engine.rules import RootRule
-from pants.init.engine_initializer import EngineInitializer
-from pants.init.util import clean_global_runtime_state
-from pants.option.options_bootstrapper import OptionsBootstrapper
-from pants.source.source_root import SourceRootConfig
-from pants.subsystem.subsystem import Subsystem
-from pants.task.goal_options_mixin import GoalOptionsMixin
-from pants.util.collections import assert_single_element
-from pants.util.contextutil import temporary_dir
-from pants.util.dirutil import (
-  recursive_dirname,
-  relative_symlink,
-  safe_file_dump,
-  safe_mkdir,
-  safe_mkdtemp,
-  safe_open,
-  safe_rmtree,
-)
-from pants.util.memo import memoized_method
-from pants.util.meta import classproperty
-from pants_test.base.context_utils import create_context_from_options
-from pants_test.engine.util import init_native
-from pants_test.option.util.fakes import create_options_for_optionables
-from pants_test.subsystem import subsystem_util
-
-
-class AbstractTestGenerator(ABC):
-  """A mixin that facilitates test generation at runtime."""
-
-  @classmethod
-  @abstractmethod
-  def generate_tests(cls):
-    """Generate tests for a given class.
-
-    This should be called against the composing class in its defining module, e.g.
-
-      class ThingTest(TestGenerator):
-        ...
-
-      ThingTest.generate_tests()
-
-    """
-
-  @classmethod
-  def add_test(cls, method_name, method):
-    """A classmethod that adds dynamic test methods to a given class.
-
-    :param string method_name: The name of the test method (e.g. `test_thing_x`).
-    :param callable method: A callable representing the method. This should take a 'self' argument
-                            as its first parameter for instance method binding.
-    """
-    assert not hasattr(cls, method_name), (
-      'a test with name `{}` already exists on `{}`!'.format(method_name, cls.__name__)
-    )
-    assert method_name.startswith('test_'), '{} is not a valid test name!'.format(method_name)
-    setattr(cls, method_name, method)
-
-
-class TestBase(unittest.TestCase, metaclass=ABCMeta):
-  """A baseclass useful for tests requiring a temporary buildroot.
-
-  :API: public
-  """
-
-  _scheduler = None
-  _local_store_dir = None
-  _build_graph = None
-  _address_mapper = None
-
-  def build_path(self, relpath):
-    """Returns the canonical BUILD file path for the given relative build path.
-
-    :API: public
-    """
-    if os.path.basename(relpath).startswith('BUILD'):
-      return relpath
-    else:
-      return os.path.join(relpath, 'BUILD')
-
-  def create_dir(self, relpath):
-    """Creates a directory under the buildroot.
-
-    :API: public
-
-    relpath: The relative path to the directory from the build root.
-    """
-    path = os.path.join(self.build_root, relpath)
-    safe_mkdir(path)
-    self.invalidate_for(relpath)
-    return path
-
-  def create_workdir_dir(self, relpath):
-    """Creates a directory under the work directory.
-
-    :API: public
-
-    relpath: The relative path to the directory from the work directory.
-    """
-    path = os.path.join(self.pants_workdir, relpath)
-    safe_mkdir(path)
-    self.invalidate_for(relpath)
-    return path
-
-  def invalidate_for(self, *relpaths):
-    """Invalidates all files from the relpath, recursively up to the root.
-
-    Many python operations implicitly create parent directories, so we assume that touching a
-    file located below directories that do not currently exist will result in their creation.
-    """
-    if self._scheduler is None:
-      return
-    files = {f for relpath in relpaths for f in recursive_dirname(relpath)}
-    return self._scheduler.invalidate_files(files)
-
-  def create_link(self, relsrc, reldst):
-    """Creates a symlink within the buildroot.
-
-    :API: public
-
-    relsrc: A relative path for the source of the link.
-    reldst: A relative path for the destination of the link.
-    """
-    src = os.path.join(self.build_root, relsrc)
-    dst = os.path.join(self.build_root, reldst)
-    relative_symlink(src, dst)
-    self.invalidate_for(reldst)
-
-  def create_file(self, relpath, contents='', mode='w'):
-    """Writes to a file under the buildroot.
-
-    :API: public
-
-    relpath:  The relative path to the file from the build root.
-    contents: A string containing the contents of the file - '' by default..
-    mode:     The mode to write to the file in - over-write by default.
-    """
-    path = os.path.join(self.build_root, relpath)
-    with safe_open(path, mode=mode) as fp:
-      fp.write(contents)
-    self.invalidate_for(relpath)
-    return path
-
-  def create_files(self, path, files):
-    """Writes to a file under the buildroot with contents same as file name.
-
-    :API: public
-
-     path:  The relative path to the file from the build root.
-     files: List of file names.
-    """
-    for f in files:
-      self.create_file(os.path.join(path, f), contents=f)
-
-  def create_workdir_file(self, relpath, contents='', mode='w'):
-    """Writes to a file under the work directory.
-
-    :API: public
-
-    relpath:  The relative path to the file from the work directory.
-    contents: A string containing the contents of the file - '' by default..
-    mode:     The mode to write to the file in - over-write by default.
-    """
-    path = os.path.join(self.pants_workdir, relpath)
-    with safe_open(path, mode=mode) as fp:
-      fp.write(contents)
-    return path
-
-  def add_to_build_file(self, relpath, target):
-    """Adds the given target specification to the BUILD file at relpath.
-
-    :API: public
-
-    relpath: The relative path to the BUILD file from the build root.
-    target:  A string containing the target definition as it would appear in a BUILD file.
-    """
-    self.create_file(self.build_path(relpath), target, mode='a')
-
-  def make_target(self,
-                  spec='',
-                  target_type=Target,
-                  dependencies=None,
-                  derived_from=None,
-                  synthetic=False,
-                  make_missing_sources=True,
-                  **kwargs):
-    """Creates a target and injects it into the test's build graph.
-
-    :API: public
-
-    :param string spec: The target address spec that locates this target.
-    :param type target_type: The concrete target subclass to create this new target from.
-    :param list dependencies: A list of target instances this new target depends on.
-    :param derived_from: The target this new target was derived from.
-    :type derived_from: :class:`pants.build_graph.target.Target`
-    """
-    self._init_target_subsystem()
-
-    address = Address.parse(spec)
-
-    if make_missing_sources and 'sources' in kwargs:
-      for source in kwargs['sources']:
-        if '*' not in source:
-          self.create_file(os.path.join(address.spec_path, source), mode='a', contents='')
-      kwargs['sources'] = self.sources_for(kwargs['sources'], address.spec_path)
-
-    target = target_type(name=address.target_name,
-                         address=address,
-                         build_graph=self.build_graph,
-                         **kwargs)
-    dependencies = dependencies or []
-
-    self.build_graph.apply_injectables([target])
-    self.build_graph.inject_target(target,
-                                   dependencies=[dep.address for dep in dependencies],
-                                   derived_from=derived_from,
-                                   synthetic=synthetic)
-
-    # TODO(John Sirois): This re-creates a little bit too much work done by the BuildGraph.
-    # Fixup the BuildGraph to deal with non BuildFileAddresses better and just leverage it.
-    traversables = [target.compute_dependency_specs(payload=target.payload)]
-
-    for dependency_spec in itertools.chain(*traversables):
-      dependency_address = Address.parse(dependency_spec, relative_to=address.spec_path)
-      dependency_target = self.build_graph.get_target(dependency_address)
-      if not dependency_target:
-        raise ValueError('Tests must make targets for dependency specs ahead of them '
-                         'being traversed, {} tried to traverse {} which does not exist.'
-                         .format(target, dependency_address))
-      if dependency_target not in target.dependencies:
-        self.build_graph.inject_dependency(dependent=target.address,
-                                           dependency=dependency_address)
-        target.mark_transitive_invalidation_hash_dirty()
-
-    return target
-
-  def sources_for(self, package_relative_path_globs, package_dir=''):
-    sources_field = SourcesField(
-      Address.parse('{}:_bogus_target_for_test'.format(package_dir)),
-      'sources',
-      {'globs': package_relative_path_globs},
-      None,
-      PathGlobs(tuple(os.path.join(package_dir, path) for path in package_relative_path_globs)),
-      lambda _: True,
-    )
-    field = self.scheduler.product_request(HydratedField, [sources_field])[0]
-    return field.value
-
-  @classmethod
-  def alias_groups(cls):
-    """
-    :API: public
-    """
-    return BuildFileAliases(targets={'target': Target})
-
-  @classmethod
-  def rules(cls):
-    # Required for sources_for:
-    return [RootRule(SourcesField)]
-
-  @classmethod
-  def build_config(cls):
-    build_config = BuildConfiguration()
-    build_config.register_aliases(cls.alias_groups())
-    build_config.register_rules(cls.rules())
-    return build_config
-
-  def setUp(self):
-    """
-    :API: public
-    """
-    super().setUp()
-    # Avoid resetting the Runtracker here, as that is specific to fork'd process cleanup.
-    clean_global_runtime_state(reset_subsystem=True)
-
-    self.addCleanup(self._reset_engine)
-
-    safe_mkdir(self.build_root, clean=True)
-    safe_mkdir(self.pants_workdir)
-    self.addCleanup(safe_rmtree, self.build_root)
-
-    BuildRoot().path = self.build_root
-    self.addCleanup(BuildRoot().reset)
-
-    self.subprocess_dir = os.path.join(self.build_root, '.pids')
-
-    self.options = defaultdict(dict)  # scope -> key-value mapping.
-    self.options[''] = {
-      'pants_workdir': self.pants_workdir,
-      'pants_supportdir': os.path.join(self.build_root, 'build-support'),
-      'pants_distdir': os.path.join(self.build_root, 'dist'),
-      'pants_configdir': os.path.join(self.build_root, 'config'),
-      'pants_subprocessdir': self.subprocess_dir,
-      'cache_key_gen_version': '0-test',
-    }
-    self.options['cache'] = {
-      'read_from': [],
-      'write_to': [],
-    }
-
-    self._build_configuration = self.build_config()
-    self._inited_target = False
-    subsystem_util.init_subsystem(Target.TagAssignments)
-
-  def buildroot_files(self, relpath=None):
-    """Returns the set of all files under the test build root.
-
-    :API: public
-
-    :param string relpath: If supplied, only collect files from this subtree.
-    :returns: All file paths found.
-    :rtype: set
-    """
-    def scan():
-      for root, dirs, files in os.walk(os.path.join(self.build_root, relpath or '')):
-        for f in files:
-          yield os.path.relpath(os.path.join(root, f), self.build_root)
-    return set(scan())
-
-  def _reset_engine(self):
-    if self._scheduler is not None:
-      self._build_graph.reset()
-      self._scheduler.invalidate_all_files()
-
-  @classmethod
-  def aggressively_reset_scheduler(cls):
-    cls._scheduler = None
-    if cls._local_store_dir is not None:
-      safe_rmtree(cls._local_store_dir)
-
-  @classmethod
-  @contextmanager
-  def isolated_local_store(cls):
-    cls.aggressively_reset_scheduler()
-    cls._init_engine()
-    try:
-      yield
-    finally:
-      cls.aggressively_reset_scheduler()
-
-  @property
-  def build_root(self):
-    return self._build_root()
-
-  @property
-  def pants_workdir(self):
-    return self._pants_workdir()
-
-  @classmethod
-  @memoized_method
-  def _build_root(cls):
-    return os.path.realpath(mkdtemp(suffix='_BUILD_ROOT'))
-
-  @classmethod
-  @memoized_method
-  def _pants_workdir(cls):
-    return os.path.join(cls._build_root(), '.pants.d')
-
-  @classmethod
-  def _init_engine(cls):
-    if cls._scheduler is not None:
-      return
-
-    cls._local_store_dir = os.path.realpath(safe_mkdtemp())
-    safe_mkdir(cls._local_store_dir)
-
-    # NB: This uses the long form of initialization because it needs to directly specify
-    # `cls.alias_groups` rather than having them be provided by bootstrap options.
-    graph_session = EngineInitializer.setup_legacy_graph_extended(
-      pants_ignore_patterns=None,
-      local_store_dir=cls._local_store_dir,
-      build_file_imports_behavior='allow',
-      native=init_native(),
-      options_bootstrapper=OptionsBootstrapper.create(args=['--pants-config-files=[]']),
-      build_configuration=cls.build_config(),
-      build_ignore_patterns=None,
-    ).new_session(zipkin_trace_v2=False, build_id="buildid_for_test")
-    cls._scheduler = graph_session.scheduler_session
-    cls._build_graph, cls._address_mapper = graph_session.create_build_graph(
-        TargetRoots([]), cls._build_root()
-      )
-
-  @property
-  def scheduler(self):
-    if self._scheduler is None:
-      self._init_engine()
-      self.post_scheduler_init()
-    return self._scheduler
-
-  def post_scheduler_init(self):
-    """Run after initializing the Scheduler, it will have the same lifetime"""
-    pass
-
-  @property
-  def address_mapper(self):
-    if self._address_mapper is None:
-      self._init_engine()
-    return self._address_mapper
-
-  @property
-  def build_graph(self):
-    if self._build_graph is None:
-      self._init_engine()
-    return self._build_graph
-
-  def reset_build_graph(self, reset_build_files=False, delete_build_files=False):
-    """Start over with a fresh build graph with no targets in it."""
-    if delete_build_files or reset_build_files:
-      files = [f for f in self.buildroot_files() if os.path.basename(f) == 'BUILD']
-      if delete_build_files:
-        for f in files:
-          os.remove(os.path.join(self.build_root, f))
-      self.invalidate_for(*files)
-    if self._build_graph is not None:
-      self._build_graph.reset()
-
-  def set_options_for_scope(self, scope, **kwargs):
-    self.options[scope].update(kwargs)
-
-  def context(self, for_task_types=None, for_subsystems=None, options=None,
-              target_roots=None, console_outstream=None, workspace=None,
-              scheduler=None, address_mapper=None, **kwargs):
-    """
-    :API: public
-
-    :param dict **kwargs: keyword arguments passed in to `create_options_for_optionables`.
-    """
-    # Many tests use source root functionality via the SourceRootConfig.global_instance().
-    # (typically accessed via Target.target_base), so we always set it up, for convenience.
-    for_subsystems = set(for_subsystems or ())
-    for subsystem in for_subsystems:
-      if subsystem.options_scope is None:
-        raise TaskError('You must set a scope on your subsystem type before using it in tests.')
-
-    optionables = {SourceRootConfig} | self._build_configuration.optionables() | for_subsystems
-
-    for_task_types = for_task_types or ()
-    for task_type in for_task_types:
-      scope = task_type.options_scope
-      if scope is None:
-        raise TaskError('You must set a scope on your task type before using it in tests.')
-      optionables.add(task_type)
-      # If task is expected to inherit goal-level options, register those directly on the task,
-      # by subclassing the goal options registrar and settings its scope to the task scope.
-      if issubclass(task_type, GoalOptionsMixin):
-        subclass_name = 'test_{}_{}_{}'.format(
-          task_type.__name__, task_type.goal_options_registrar_cls.options_scope,
-          task_type.options_scope)
-        optionables.add(type(subclass_name, (task_type.goal_options_registrar_cls, ),
-                             {'options_scope': task_type.options_scope}))
-
-    # Now expand to all deps.
-    all_optionables = set()
-    for optionable in optionables:
-      all_optionables.update(si.optionable_cls for si in optionable.known_scope_infos())
-
-    # Now default the option values and override with any caller-specified values.
-    # TODO(benjy): Get rid of the options arg, and require tests to call set_options.
-    options = options.copy() if options else {}
-    for s, opts in self.options.items():
-      scoped_opts = options.setdefault(s, {})
-      scoped_opts.update(opts)
-
-    fake_options = create_options_for_optionables(
-      all_optionables, options=options, **kwargs)
-
-    Subsystem.reset(reset_options=True)
-    Subsystem.set_options(fake_options)
-
-    scheduler = scheduler or self.scheduler
-
-    address_mapper = address_mapper or self.address_mapper
-
-    context = create_context_from_options(fake_options,
-                                          target_roots=target_roots,
-                                          build_graph=self.build_graph,
-                                          build_configuration=self._build_configuration,
-                                          address_mapper=address_mapper,
-                                          console_outstream=console_outstream,
-                                          workspace=workspace,
-                                          scheduler=scheduler)
-    return context
-
-  def tearDown(self):
-    """
-    :API: public
-    """
-    super().tearDown()
-    Subsystem.reset()
-
-  @classproperty
-  def subsystems(cls):
-    """Initialize these subsystems when running your test.
-
-    If your test instantiates a target type that depends on any subsystems, those subsystems need to
-    be initialized in your test. You can override this property to return the necessary subsystem
-    classes.
-
-    :rtype: list of type objects, all subclasses of Subsystem
-    """
-    return Target.subsystems()
-
-  def _init_target_subsystem(self):
-    if not self._inited_target:
-      subsystem_util.init_subsystems(self.subsystems)
-      self._inited_target = True
-
-  def target(self, spec):
-    """Resolves the given target address to a Target object.
-
-    :API: public
-
-    address: The BUILD target address to resolve.
-
-    Returns the corresponding Target or else None if the address does not point to a defined Target.
-    """
-    self._init_target_subsystem()
-
-    address = Address.parse(spec)
-    self.build_graph.inject_address_closure(address)
-    return self.build_graph.get_target(address)
-
-  def targets(self, spec):
-    """Resolves a target spec to one or more Target objects.
-
-    :API: public
-
-    spec: Either BUILD target address or else a target glob using the siblings ':' or
-          descendants '::' suffixes.
-
-    Returns the set of all Targets found.
-    """
-
-    spec = CmdLineSpecParser(self.build_root).parse_spec(spec)
-    targets = []
-    for address in self.build_graph.inject_specs_closure([spec]):
-      targets.append(self.build_graph.get_target(address))
-    return targets
-
-  def create_library(self, path, target_type, name, sources=None, **kwargs):
-    """Creates a library target of given type at the BUILD file at path with sources
-
-    :API: public
-
-     path: The relative path to the BUILD file from the build root.
-     target_type: valid pants target type.
-     name: Name of the library target.
-     sources: List of source file at the path relative to path.
-     **kwargs: Optional attributes that can be set for any library target.
-       Currently it includes support for resources, java_sources, provides
-       and dependencies.
-    """
-    if sources:
-      self.create_files(path, sources)
-    self.add_to_build_file(path, dedent('''
-          %(target_type)s(name='%(name)s',
-            %(sources)s
-            %(java_sources)s
-            %(provides)s
-            %(dependencies)s
-          )
-        ''' % dict(target_type=target_type,
-                   name=name,
-                   sources=('sources=%s,' % repr(sources)
-                              if sources else ''),
-                   java_sources=('java_sources=[%s],'
-                                 % ','.join('"%s"' % str_target for str_target in kwargs.get('java_sources'))
-                                 if 'java_sources' in kwargs else ''),
-                   provides=('provides=%s,' % kwargs.get('provides')
-                              if 'provides' in kwargs else ''),
-                   dependencies=('dependencies=%s,' % kwargs.get('dependencies')
-                              if 'dependencies' in kwargs else ''),
-                   )))
-    return self.target('%s:%s' % (path, name))
-
-  def create_resources(self, path, name, *sources):
-    """
-    :API: public
-    """
-    return self.create_library(path, 'resources', name, sources)
-
-  def assertUnorderedPrefixEqual(self, expected, actual_iter):
-    """Consumes len(expected) items from the given iter, and asserts that they match, unordered.
-
-    :API: public
-    """
-    actual = list(itertools.islice(actual_iter, len(expected)))
-    self.assertEqual(sorted(expected), sorted(actual))
-
-  def assertPrefixEqual(self, expected, actual_iter):
-    """Consumes len(expected) items from the given iter, and asserts that they match, in order.
-
-    :API: public
-    """
-    self.assertEqual(expected, list(itertools.islice(actual_iter, len(expected))))
-
-  def assertInFile(self, string, file_path):
-    """Verifies that a string appears in a file
-
-    :API: public
-    """
-
-    with open(file_path, 'r') as f:
-      content = f.read()
-      self.assertIn(string, content, '"{}" is not in the file {}:\n{}'.format(string, f.name, content))
-
-  @contextmanager
-  def assertRaisesWithMessage(self, exception_type, error_text):
-    """Verifies than an exception message is equal to `error_text`.
-
-    :param type exception_type: The exception type which is expected to be raised within the body.
-    :param str error_text: Text that the exception message should match exactly with
-                           `self.assertEqual()`.
-    :API: public
-    """
-    with self.assertRaises(exception_type) as cm:
-      yield cm
-    self.assertEqual(error_text, str(cm.exception))
-
-  @contextmanager
-  def assertRaisesWithMessageContaining(self, exception_type, error_text):
-    """Verifies that the string `error_text` appears in an exception message.
-
-    :param type exception_type: The exception type which is expected to be raised within the body.
-    :param str error_text: Text that the exception message should contain with `self.assertIn()`.
-    :API: public
-    """
-    with self.assertRaises(exception_type) as cm:
-      yield cm
-    self.assertIn(error_text, str(cm.exception))
-
-  def get_bootstrap_options(self, cli_options=()):
-    """Retrieves bootstrap options.
-
-    :param cli_options: An iterable of CLI flags to pass as arguments to `OptionsBootstrapper`.
-    """
-    args = tuple(['--pants-config-files=[]']) + tuple(cli_options)
-    return OptionsBootstrapper.create(args=args).bootstrap_options.for_global_scope()
-
-  def make_snapshot(self, files):
-    """Makes a snapshot from a collection of files.
-
-    :param files: a dictionary, where key=filename, value=file_content where both are of type String.
-    :return: a Snapshot.
-    """
-    with temporary_dir() as temp_dir:
-      for file_name, content in files.items():
-        safe_file_dump(os.path.join(temp_dir, file_name), content)
-      return self.scheduler.capture_snapshots((
-        PathGlobsAndRoot(PathGlobs(('**',)), temp_dir),
-      ))[0]
-
-  class LoggingRecorder:
-    """Simple logging handler to record warnings."""
-
-    def __init__(self):
-      self._records = []
-      self.level = logging.DEBUG
-
-    def handle(self, record):
-      self._records.append(record)
-
-    def _messages_for_level(self, levelname):
-      return ['{}: {}'.format(record.name, record.getMessage())
-              for record in self._records if record.levelname == levelname]
-
-    def infos(self):
-      return self._messages_for_level('INFO')
-
-    def warnings(self):
-      return self._messages_for_level('WARNING')
-
-    def errors(self):
-      return self._messages_for_level('ERROR')
-
-  @contextmanager
-  def captured_logging(self, level=None):
-    root_logger = logging.getLogger()
-
-    old_level = root_logger.level
-    root_logger.setLevel(level or logging.NOTSET)
-
-    handler = self.LoggingRecorder()
-    root_logger.addHandler(handler)
-    try:
-      yield handler
-    finally:
-      root_logger.setLevel(old_level)
-      root_logger.removeHandler(handler)
-
-  @contextmanager
-  def warnings_catcher(self):
-    with warnings.catch_warnings(record=True) as w:
-      warnings.simplefilter('always')
-      yield w
-
-  def assertWarning(self, w, category, warning_text):
-    single_warning = assert_single_element(w)
-    self.assertEqual(single_warning.category, category)
-    warning_message = single_warning.message
-    self.assertEqual(warning_text, str(warning_message))
-
-  def retrieve_single_product_at_target_base(self, product_mapping, target):
-    mapping_for_target = product_mapping.get(target)
-    single_base_dir = assert_single_element(list(mapping_for_target.keys()))
-    single_product = assert_single_element(mapping_for_target[single_base_dir])
-    return single_product
-
-  def populate_target_dict(self, target_map):
-    """Return a dict containing targets with files generated according to `target_map`.
-
-    The keys of `target_map` are target address strings, while the values of `target_map` should be
-    a dict which contains keyword arguments fed into `self.make_target()`, along with a few special
-    keys. Special keys are:
-    - 'key': used to access the target in the returned dict. Defaults to the target address spec.
-    - 'filemap': creates files at the specified relative paths to the target.
-
-    An `OrderedDict` of 2-tuples must be used with the targets topologically ordered, if
-    they have dependencies on each other. Note that dependency cycles are not currently supported
-    with this method.
-
-    :param target_map: Dict mapping each target address to generate -> kwargs for
-                       `self.make_target()`, along with a 'key' and optionally a 'filemap' argument.
-    :return: Dict mapping the required 'key' argument -> target instance for each element of
-             `target_map`.
-    :rtype: dict
-    """
-    target_dict = {}
-
-    # Create a target from each specification and insert it into `target_dict`.
-    for target_spec, target_kwargs in target_map.items():
-      unprocessed_kwargs = target_kwargs.copy()
-
-      target_base = Address.parse(target_spec).spec_path
-
-      # Populate the target's owned files from the specification.
-      filemap = unprocessed_kwargs.pop('filemap', {})
-      for rel_path, content in filemap.items():
-        buildroot_path = os.path.join(target_base, rel_path)
-        self.create_file(buildroot_path, content)
-
-      # Ensure any dependencies exist in the target dict (`target_map` must then be an
-      # OrderedDict).
-      # The 'key' is used to access the target in `target_dict`, and defaults to `target_spec`.
-      target_address = Address.parse(target_spec)
-      key = unprocessed_kwargs.pop('key', target_address.target_name)
-      dep_targets = []
-      for dep_spec in unprocessed_kwargs.pop('dependencies', []):
-        existing_tgt_key = target_map[dep_spec]['key']
-        dep_targets.append(target_dict[existing_tgt_key])
-
-      # Register the generated target.
-      generated_target = self.make_target(
-        spec=target_spec, dependencies=dep_targets, **unprocessed_kwargs)
-      target_dict[key] = generated_target
-
-    return target_dict
+from pants.testutil.test_base import AbstractTestGenerator as AbstractTestGenerator  # noqa
+from pants.testutil.test_base import TestBase as TestBase  # noqa
diff --git a/tests/python/pants_test/testutils/BUILD b/tests/python/pants_test/testutils/BUILD
index ebf35f8fd08..5a4e3ed76fc 100644
--- a/tests/python/pants_test/testutils/BUILD
+++ b/tests/python/pants_test/testutils/BUILD
@@ -5,7 +5,7 @@ python_library(
   name = 'mock_logger',
   sources = globs('mock_logger.py'),
   dependencies = [
-    'src/python/pants/reporting',
+    'src/python/pants/testutil:mock_logger',
   ],
 )
 
@@ -15,6 +15,7 @@ python_library(
     'file_test_util.py',
   ],
   dependencies = [
+    'src/python/pants/testutil:file_test_util',
   ],
 )
 
@@ -24,9 +25,7 @@ python_library(
     'git_util.py',
   ],
   dependencies = [
-    'src/python/pants/base:revision',
-    'src/python/pants/scm:git',
-    'src/python/pants/util:contextutil',
+    'src/python/pants/testutil:git_util',
   ],
 )
 
@@ -36,8 +35,7 @@ python_library(
     'process_test_util.py',
   ],
   dependencies = [
-    '3rdparty/python:dataclasses',
-    '3rdparty/python:psutil',
+    'src/python/pants/testutil:process_test_util',
   ],
 )
 
@@ -47,6 +45,6 @@ python_library(
     'pexrc_util.py',
   ],
   dependencies = [
-    ':git_util'
-  ,]
+    'src/python/pants/testutil:pexrc_util',
+  ],
 )
diff --git a/tests/python/pants_test/testutils/file_test_util.py b/tests/python/pants_test/testutils/file_test_util.py
index 549ba3d33c3..7d9eebce466 100644
--- a/tests/python/pants_test/testutils/file_test_util.py
+++ b/tests/python/pants_test/testutils/file_test_util.py
@@ -1,64 +1,7 @@
 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
 # Licensed under the Apache License, Version 2.0 (see LICENSE).
 
-import os
-
-
-def exact_files(directory, ignore_links=False):
-  """Returns the relative files contained in the directory.
-
-  :API: public
-
-  :param str directory: Path to directory to search.
-  :param bool ignore_links: Indicates to ignore any file links.
-  """
-  found = []
-  for root, _, files in os.walk(directory, followlinks=not ignore_links):
-    for f in files:
-      p = os.path.join(root, f)
-      if ignore_links and os.path.islink(p):
-        continue
-      found.append(os.path.relpath(p, directory))
-
-  return found
-
-
-def contains_exact_files(directory, expected_files, ignore_links=False):
-  """Check if the only files which directory contains are expected_files.
-
-  :API: public
-
-  :param str directory: Path to directory to search.
-  :param iterable expected_files: Set of filepaths relative to directory to search for.
-  :param bool ignore_links: Indicates to ignore any file links.
-  """
-
-  return sorted(expected_files) == sorted(exact_files(directory, ignore_links=ignore_links))
-
-
-def check_file_content(path, expected_content):
-  """Check file has expected content.
-
-  :API: public
-
-  :param str path: Path to file.
-  :param str expected_content: Expected file content.
-  """
-  with open(path, 'r') as input:
-    return expected_content == input.read()
-
-
-def check_symlinks(directory, symlinks=True):
-  """Check files under directory are symlinks.
-
-  :API: public
-
-  :param str directory: Path to directory to search.
-  :param bool symlinks: If true, verify files are symlinks, if false, verify files are actual files.
-  """
-  for root, _, files in os.walk(directory):
-    for f in files:
-      p = os.path.join(root, f)
-      if symlinks ^ os.path.islink(p):
-        return False
-  return True
+from pants.testutil.file_test_util import check_file_content as check_file_content  # noqa
+from pants.testutil.file_test_util import check_symlinks as check_symlinks  # noqa
+from pants.testutil.file_test_util import contains_exact_files as contains_exact_files  # noqa
+from pants.testutil.file_test_util import exact_files as exact_files  # noqa
diff --git a/tests/python/pants_test/testutils/git_util.py b/tests/python/pants_test/testutils/git_util.py
index efa945ac4dd..5dfeab53fbe 100644
--- a/tests/python/pants_test/testutils/git_util.py
+++ b/tests/python/pants_test/testutils/git_util.py
@@ -1,58 +1,6 @@
 # Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
 # Licensed under the Apache License, Version 2.0 (see LICENSE).
 
-import re
-import subprocess
-from contextlib import contextmanager
-from typing import Iterator, Optional
-
-from pants.base.revision import Revision
-from pants.scm.git import Git
-from pants.util.contextutil import environment_as, temporary_dir
-
-
-MIN_REQUIRED_GIT_VERSION = Revision.semver('1.7.10')
-
-
-def git_version() -> Revision:
-  """Get a Version() based on installed command-line git's version"""
-  stdout = subprocess.run(
-    ['git', '--version'], stdout=subprocess.PIPE, encoding="utf-8", check=True
-  ).stdout
-  # stdout is like 'git version 1.9.1.598.g9119e8b\n'  We want '1.9.1.598'
-  matches = re.search(r'\s(\d+(?:\.\d+)*)[\s\.]', stdout)
-  if matches is None:
-    raise ValueError(f"Not able to parse git version from {stdout}.")
-  return Revision.lenient(matches.group(1))
-
-
-@contextmanager
-def initialize_repo(worktree: str, *, gitdir: Optional[str] = None) -> Iterator[Git]:
-  """Initialize a git repository for the given `worktree`.
-
-  NB: The given `worktree` must contain at least one file which will be committed to form an initial
-  commit.
-
-  :param worktree: The path to the git work tree.
-  :param gitdir: An optional path to the `.git` dir to use.
-  :returns: A `Git` repository object that can be used to interact with the repo.
-  """
-  @contextmanager
-  def use_gitdir() -> Iterator[str]:
-    if gitdir:
-      yield gitdir
-    else:
-      with temporary_dir() as d:
-        yield d
-
-  with use_gitdir() as git_dir, environment_as(GIT_DIR=git_dir, GIT_WORK_TREE=worktree):
-    subprocess.run(['git', 'init'], check=True)
-    subprocess.run(['git', 'config', 'user.email', 'you@example.com'], check=True)
-    # TODO: This method inherits the global git settings, so if a developer has gpg signing on, this
-    # will turn that off. We should probably just disable reading from the global config somehow:
-    # https://git-scm.com/docs/git-config.
-    subprocess.run(['git', 'config', 'commit.gpgSign', 'false'], check=True)
-    subprocess.run(['git', 'config', 'user.name', 'Your Name'], check=True)
-    subprocess.run(['git', 'add', '.'], check=True)
-    subprocess.run(['git', 'commit', '-am', 'Add project files.'], check=True)
-    yield Git(gitdir=git_dir, worktree=worktree)
+from pants.testutil.git_util import MIN_REQUIRED_GIT_VERSION as MIN_REQUIRED_GIT_VERSION  # noqa
+from pants.testutil.git_util import git_version as git_version  # noqa
+from pants.testutil.git_util import initialize_repo as initialize_repo  # noqa
diff --git a/tests/python/pants_test/testutils/mock_logger.py b/tests/python/pants_test/testutils/mock_logger.py
index 8fc590f39f9..ea555d78ff2 100644
--- a/tests/python/pants_test/testutils/mock_logger.py
+++ b/tests/python/pants_test/testutils/mock_logger.py
@@ -1,52 +1,4 @@
 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
 # Licensed under the Apache License, Version 2.0 (see LICENSE).
 
-import sys
-
-from pants.reporting.report import Report
-
-
-class MockLogger:
-  """A standalone logger that writes to stderr.
-
-  :API: public
-
-  Useful for testing without requiring the full RunTracker reporting framework.
-  """
-
-  def __init__(self, level=Report.INFO):
-    self._level = level
-
-  def _maybe_log(self, level, *msg_elements):
-    if level <= self._level:
-      sys.stderr.write(''.join(msg_elements))
-
-  def debug(self, *msg_elements):
-    """
-    :API: public
-    """
-    self._maybe_log(Report.DEBUG, *msg_elements)
-
-  def info(self, *msg_elements):
-    """
-    :API: public
-    """
-    self._maybe_log(Report.INFO, *msg_elements)
-
-  def warn(self, *msg_elements):
-    """
-    :API: public
-    """
-    self._maybe_log(Report.WARN, *msg_elements)
-
-  def error(self, *msg_elements):
-    """
-    :API: public
-    """
-    self._maybe_log(Report.ERROR, *msg_elements)
-
-  def fatal(self, *msg_elements):
-    """
-    :API: public
-    """
-    self._maybe_log(Report.FATAL, *msg_elements)
+from pants.testutil.mock_logger import MockLogger as MockLogger  # noqa
diff --git a/tests/python/pants_test/testutils/pexrc_util.py b/tests/python/pants_test/testutils/pexrc_util.py
index 84da727e814..01de54968e6 100644
--- a/tests/python/pants_test/testutils/pexrc_util.py
+++ b/tests/python/pants_test/testutils/pexrc_util.py
@@ -1,33 +1,6 @@
 # Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
 # Licensed under the Apache License, Version 2.0 (see LICENSE).
 
-import os
-from contextlib import contextmanager
-
-from pants.base.build_environment import get_pants_cachedir
-from pants.util.contextutil import environment_as, temporary_dir
-from pants.util.dirutil import safe_mkdir_for
-
-
-@contextmanager
-def setup_pexrc_with_pex_python_path(interpreter_paths):
-  """A helper function for writing interpreter paths to a PEX_PYTHON_PATH variable in a .pexrc file.
-
-  NB: Mutates HOME and XDG_CACHE_HOME to ensure a `~/.pexrc` that won't trample any existing file
-  and will also be found.
-
-  :param list interpreter_paths: a list of paths to interpreter binaries to include on
-                                 PEX_PYTHON_PATH.
-  """
-  cache_dir = get_pants_cachedir()
-  with temporary_dir() as home:
-    xdg_cache_home = os.path.join(home, '.cache')
-    with environment_as(HOME=home, XDG_CACHE_HOME=xdg_cache_home):
-      target = os.path.join(xdg_cache_home, os.path.basename(cache_dir))
-      safe_mkdir_for(target)
-      os.symlink(cache_dir, target)
-
-      with open(os.path.join(home, '.pexrc'), 'w') as pexrc:
-        pexrc.write('PEX_PYTHON_PATH={}'.format(':'.join(interpreter_paths)))
-
-      yield
+from pants.testutil.pexrc_util import (
+  setup_pexrc_with_pex_python_path as setup_pexrc_with_pex_python_path,
+)  # noqa
diff --git a/tests/python/pants_test/testutils/process_test_util.py b/tests/python/pants_test/testutils/process_test_util.py
index dee9b6bd57b..997960aff3f 100644
--- a/tests/python/pants_test/testutils/process_test_util.py
+++ b/tests/python/pants_test/testutils/process_test_util.py
@@ -1,58 +1,12 @@
 # Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
 # Licensed under the Apache License, Version 2.0 (see LICENSE).
 
-from contextlib import contextmanager
-from dataclasses import dataclass
-from typing import Any
-
-import psutil
-
-
-class ProcessStillRunning(AssertionError):
-  """Raised when a process shouldn't be running but is."""
-
-
-def _safe_iter_matching_processes(name):
-  for proc in psutil.process_iter():
-    try:
-      if name in ''.join(proc.cmdline()):
-        yield proc
-    except (psutil.NoSuchProcess, psutil.AccessDenied):
-      pass
-
-
-def _make_process_table(processes):
-  line_tmpl = '{0:>7} {1:>7} {2}'
-  proc_tuples = [(p.pid, p.ppid(), ''.join(p.cmdline())) for p in processes]
-  return '\n'.join(
-    [
-      line_tmpl.format('PID', 'PGID', 'CMDLINE')
-    ] + [
-      line_tmpl.format(*t) for t in sorted(proc_tuples)
-    ]
-  )
-
-
-@contextmanager
-def no_lingering_process_by_command(name):
-  """Asserts that no process exists for a given command with a helpful error, excluding
-  existing processes outside of the scope of the contextmanager."""
-  context = TrackedProcessesContext(name, set(_safe_iter_matching_processes(name)))
-  yield context
-  delta_processes = context.current_processes()
-  if delta_processes:
-    raise ProcessStillRunning(
-      '{} {} processes lingered after tests:\n{}'
-      .format(len(delta_processes), name, _make_process_table(delta_processes))
-    )
-
-
-@dataclass(frozen=True)
-class TrackedProcessesContext:
-  name: Any
-  before_processes: Any
-
-  def current_processes(self):
-    """Returns the current set of matching processes created since the context was entered."""
-    after_processes = set(_safe_iter_matching_processes(self.name))
-    return after_processes.difference(self.before_processes)
+from pants.testutil.process_test_util import ProcessStillRunning as ProcessStillRunning  # noqa
+from pants.testutil.process_test_util import TrackedProcessesContext as TrackedProcessesContext  # noqa
+from pants.testutil.process_test_util import _make_process_table as _make_process_table  # noqa
+from pants.testutil.process_test_util import (
+  _safe_iter_matching_processes as _safe_iter_matching_processes,
+)  # noqa
+from pants.testutil.process_test_util import (
+  no_lingering_process_by_command as no_lingering_process_by_command,
+)  # noqa