Add __all__ to torch.{fx, distributed, backends} submodules (pytorch#85079)

Pull Request resolved: pytorch#85079
Approved by: https://github.com/rohan-varma
anjali411 authored and pytorchmergebot committed Sep 20, 2022
1 parent a4dca98 commit cf2f552
Showing 15 changed files with 19 additions and 111 deletions.
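
For context, defining __all__ makes a module's public surface explicit and limits what `from module import *` exports, which is why typing helpers such as Union or Dict can be dropped from the public-API allowlist below. A minimal sketch of the effect, using torch.backends.cuda from this diff (the printed list mirrors the __all__ added further down):

# Sketch of what an explicit __all__ changes for consumers of a module.
import torch.backends.cuda as cuda_backend

# The public surface is now declared explicitly:
print(cuda_backend.__all__)
# ['is_built', 'cuFFTPlanCacheAttrContextProp', 'cuFFTPlanCache', 'cuFFTPlanCacheManager',
#  'cuBLASModule', 'preferred_linalg_library', 'cufft_plan_cache', 'matmul']

# A star-import only pulls in the names listed in __all__, so imported
# helpers such as typing.Union no longer leak into the caller's namespace.
namespace = {}
exec("from torch.backends.cuda import *", namespace)
assert "is_built" in namespace
assert "Union" not in namespace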
110 changes: 0 additions & 110 deletions test/allowlist_for_publicAPI.json
@@ -319,9 +319,6 @@
"torch.backends": [
"contextmanager"
],
"torch.backends.cuda": [
"Union"
],
"torch.cpu.amp.autocast_mode": [
"Any"
],
@@ -438,56 +435,12 @@
"Union",
"get_logger"
],
"torch.distributed.elastic.multiprocessing.api": [
"Any",
"Callable",
"Dict",
"FrameType",
"IntFlag",
"Optional",
"ProcessFailure",
"Set",
"TailLog",
"Tuple",
"Union",
"dataclass",
"field",
"nullcontext",
"record",
"redirect_stderr",
"redirect_stdout"
],
"torch.distributed.elastic.multiprocessing.errors": [
"Any",
"Callable",
"Dict",
"GlobalRank",
"JSON",
"List",
"Optional",
"Template",
"Tuple",
"TypeVar",
"dataclass",
"datetime",
"field",
"get_logger",
"wraps"
],
"torch.distributed.elastic.multiprocessing.redirects": [
"contextmanager",
"partial",
"redirect_stderr",
"redirect_stdout"
],
"torch.distributed.elastic.multiprocessing.tail_log": [
"Dict",
"Event",
"Future",
"List",
"TextIO",
"ThreadPoolExecutor"
],
"torch.distributed.elastic.rendezvous": [
"RendezvousHandlerCreator"
],
@@ -755,37 +708,6 @@
"Optional",
"Union"
],
"torch.distributions.kl": [
"Bernoulli",
"Beta",
"Binomial",
"Callable",
"Categorical",
"Cauchy",
"ContinuousBernoulli",
"Dict",
"Dirichlet",
"Distribution",
"Exponential",
"ExponentialFamily",
"Gamma",
"Geometric",
"Gumbel",
"HalfNormal",
"Independent",
"Laplace",
"LowRankMultivariateNormal",
"MultivariateNormal",
"Normal",
"OneHotCategorical",
"Pareto",
"Poisson",
"TransformedDistribution",
"Tuple",
"Type",
"Uniform",
"total_ordering"
],
"torch.distributions.utils": [
"Any",
"Dict",
@@ -846,30 +768,6 @@
"reify",
"unify"
],
"torch.fx.experimental.unification.multipledispatch.conflict": [
"groupby",
"isvariadic"
],
"torch.fx.experimental.unification.multipledispatch.core": [
"Dispatcher",
"MethodDispatcher"
],
"torch.fx.experimental.unification.multipledispatch.dispatcher": [
"AmbiguityWarning",
"Variadic",
"ambiguities",
"expand_tuples",
"isvariadic",
"ordering",
"super_signature",
"warn"
],
"torch.fx.experimental.unification.multipledispatch.utils": [
"OrderedDict"
],
"torch.fx.experimental.unification.multipledispatch.variadic": [
"typename"
],
"torch.fx.experimental.unification.unification_tools": [
"first",
"getter",
@@ -917,14 +815,6 @@
"Union",
"compatibility"
],
"torch.fx.immutable_collections": [
"Any",
"Context",
"Dict",
"List",
"Tuple",
"compatibility"
],
"torch.fx.operator_schemas": [
"Any",
"Callable",
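
The 110 deletions above mirror the additions below: once a module declares __all__, the helper names it merely imports no longer need to be recorded in this allowlist as apparent public API. As a rough, hypothetical sketch of the kind of check the allowlist supports (illustrative only, not the actual test code), a module's public surface can be approximated like this:

import importlib

def public_surface(module_name):
    # Approximation: use __all__ if the module defines it, otherwise every
    # top-level attribute whose name does not start with an underscore.
    mod = importlib.import_module(module_name)
    if hasattr(mod, "__all__"):
        return set(mod.__all__)
    return {name for name in vars(mod) if not name.startswith("_")}

# Before this commit torch.backends.cuda had no __all__, so imported helpers
# such as "Union" showed up as public and had to be allowlisted; with __all__
# defined, only the intended names remain and the entries above can be removed.
print(sorted(public_surface("torch.backends.cuda")))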
2 changes: 2 additions & 0 deletions torch/backends/cuda/__init__.py
@@ -3,6 +3,8 @@

from typing import Union

__all__ = ["is_built", "cuFFTPlanCacheAttrContextProp", "cuFFTPlanCache", "cuFFTPlanCacheManager",
"cuBLASModule", "preferred_linalg_library", "cufft_plan_cache", "matmul"]

def is_built():
r"""Returns whether PyTorch is built with CUDA support. Note that this
2 changes: 2 additions & 0 deletions torch/distributed/elastic/multiprocessing/api.py
@@ -35,6 +35,8 @@

log = logging.getLogger(__name__)

__all__ = ["SignalException", "Std", "to_map", "RunProcsResult", "PContext", "get_std_cm", "MultiprocessContext",
"SubprocessHandler", "SubprocessContext"]

class SignalException(Exception):
"""
1 change: 1 addition & 0 deletions torch/distributed/elastic/multiprocessing/errors/__init__.py
@@ -65,6 +65,7 @@
from .error_handler import ErrorHandler # noqa: F401
from .handlers import get_error_handler # noqa: F401

__all__ = ["ProcessFailure", "ChildFailedError", "record", "ErrorHandler", "get_error_handler"]

log = get_logger()

1 change: 1 addition & 0 deletions torch/distributed/elastic/multiprocessing/tail_log.py
@@ -14,6 +14,7 @@
from threading import Event
from typing import Dict, List, TextIO

__all__ = ["tail_logfile", "TailLog"]

log = logging.getLogger(__name__)

1 change: 0 additions & 1 deletion torch/distributed/rpc/api.py
@@ -58,7 +58,6 @@
_ignore_rref_leak = True
_default_pickler = _internal_rpc_pickler


@contextlib.contextmanager
def _use_rpc_pickler(rpc_pickler):
r"""
1 change: 1 addition & 0 deletions torch/distributions/kl.py
@@ -36,6 +36,7 @@
_KL_REGISTRY = {} # Source of truth mapping a few general (type, type) pairs to functions.
_KL_MEMOIZE: Dict[Tuple[Type, Type], Callable] = {} # Memoized version mapping many specific (type, type) pairs to functions.

__all__ = ["register_kl", "kl_divergence"]

def register_kl(type_p, type_q):
"""
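
The two names kl.py now exports are its intended public API: kl_divergence dispatches on registered (type, type) pairs, and register_kl adds new pairs to the registry. A short usage sketch (MyNormal and its registration are hypothetical, purely for illustration):

import torch
from torch.distributions import Normal
from torch.distributions.kl import kl_divergence, register_kl

p = Normal(torch.tensor(0.0), torch.tensor(1.0))
q = Normal(torch.tensor(1.0), torch.tensor(2.0))
print(kl_divergence(p, q))  # closed-form KL between two Gaussians

class MyNormal(Normal):
    pass

@register_kl(MyNormal, Normal)
def _kl_mynormal_normal(p, q):
    # Hypothetical registration: reuse the Normal-Normal formula.
    return kl_divergence(Normal(p.loc, p.scale), q)

print(kl_divergence(MyNormal(torch.tensor(0.0), torch.tensor(1.0)), q))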
2 changes: 2 additions & 0 deletions torch/fx/experimental/unification/core.py
@@ -6,6 +6,8 @@
from .variable import isvar
from .dispatch import dispatch

__all__ = ["reify", "unify"]

################
# Reificiation #
################
2 changes: 2 additions & 0 deletions torch/fx/experimental/unification/multipledispatch/conflict.py
@@ -1,6 +1,8 @@
from .utils import _toposort, groupby
from .variadic import isvariadic

__all__ = ["AmbiguityWarning", "supercedes", "consistent", "ambiguous", "ambiguities", "super_signature",
"edge", "ordering"]

class AmbiguityWarning(Warning):
pass
1 change: 1 addition & 0 deletions torch/fx/experimental/unification/multipledispatch/core.py
@@ -5,6 +5,7 @@

global_namespace = {} # type: ignore[var-annotated]

__all__ = ["dispatch", "ismethod"]

def dispatch(*types, **kwargs):
""" Dispatch function on the types of the inputs
2 changes: 2 additions & 0 deletions torch/fx/experimental/unification/multipledispatch/dispatcher.py
@@ -5,6 +5,8 @@
from .variadic import Variadic, isvariadic
import itertools as itl

__all__ = ["MDNotImplementedError", "ambiguity_warn", "halt_ordering", "restart_ordering", "variadic_signature_matches_iter",
"variadic_signature_matches", "Dispatcher", "source", "MethodDispatcher", "str_signature", "warning_text"]

class MDNotImplementedError(NotImplementedError):
""" A NotImplementedError for multiple dispatch """
1 change: 1 addition & 0 deletions torch/fx/experimental/unification/multipledispatch/utils.py
@@ -1,5 +1,6 @@
from collections import OrderedDict

__all__ = ["raises", "expand_tuples", "reverse_dict", "groupby", "typename"]

def raises(err, lamda):
try:
1 change: 1 addition & 0 deletions torch/fx/experimental/unification/multipledispatch/variadic.py
@@ -2,6 +2,7 @@

from .utils import typename

__all__ = ["VariadicSignatureType", "isvariadic", "VariadicSignatureMeta", "Variadic"]

class VariadicSignatureType(type):
# checking if subclass is a subclass of self
1 change: 1 addition & 0 deletions torch/fx/experimental/unification/utils.py
@@ -1,3 +1,4 @@
__all__ = ["hashable", "transitive_get", "raises", "reverse_dict", "xfail", "freeze"]
def hashable(x):
try:
hash(x)
2 changes: 2 additions & 0 deletions torch/fx/immutable_collections.py
@@ -3,6 +3,8 @@
from ._compatibility import compatibility
from torch.utils._pytree import Context, _register_pytree_node

__all__ = ["immutable_list", "immutable_dict"]

_help_mutation = """\
If you are attempting to modify the kwargs or args of a torch.fx.Node object,
instead create a new copy of it and assign the copy to the node:
