From 631fb33fd618c12053410574e0bad498e3e6e29e Mon Sep 17 00:00:00 2001 From: Jez Ng Date: Thu, 16 Nov 2023 12:28:24 -0800 Subject: [PATCH] Enable import following in MYPYNOFOLLOW (now MYPYINDUCTOR) (#113830) Skipping importing some packages for now to make this change more tractable. For some reason, lintrunner on CI raises errors in all imported `.pyi` files, even though it doesn't on my local machine. The errors are all from missing generic types, as the MYPYINDUCTOR config has `disallow_any_generics` set. I have thus added `disable-error-code` comments to the relevant files, though I fixed a few that were easy enough. Pull Request resolved: https://github.com/pytorch/pytorch/pull/113830 Approved by: https://github.com/Skylion007 ghstack dependencies: #113722, #113721 --- .lintrunner.toml | 7 ++-- mypy-nofollow.ini => mypy-inductor.ini | 34 +++++++++++++------ tools/generate_torch_version.py | 6 ++-- torch/_C/_VariableFunctions.pyi.in | 1 + torch/_C/__init__.pyi.in | 1 + torch/_C/_autograd.pyi | 4 +-- torch/_C/_distributed_c10d.pyi | 1 + torch/_C/_distributed_rpc.pyi | 1 + torch/_C/_nn.pyi.in | 1 + torch/_C/_profiler.pyi | 1 - torch/_dynamo/utils.py | 3 +- .../optim/zero_redundancy_optimizer.py | 2 +- .../optim/zero_redundancy_optimizer.pyi | 2 +- torch/jit/_script.pyi | 1 + 14 files changed, 43 insertions(+), 22 deletions(-) rename mypy-nofollow.ini => mypy-inductor.ini (54%) diff --git a/.lintrunner.toml b/.lintrunner.toml index eb9b3b5fa1797b..52d59e2bfd17d8 100644 --- a/.lintrunner.toml +++ b/.lintrunner.toml @@ -178,11 +178,10 @@ init_command = [ ] [[linter]] -code = 'MYPYNOFOLLOW' +code = 'MYPYINDUCTOR' include_patterns = [ 'torch/_dynamo/**/*.py', 'torch/_inductor/**/*.py', - 'torch/_C/_dynamo/**/*.py', ] exclude_patterns = [ '**/fb/**', @@ -194,8 +193,8 @@ exclude_patterns = [ command = [ 'python3', 'tools/linter/adapters/mypy_linter.py', - '--config=mypy-nofollow.ini', - '--code=MYPYNOFOLLOW', + '--config=mypy-inductor.ini', + '--code=MYPYINDUCTOR', '--', '@{{PATHSFILE}}' ] diff --git a/mypy-nofollow.ini b/mypy-inductor.ini similarity index 54% rename from mypy-nofollow.ini rename to mypy-inductor.ini index 3ef42003595a96..b6026eceb72cda 100644 --- a/mypy-nofollow.ini +++ b/mypy-inductor.ini @@ -8,7 +8,7 @@ warn_redundant_casts = True show_error_codes = True show_column_numbers = True check_untyped_defs = True -follow_imports = skip +follow_imports = silent # do not reenable this: # https://github.com/pytorch/pytorch/pull/60006#issuecomment-866130657 @@ -19,8 +19,9 @@ files = torch/_dynamo, torch/_inductor -# Minimum version supported - variable annotations were introduced -# in Python 3.8 +# We access some Python runtime classes / class members that are only available +# in 3.11. These accesses are gated by runtime checks that cannot always be +# understood by mypy. 
python_version = 3.11 [mypy-deeplearning.*] @@ -32,21 +33,34 @@ ignore_missing_imports = True [mypy-einops.*] ignore_missing_imports = True -[mypy-sympy] -ignore_missing_imports = True - +# sympy is too dynamic, hard to type properly [mypy-sympy.*] ignore_missing_imports = True +follow_imports = skip -[mypy-torch._C] -ignore_errors = True +# FIXME: importing this creates lots of type errors +[mypy-torch._dynamo.variables.*] +follow_imports = skip -[mypy-torch._C.*] -ignore_errors = True +# FIXME: importing this creates lots of type errors +[mypy-torch.backends.*] +follow_imports = skip [mypy-torch.fb.*] ignore_missing_imports = True +# FIXME: importing this creates lots of type errors +[mypy-torch.fx.*] +follow_imports = skip + +# FIXME: importing this creates lots of type errors +[mypy-torch.testing._internal.*] +follow_imports = skip + +# sympy is too dynamic, hard to type properly +[mypy-torch.utils._sympy.*] +follow_imports = skip + [mypy-torch_xla.*] ignore_missing_imports = True diff --git a/tools/generate_torch_version.py b/tools/generate_torch_version.py index d90d3646ab1910..936896934c493f 100644 --- a/tools/generate_torch_version.py +++ b/tools/generate_torch_version.py @@ -86,11 +86,13 @@ def get_torch_version(sha: Optional[str] = None) -> str: version = tagged_version with open(version_path, "w") as f: + f.write("from typing import Optional\n\n") + f.write("__all__ = ['__version__', 'debug', 'cuda', 'git_version', 'hip']\n") f.write(f"__version__ = '{version}'\n") # NB: This is not 100% accurate, because you could have built the # library code with DEBUG, but csrc without DEBUG (in which case # this would claim to be a release build when it's not.) f.write(f"debug = {repr(bool(args.is_debug))}\n") - f.write(f"cuda = {repr(args.cuda_version)}\n") + f.write(f"cuda: Optional[str] = {repr(args.cuda_version)}\n") f.write(f"git_version = {repr(sha)}\n") - f.write(f"hip = {repr(args.hip_version)}\n") + f.write(f"hip: Optional[str] = {repr(args.hip_version)}\n") diff --git a/torch/_C/_VariableFunctions.pyi.in b/torch/_C/_VariableFunctions.pyi.in index 6b6178452350e1..24f9f0f9e9fb43 100644 --- a/torch/_C/_VariableFunctions.pyi.in +++ b/torch/_C/_VariableFunctions.pyi.in @@ -1,4 +1,5 @@ # ${generated_comment} +# mypy: disable-error-code="type-arg" import builtins from typing import ( diff --git a/torch/_C/__init__.pyi.in b/torch/_C/__init__.pyi.in index 3753baab4288d0..6847955348ed16 100644 --- a/torch/_C/__init__.pyi.in +++ b/torch/_C/__init__.pyi.in @@ -1,4 +1,5 @@ # ${generated_comment} +# mypy: disable-error-code="type-arg" import builtins from enum import Enum, IntEnum diff --git a/torch/_C/_autograd.pyi b/torch/_C/_autograd.pyi index 0a92d31b3231e1..4b62950fe85c13 100644 --- a/torch/_C/_autograd.pyi +++ b/torch/_C/_autograd.pyi @@ -100,8 +100,8 @@ def _supported_activities() -> Set[ProfilerActivity]: ... def _enable_record_function(enable: bool) -> None: ... def _set_empty_test_observer(is_global: bool, sampling_prob: float) -> None: ... def _push_saved_tensors_default_hooks( - pack_hook: Callable, - unpack_hook: Callable, + pack_hook: Callable[[torch.Tensor], Any], + unpack_hook: Callable[[Any], torch.Tensor], ) -> None: ... def _pop_saved_tensors_default_hooks() -> None: ... def _unsafe_set_version_counter(t: torch.Tensor, prev_version: int) -> None: ... 
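As context for the `type-arg` suppressions above and below, here is a minimal sketch (illustration only, not part of the patch; the function names are made up) of what `disallow_any_generics` flags and the two remedies this change uses: spelling out the type parameters, as done for the saved-tensor hooks in torch/_C/_autograd.pyi, or silencing the error code for a whole stub file with a `# mypy: disable-error-code="type-arg"` comment, as added to several generated .pyi files.

# Illustration only (hypothetical names), Python, checked with disallow_any_generics = True.
from typing import Any, Callable

# mypy flags the bare generics below with a `type-arg` error
# ("missing type parameters for generic type"):
def push_hooks_untyped(pack_hook: Callable, unpack_hook: Callable) -> None:
    ...

# Remedy 1: give the generic explicit type parameters, mirroring the
# torch/_C/_autograd.pyi change above:
def push_hooks_typed(
    pack_hook: Callable[[Any], Any],
    unpack_hook: Callable[[Any], Any],
) -> None:
    ...

# Remedy 2: suppress the error code for the whole file by putting
#     # mypy: disable-error-code="type-arg"
# at the top of the stub, as this patch does for the large generated .pyi files.

Explicit parameters are preferable where the intended signature is known (as with the hooks), while the file-level suppression keeps the large generated stubs tractable for now.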
diff --git a/torch/_C/_distributed_c10d.pyi b/torch/_C/_distributed_c10d.pyi index 93d780e7b692d9..6e16c3a4c1b0b3 100644 --- a/torch/_C/_distributed_c10d.pyi +++ b/torch/_C/_distributed_c10d.pyi @@ -1,3 +1,4 @@ +# mypy: disable-error-code="type-arg" from datetime import timedelta from enum import Enum from typing import Any, Dict, List, Optional, overload, Tuple, Union diff --git a/torch/_C/_distributed_rpc.pyi b/torch/_C/_distributed_rpc.pyi index 6bf2be5c031abf..7909e0b8e33c6a 100644 --- a/torch/_C/_distributed_rpc.pyi +++ b/torch/_C/_distributed_rpc.pyi @@ -1,3 +1,4 @@ +# mypy: disable-error-code="type-arg" from datetime import timedelta from typing import Any, Dict, Generic, List, Optional, overload, Tuple, Type, TypeVar diff --git a/torch/_C/_nn.pyi.in b/torch/_C/_nn.pyi.in index c757798f15e5e8..cca00d5c577d43 100644 --- a/torch/_C/_nn.pyi.in +++ b/torch/_C/_nn.pyi.in @@ -1,3 +1,4 @@ +# mypy: disable-error-code="type-arg" from typing import List, Optional, overload, Sequence, Tuple, Union from torch import memory_format, Tensor diff --git a/torch/_C/_profiler.pyi b/torch/_C/_profiler.pyi index 347db2482cd439..f1b8cda073b0da 100644 --- a/torch/_C/_profiler.pyi +++ b/torch/_C/_profiler.pyi @@ -58,7 +58,6 @@ class _ExperimentalConfig: profiler_measure_per_kernel: bool = ..., verbose: bool = ..., performance_events: List[str] = ..., - privateuse1_config: Dict = ..., enable_cuda_sync_events: bool = ..., ) -> None: ... diff --git a/torch/_dynamo/utils.py b/torch/_dynamo/utils.py index 9597680df83692..aa0719a3ab565a 100644 --- a/torch/_dynamo/utils.py +++ b/torch/_dynamo/utils.py @@ -1445,6 +1445,7 @@ def get_fake_value(node, tx, allow_non_graph_fake=False): If `True`, you must be prepared to deal with such return values, ideally by further wrapping them as this graph's fakes. """ + from torch.utils._sympy.value_ranges import ValueRangeError from .exc import ( TorchRuntimeError, unimplemented, @@ -1518,7 +1519,7 @@ def get_fake_value(node, tx, allow_non_graph_fake=False): f"constrain_as_value OR constrain_as_size APIs. {cause}", case_name="constrain_as_size_example", ) - elif isinstance(cause, torch.utils._sympy.value_ranges.ValueRangeError): + elif isinstance(cause, ValueRangeError): raise UserError(UserErrorType.CONSTRAINT_VIOLATION, e.args[0]) from e raise TorchRuntimeError(str(e)).with_traceback(e.__traceback__) from None diff --git a/torch/distributed/optim/zero_redundancy_optimizer.py b/torch/distributed/optim/zero_redundancy_optimizer.py index be128c1a66bbfb..8a3be3b0181536 100644 --- a/torch/distributed/optim/zero_redundancy_optimizer.py +++ b/torch/distributed/optim/zero_redundancy_optimizer.py @@ -455,7 +455,7 @@ def _clear_cache(self) -> None: self._device_to_params_per_rank_cache.clear() self._bucket_assignments_per_rank_cache.clear() - def add_param_group(self, param_group: dict) -> None: + def add_param_group(self, param_group: Dict[str, Any]) -> None: r""" Add a parameter group to the :class:`Optimizer` 's ``param_groups``. diff --git a/torch/distributed/optim/zero_redundancy_optimizer.pyi b/torch/distributed/optim/zero_redundancy_optimizer.pyi index 0d6f5dccaa94e2..c341e00e3ee3d7 100644 --- a/torch/distributed/optim/zero_redundancy_optimizer.pyi +++ b/torch/distributed/optim/zero_redundancy_optimizer.pyi @@ -61,7 +61,7 @@ class ZeroRedundancyOptimizer(Optimizer, Joinable): overlap_with_ddp: bool = ..., **defaults: Any, ) -> None: ... - def add_param_group(self, param_group: dict) -> None: ... + def add_param_group(self, param_group: Dict[str, Any]) -> None: ... 
def consolidate_state_dict(self, to: int = ...) -> None: ... @overload def step(self, closure: None = ..., **kwargs: Any) -> None: ... diff --git a/torch/jit/_script.pyi b/torch/jit/_script.pyi index cd688a4b14beff..0a5ffec2a7abcc 100644 --- a/torch/jit/_script.pyi +++ b/torch/jit/_script.pyi @@ -1,3 +1,4 @@ +# mypy: disable-error-code="type-arg" from typing import ( Any, Callable,