Enable import following in MYPYNOFOLLOW (now MYPYINDUCTOR) (pytorch#113830)

I'm skipping imports of some packages for now to make this change more
tractable.

For some reason, lintrunner on CI raises errors in all imported `.pyi` files,
even though it doesn't on my local machine. The errors are all from missing
generic types, as the MYPYINDUCTOR config has `disallow_any_generics`
set. I have thus added `disable-error-code` comments to the relevant files,
though I fixed a few that were easy enough.
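
As an illustration of what those comments do, here is a hedged sketch (a hypothetical stub, not one of the changed files) of the `type-arg` errors that `disallow_any_generics` produces and the file-level comment that silences them:

```python
# hypothetical_stub.pyi -- illustrative only, not part of this commit
# mypy: disable-error-code="type-arg"
#
# Without the comment above, mypy with disallow_any_generics = True flags
# every bare generic:
#   error: Missing type parameters for generic type "Callable"  [type-arg]
#   error: Missing type parameters for generic type "Dict"  [type-arg]
from typing import Any, Callable, Dict

def register_hook(hook: Callable) -> None: ...
def get_config() -> Dict: ...

# The "easy enough" fixes simply spell out the type parameters:
def register_hook_fixed(hook: Callable[[Any], None]) -> None: ...
def get_config_fixed() -> Dict[str, Any]: ...
```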

Pull Request resolved: pytorch#113830
Approved by: https://github.com/Skylion007
ghstack dependencies: pytorch#113722, pytorch#113721
int3 authored and pytorchmergebot committed Nov 17, 2023
1 parent 0c8362d commit 631fb33
Showing 14 changed files with 43 additions and 22 deletions.
7 changes: 3 additions & 4 deletions .lintrunner.toml
@@ -178,11 +178,10 @@ init_command = [
]

[[linter]]
code = 'MYPYNOFOLLOW'
code = 'MYPYINDUCTOR'
include_patterns = [
'torch/_dynamo/**/*.py',
'torch/_inductor/**/*.py',
'torch/_C/_dynamo/**/*.py',
]
exclude_patterns = [
'**/fb/**',
@@ -194,8 +193,8 @@
command = [
'python3',
'tools/linter/adapters/mypy_linter.py',
'--config=mypy-nofollow.ini',
'--code=MYPYNOFOLLOW',
'--config=mypy-inductor.ini',
'--code=MYPYINDUCTOR',
'--',
'@{{PATHSFILE}}'
]
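To exercise the renamed linter locally against a file, something like `lintrunner --take MYPYINDUCTOR torch/_inductor/ir.py` should work (the `--take` flag and the example path are assumptions about lintrunner usage, not taken from this commit).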
34 changes: 24 additions & 10 deletions mypy-nofollow.ini → mypy-inductor.ini
@@ -8,7 +8,7 @@ warn_redundant_casts = True
show_error_codes = True
show_column_numbers = True
check_untyped_defs = True
follow_imports = skip
follow_imports = silent

# do not reenable this:
# https://github.com/pytorch/pytorch/pull/60006#issuecomment-866130657
@@ -19,8 +19,9 @@ files =
torch/_dynamo,
torch/_inductor

# Minimum version supported - variable annotations were introduced
# in Python 3.8
# We access some Python runtime classes / class members that are only available
# in 3.11. These accesses are gated by runtime checks that cannot always be
# understood by mypy.
python_version = 3.11

[mypy-deeplearning.*]
@@ -32,21 +33,34 @@ ignore_missing_imports = True
[mypy-einops.*]
ignore_missing_imports = True

[mypy-sympy]
ignore_missing_imports = True

# sympy is too dynamic, hard to type properly
[mypy-sympy.*]
ignore_missing_imports = True
follow_imports = skip

[mypy-torch._C]
ignore_errors = True
# FIXME: importing this creates lots of type errors
[mypy-torch._dynamo.variables.*]
follow_imports = skip

[mypy-torch._C.*]
ignore_errors = True
# FIXME: importing this creates lots of type errors
[mypy-torch.backends.*]
follow_imports = skip

[mypy-torch.fb.*]
ignore_missing_imports = True

# FIXME: importing this creates lots of type errors
[mypy-torch.fx.*]
follow_imports = skip

# FIXME: importing this creates lots of type errors
[mypy-torch.testing._internal.*]
follow_imports = skip

# sympy is too dynamic, hard to type properly
[mypy-torch.utils._sympy.*]
follow_imports = skip

[mypy-torch_xla.*]
ignore_missing_imports = True

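The `follow_imports` flip from `skip` to `silent` is the heart of this change. A minimal sketch with two hypothetical modules (not files from this commit): under `skip`, anything imported from an unanalyzed module degrades to `Any` and real bugs pass silently; under `silent`, mypy analyzes the imported module, suppresses errors inside it, but still checks callers against its real types.

```python
# In real use these live in two files; inlined here so the sketch runs.

# helper.py -- under follow_imports = skip this module is never analyzed,
# so everything imported from it is typed as Any.
def make_counts() -> dict[str, int]:
    return {"calls": 0}

# main.py -- a file covered by the MYPYINDUCTOR config would contain:
#     from helper import make_counts
#
# follow_imports = skip:   make_counts is Any; the bad assignment passes.
# follow_imports = silent: helper is analyzed quietly, and mypy reports
#     error: Incompatible types in assignment (expression has type
#     "dict[str, int]", variable has type "int")  [assignment]
total: int = make_counts()
```

The per-module `follow_imports = skip` overrides retained above (sympy, `torch.fx`, etc.) carve out exactly the modules that are still too error-laden or too dynamic to follow.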
6 changes: 4 additions & 2 deletions tools/generate_torch_version.py
@@ -86,11 +86,13 @@ def get_torch_version(sha: Optional[str] = None) -> str:
version = tagged_version

with open(version_path, "w") as f:
f.write("from typing import Optional\n\n")
f.write("__all__ = ['__version__', 'debug', 'cuda', 'git_version', 'hip']\n")
f.write(f"__version__ = '{version}'\n")
# NB: This is not 100% accurate, because you could have built the
# library code with DEBUG, but csrc without DEBUG (in which case
# this would claim to be a release build when it's not.)
f.write(f"debug = {repr(bool(args.is_debug))}\n")
f.write(f"cuda = {repr(args.cuda_version)}\n")
f.write(f"cuda: Optional[str] = {repr(args.cuda_version)}\n")
f.write(f"git_version = {repr(sha)}\n")
f.write(f"hip = {repr(args.hip_version)}\n")
f.write(f"hip: Optional[str] = {repr(args.hip_version)}\n")
1 change: 1 addition & 0 deletions torch/_C/_VariableFunctions.pyi.in
@@ -1,4 +1,5 @@
# ${generated_comment}
# mypy: disable-error-code="type-arg"

import builtins
from typing import (
1 change: 1 addition & 0 deletions torch/_C/__init__.pyi.in
@@ -1,4 +1,5 @@
# ${generated_comment}
# mypy: disable-error-code="type-arg"

import builtins
from enum import Enum, IntEnum
4 changes: 2 additions & 2 deletions torch/_C/_autograd.pyi
@@ -100,8 +100,8 @@ def _supported_activities() -> Set[ProfilerActivity]: ...
def _enable_record_function(enable: bool) -> None: ...
def _set_empty_test_observer(is_global: bool, sampling_prob: float) -> None: ...
def _push_saved_tensors_default_hooks(
pack_hook: Callable,
unpack_hook: Callable,
pack_hook: Callable[[torch.Tensor], Any],
unpack_hook: Callable[[Any], torch.Tensor],
) -> None: ...
def _pop_saved_tensors_default_hooks() -> None: ...
def _unsafe_set_version_counter(t: torch.Tensor, prev_version: int) -> None: ...
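The tightened hook signatures encode the saved-tensors-hooks contract: the pack hook takes a `torch.Tensor` and may return any payload, and the unpack hook must rebuild a tensor from that payload. A small usage sketch through the public `torch.autograd.graph.saved_tensors_hooks` context manager (the particular hooks are illustrative):

```python
import torch

def pack(t: torch.Tensor) -> torch.Tensor:
    # The payload may be Any; this sketch just detaches the saved tensor.
    return t.detach()

def unpack(payload: torch.Tensor) -> torch.Tensor:
    # Must hand a tensor back to autograd for the backward pass.
    return payload

x = torch.randn(3, requires_grad=True)
with torch.autograd.graph.saved_tensors_hooks(pack, unpack):
    y = (x * x).sum()
y.backward()  # unpack() runs here to recover the saved tensors
```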
1 change: 1 addition & 0 deletions torch/_C/_distributed_c10d.pyi
@@ -1,3 +1,4 @@
# mypy: disable-error-code="type-arg"
from datetime import timedelta
from enum import Enum
from typing import Any, Dict, List, Optional, overload, Tuple, Union
1 change: 1 addition & 0 deletions torch/_C/_distributed_rpc.pyi
@@ -1,3 +1,4 @@
# mypy: disable-error-code="type-arg"
from datetime import timedelta
from typing import Any, Dict, Generic, List, Optional, overload, Tuple, Type, TypeVar

1 change: 1 addition & 0 deletions torch/_C/_nn.pyi.in
@@ -1,3 +1,4 @@
# mypy: disable-error-code="type-arg"
from typing import List, Optional, overload, Sequence, Tuple, Union

from torch import memory_format, Tensor
1 change: 0 additions & 1 deletion torch/_C/_profiler.pyi
@@ -58,7 +58,6 @@ class _ExperimentalConfig:
profiler_measure_per_kernel: bool = ...,
verbose: bool = ...,
performance_events: List[str] = ...,
privateuse1_config: Dict = ...,
enable_cuda_sync_events: bool = ...,
) -> None: ...

3 changes: 2 additions & 1 deletion torch/_dynamo/utils.py
@@ -1445,6 +1445,7 @@ def get_fake_value(node, tx, allow_non_graph_fake=False):
If `True`, you must be prepared to deal with such return values, ideally
by further wrapping them as this graph's fakes.
"""
from torch.utils._sympy.value_ranges import ValueRangeError
from .exc import (
TorchRuntimeError,
unimplemented,
@@ -1518,7 +1519,7 @@
f"constrain_as_value OR constrain_as_size APIs. {cause}",
case_name="constrain_as_size_example",
)
elif isinstance(cause, torch.utils._sympy.value_ranges.ValueRangeError):
elif isinstance(cause, ValueRangeError):
raise UserError(UserErrorType.CONSTRAINT_VIOLATION, e.args[0]) from e
raise TorchRuntimeError(str(e)).with_traceback(e.__traceback__) from None

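The hunk above trades a fully qualified attribute chain for an explicit function-local import. A sketch of the pattern, assuming (as this commit suggests) that mypy cannot resolve `torch.utils._sympy.value_ranges.ValueRangeError` through an attribute chain unless the submodule is already bound, whereas an explicit import always resolves (`classify` is a hypothetical helper, not code from the diff):

```python
def classify(cause: BaseException) -> str:
    # Function-local import: resolvable by mypy, avoids import cycles, and
    # only pays the import cost when the error path is actually taken.
    from torch.utils._sympy.value_ranges import ValueRangeError

    if isinstance(cause, ValueRangeError):
        return "constraint-violation"
    return "runtime-error"
```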
2 changes: 1 addition & 1 deletion torch/distributed/optim/zero_redundancy_optimizer.py
@@ -455,7 +455,7 @@ def _clear_cache(self) -> None:
self._device_to_params_per_rank_cache.clear()
self._bucket_assignments_per_rank_cache.clear()

def add_param_group(self, param_group: dict) -> None:
def add_param_group(self, param_group: Dict[str, Any]) -> None:
r"""
Add a parameter group to the :class:`Optimizer` 's ``param_groups``.
2 changes: 1 addition & 1 deletion torch/distributed/optim/zero_redundancy_optimizer.pyi
@@ -61,7 +61,7 @@ class ZeroRedundancyOptimizer(Optimizer, Joinable):
overlap_with_ddp: bool = ...,
**defaults: Any,
) -> None: ...
def add_param_group(self, param_group: dict) -> None: ...
def add_param_group(self, param_group: Dict[str, Any]) -> None: ...
def consolidate_state_dict(self, to: int = ...) -> None: ...
@overload
def step(self, closure: None = ..., **kwargs: Any) -> None: ...
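`Dict[str, Any]` is the natural shape for a param group: a `"params"` entry plus heterogeneous per-group hyperparameters. A usage sketch, shown with a plain `torch.optim.SGD` for brevity (ZeroRedundancyOptimizer exposes the same `Optimizer` interface); the model and values are invented:

```python
from typing import Any, Dict

import torch

backbone = torch.nn.Linear(4, 4)
head = torch.nn.Linear(4, 2)
opt = torch.optim.SGD(backbone.parameters(), lr=0.1)

# str keys, heterogeneous values -- exactly Dict[str, Any]:
group: Dict[str, Any] = {"params": list(head.parameters()), "lr": 0.01}
opt.add_param_group(group)
```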
1 change: 1 addition & 0 deletions torch/jit/_script.pyi
@@ -1,3 +1,4 @@
# mypy: disable-error-code="type-arg"
from typing import (
Any,
Callable,
