Fix global flake8 issues (pytorch#124771)
Prior to this `lintrunner --all-files --take FLAKE8` failed.

Pull Request resolved: pytorch#124771
Approved by: https://github.com/Skylion007
ghstack dependencies: pytorch#124428
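
For reference, the failing check quoted above can be reproduced locally; the snippet below is a sketch that assumes lintrunner is installed and initialized for this repository:

    # install the lintrunner driver and the linters declared in .lintrunner.toml (assumed setup)
    pip install lintrunner
    lintrunner init
    # run only the FLAKE8 linter over every file, as in the commit message
    lintrunner --all-files --take FLAKE8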
aorenste authored and pytorchmergebot committed Apr 26, 2024
1 parent 609c958 commit a8574a9
Showing 55 changed files with 213 additions and 211 deletions.
2 changes: 1 addition & 1 deletion .github/scripts/cherry_pick.py
@@ -29,7 +29,7 @@ def parse_args() -> Any:
"--onto-branch", type=str, required=True, help="the target release branch"
)
parser.add_argument(
"--github-actor", type=str, required=True, help="all the worlds a stage"
"--github-actor", type=str, required=True, help="all the world's a stage"
)
parser.add_argument(
"--classification",
@@ -152,8 +152,8 @@ def run(
result_entry["sequence_length"] = sequence_length
result_entry["n_heads"] = num_heads
result_entry["embed_dim"] = embed_dim
result_entry["time_native_mha_slow(μs)"] = f"{time_native_mha_slow:.3f}"
result_entry["time_native_mha_fast (μs)"] = f"{time_native_mha_fast:.3f}"
result_entry["time_native_mha_slow(\u00B5s)"] = f"{time_native_mha_slow:.3f}"
result_entry["time_native_mha_fast (\u00B5s)"] = f"{time_native_mha_fast:.3f}"
result_entry["speedup flash_mha v native_mha"] = f"{speedup_fast_internal:.3f}"
result_entry["padding"] = f"{padding:.3f}"
return result_entry
8 changes: 4 additions & 4 deletions benchmarks/transformer/sdp.py
@@ -81,10 +81,10 @@ def get_entries(self) -> List:
@classmethod
def get_entry_names(cls) -> List[str]:
return [
"nn_mha_time (μs)",
"compiled_nn_mha_time (μs)",
"composite_mha_time (μs)",
"compiled_composite_mha_time (μs)",
"nn_mha_time (\u00B5s)",
"compiled_nn_mha_time (\u00B5s)",
"composite_mha_time (\u00B5s)",
"compiled_composite_mha_time (\u00B5s)",
]


2 changes: 1 addition & 1 deletion functorch/einops/_parsing.py
@@ -28,7 +28,7 @@
import warnings
from typing import Collection, List, Mapping, Optional, Set, Tuple, Union

_ellipsis: str = "…" # NB, this is a single unicode symbol. String is used as it is not a list, but can be iterated
_ellipsis: str = "\u2026" # NB, this is a single unicode symbol. String is used as it is not a list, but can be iterated


class AnonymousAxis:
6 changes: 3 additions & 3 deletions test/distributions/test_distributions.py
@@ -3752,11 +3752,11 @@ def test_dirichlet_log_prob(self):

@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_dirichlet_log_prob_zero(self):
# Specifically test the special case where x=0 and α=1. The PDF is
# proportional to x**(α-1), which in this case works out to 0**0=1.
# Specifically test the special case where x=0 and alpha=1. The PDF is
# proportional to x**(alpha-1), which in this case works out to 0**0=1.
# The log PDF of this term should therefore be 0. However, it's easy
# to accidentally introduce NaNs by calculating log(x) without regard
# for the value of α-1.
# for the value of alpha-1.
alpha = torch.tensor([1, 2])
dist = Dirichlet(alpha)
x = torch.tensor([0, 1])
2 changes: 1 addition & 1 deletion test/functorch/test_parsing.py
@@ -107,7 +107,7 @@ def test_invalid_expressions(self) -> None:
ParsedExpression("(a) ((b c) (d ...))")

# invalid identifiers
ParsedExpression("camelCase under_scored cApiTaLs ß ...")
ParsedExpression("camelCase under_scored cApiTaLs \u00DF ...")
with self.assertRaises(ValueError):
ParsedExpression("1a")
with self.assertRaises(ValueError):
4 changes: 2 additions & 2 deletions test/inductor/test_templated_attention.py
@@ -346,8 +346,8 @@ def sdpa_hop(q, k, v, score_mod):
# this means that the base for the LSE computed by ref is e while for the compiled
# version it is 2. To compare we use the change of base formula
# log_2(x_compiled) = log_e(x_ref) * log_2(e) where
# x_ref = ∑_i e^(scores[i])
# x_compiled = ∑_i 2^(log2(e) * scores[i])
# x_ref = sum(_i e^(scores[i]))
# x_compiled = sum(_i 2^(log2(e) * scores[i]))

self.assertTrue(ref_lse.dtype == torch.float32)
self.assertTrue(compiled_lse.dtype == torch.float32)
20 changes: 10 additions & 10 deletions test/package/test_directory_reader.py
@@ -111,16 +111,16 @@ def test_resource_reader(self):
with PackageExporter(filename) as pe:
# Layout looks like:
# package
# ├── one/
# │   ├── a.txt
# │   ├── b.txt
# │   ├── c.txt
# │   └── three/
# │       ├── d.txt
# │       └── e.txt
# └── two/
#     ├── f.txt
#     └── g.txt
# |-- one/
# |   |-- a.txt
# |   |-- b.txt
# |   |-- c.txt
# |   +-- three/
# |       |-- d.txt
# |       +-- e.txt
# +-- two/
#     |-- f.txt
#     +-- g.txt
pe.save_text("one", "a.txt", "hello, a!")
pe.save_text("one", "b.txt", "hello, b!")
pe.save_text("one", "c.txt", "hello, c!")
64 changes: 32 additions & 32 deletions test/package/test_misc.py
@@ -38,46 +38,46 @@ def test_file_structure(self):

export_plain = dedent(
"""\
├── .data
│   ├── extern_modules
│   ├── python_version
│   ├── serialization_id
│   └── version
├── main
│   └── main
├── obj
│   └── obj.pkl
├── package_a
│   ├── __init__.py
│   └── subpackage.py
├── byteorder
└── module_a.py
\u251c\u2500\u2500 .data
\u2502   \u251c\u2500\u2500 extern_modules
\u2502   \u251c\u2500\u2500 python_version
\u2502   \u251c\u2500\u2500 serialization_id
\u2502   \u2514\u2500\u2500 version
\u251c\u2500\u2500 main
\u2502   \u2514\u2500\u2500 main
\u251c\u2500\u2500 obj
\u2502   \u2514\u2500\u2500 obj.pkl
\u251c\u2500\u2500 package_a
\u2502   \u251c\u2500\u2500 __init__.py
\u2502   \u2514\u2500\u2500 subpackage.py
\u251c\u2500\u2500 byteorder
\u2514\u2500\u2500 module_a.py
"""
)
export_include = dedent(
"""\
├── obj
│   └── obj.pkl
└── package_a
    └── subpackage.py
\u251c\u2500\u2500 obj
\u2502   \u2514\u2500\u2500 obj.pkl
\u2514\u2500\u2500 package_a
    \u2514\u2500\u2500 subpackage.py
"""
)
import_exclude = dedent(
"""\
├── .data
│   ├── extern_modules
│   ├── python_version
│   ├── serialization_id
│   └── version
├── main
│   └── main
├── obj
│   └── obj.pkl
├── package_a
│   ├── __init__.py
│   └── subpackage.py
├── byteorder
└── module_a.py
\u251c\u2500\u2500 .data
\u2502   \u251c\u2500\u2500 extern_modules
\u2502   \u251c\u2500\u2500 python_version
\u2502   \u251c\u2500\u2500 serialization_id
\u2502   \u2514\u2500\u2500 version
\u251c\u2500\u2500 main
\u2502   \u2514\u2500\u2500 main
\u251c\u2500\u2500 obj
\u2502   \u2514\u2500\u2500 obj.pkl
\u251c\u2500\u2500 package_a
\u2502   \u251c\u2500\u2500 __init__.py
\u2502   \u2514\u2500\u2500 subpackage.py
\u251c\u2500\u2500 byteorder
\u2514\u2500\u2500 module_a.py
"""
)

20 changes: 10 additions & 10 deletions test/package/test_resources.py
@@ -25,16 +25,16 @@ def test_resource_reader(self):
with PackageExporter(buffer) as pe:
# Layout looks like:
# package
# ├── one/
# │   ├── a.txt
# │   ├── b.txt
# │   ├── c.txt
# │   └── three/
# │       ├── d.txt
# │       └── e.txt
# └── two/
#     ├── f.txt
#     └── g.txt
# |-- one/
# |   |-- a.txt
# |   |-- b.txt
# |   |-- c.txt
# |   +-- three/
# |       |-- d.txt
# |       +-- e.txt
# +-- two/
#     |-- f.txt
#     +-- g.txt
pe.save_text("one", "a.txt", "hello, a!")
pe.save_text("one", "b.txt", "hello, b!")
pe.save_text("one", "c.txt", "hello, c!")
2 changes: 1 addition & 1 deletion test/test_jit.py
@@ -15679,7 +15679,7 @@ def fn(string):
def test_unicode_comments(self):
@torch.jit.script
def test(self, a):
# 🤷🤷🤷🤷
# shrug
return torch.nn.functional.relu(a)

def test_get_set_state_with_tensors(self):
2 changes: 1 addition & 1 deletion test/test_jit_fuser.py
@@ -70,7 +70,7 @@ def test_abs_cpu(self):
@unittest.skipIf(IS_SANDCASTLE, "NYI: fuser CPU support for Sandcastle")
@enable_cpu_fuser
def test_abs_cpu_unicode_temp_dir(self):
with TemporaryDirectoryName(suffix='中文') as dname:
with TemporaryDirectoryName(suffix='\u4e2d\u6587') as dname:
shell_env = os.environ.copy()
shell_env['TMP'] = dname
cmd = [sys.executable, os.path.basename(__file__), type(self).__name__ + '.test_abs_cpu']
4 changes: 2 additions & 2 deletions test/test_linalg.py
@@ -1950,7 +1950,7 @@ def test_eig_errors_and_warnings(self, device, dtype):

# if out tensor with floating dtype is passed for complex output an error is thrown
if not dtype.is_complex:
# The characteristic equation is p(λ) = λ^2 − 2λ + 5 = 0, with roots λ = 1±2i
# The characteristic equation is p(lambda) = lambda^2 - 2lambda + 5 = 0, with roots lambda = 1[+-]2i
a = torch.tensor([[3., -2.], [4., -1.]], dtype=dtype, device=device)
out0 = torch.empty(0, device=device, dtype=dtype)
out1 = torch.empty(0, device=device, dtype=dtype)
@@ -2117,7 +2117,7 @@ def test_eigvals_errors_and_warnings(self, device, dtype):

# if out tensor with floating dtype is passed for complex output an error is thrown
if not dtype.is_complex:
# The characteristic equation is p(λ) = λ^2 − 2λ + 5 = 0, with roots λ = 1±2i
# The characteristic equation is p(lambda) = lambda^2 - 2lambda + 5 = 0, with roots lambda = 1[+-]2i
a = torch.tensor([[3., -2.], [4., -1.]], dtype=dtype, device=device)
out = torch.empty(0, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, "Expected eigenvalues to be safely castable"):
2 changes: 1 addition & 1 deletion test/test_public_bindings.py
@@ -428,7 +428,7 @@ def test_modules_can_be_imported(self):
def test_correct_module_names(self):
'''
An API is considered public, if its `__module__` starts with `torch.`
and there is no name in `__module__` or the object itself that starts with “_”.
and there is no name in `__module__` or the object itself that starts with "_".
Each public package should either:
- (preferred) Define `__all__` and all callables and classes in there must have their
`__module__` start with the current submodule's path. Things not in `__all__` should
2 changes: 1 addition & 1 deletion test/test_serialization.py
@@ -924,7 +924,7 @@ def test(name_or_buffer):
test(fname)

if IS_FILESYSTEM_UTF8_ENCODING:
with TemporaryDirectoryName(suffix='非ASCIIパス') as dname:
with TemporaryDirectoryName(suffix='\u975eASCII\u30d1\u30b9') as dname:
with TemporaryFileName(dir=dname) as fname:
test(fname)

4 changes: 2 additions & 2 deletions test/test_torch.py
@@ -8046,7 +8046,7 @@ def assert_with_filename(filename):
assert_with_filename(fname)

if IS_FILESYSTEM_UTF8_ENCODING:
with TemporaryDirectoryName(suffix='中文') as dname, TemporaryFileName(dir=dname) as fname:
with TemporaryDirectoryName(suffix='\u4e2d\u6587') as dname, TemporaryFileName(dir=dname) as fname:
assert_with_filename(fname)

def test_torch_from_file(self):
@@ -8077,7 +8077,7 @@ def assert_with_filename(filename):
assert_with_filename(fname)

if IS_FILESYSTEM_UTF8_ENCODING:
with TemporaryDirectoryName(suffix='中文') as dname, TemporaryFileName(dir=dname) as fname:
with TemporaryDirectoryName(suffix='\u4e2d\u6587') as dname, TemporaryFileName(dir=dname) as fname:
assert_with_filename(fname)

def test_print(self):
2 changes: 1 addition & 1 deletion torch/_decomp/decompositions.py
@@ -744,7 +744,7 @@ def slice_forward(
raise RuntimeError("slice step must be positive")

start_val = start if start is not None else 0
end_val = end if end is not None else sys.maxsize # 2^63 − 1
end_val = end if end is not None else sys.maxsize # 2^63 - 1

if start_val < 0:
start_val += sizes[dim]
2 changes: 1 addition & 1 deletion torch/_dynamo/polyfill.py
@@ -57,7 +57,7 @@ def list_cmp(op: Callable[[Any, Any], bool], left: Sequence[Any], right: Sequenc


def dropwhile(predicate, iterable):
# dropwhile(lambda x: x<5, [1,4,6,4,1]) → 6 4 1
# dropwhile(lambda x: x<5, [1,4,6,4,1]) -> 6 4 1
iterable = iter(iterable)
for x in iterable:
if not predicate(x):
4 changes: 2 additions & 2 deletions torch/_export/error.py
@@ -5,13 +5,13 @@ class ExportErrorType(Enum):
# User providing invalid inputs to either tracer, or other public facing APIs
INVALID_INPUT_TYPE = 1

# User returning values from their models that we don’t support.
# User returning values from their models that we don't support.
INVALID_OUTPUT_TYPE = 2

# Generated IR does not conform to Export IR Specification.
VIOLATION_OF_SPEC = 3

# User’s code contains types and functionalities we don’t support.
# User's code contains types and functionalities we don't support.
NOT_SUPPORTED = 4

# User's code didn't provide necessary details for us to successfully trace and export.
4 changes: 2 additions & 2 deletions torch/_functorch/autograd_function.py
@@ -498,7 +498,7 @@ def get_tangents_in_dims(input_dims, tangents):
# in_dims = 0
# vmap(Sum.apply, in_dims)(x)
#
# Let’s assume for a moment that we didn’t vmap setup_context in VmappedSum:
# Let's assume for a moment that we didn't vmap setup_context in VmappedSum:
#
# class VmappedSum(torch.autograd.Function):
# @staticmethod
@@ -519,7 +519,7 @@ def get_tangents_in_dims(input_dims, tangents):
# return gx
#
# We end up saving [B, 4] as x_shape. In the backward, gy has shape [B],
# and we’re doing:
# and we're doing:
#
# def backward_no_context(gy):
# return gy.expand([B, 4])
4 changes: 2 additions & 2 deletions torch/_inductor/codegen/memory_planning.py
@@ -62,8 +62,8 @@ class LiveRange:
Invariant: begin <= end
"""

begin: float # int | ±inf
end: float # int | ±inf
begin: float # int | +/-inf
end: float # int | +/-inf

def contains(self, other: LiveRange):
"""Is other entirely within self"""
2 changes: 1 addition & 1 deletion torch/_meta_registrations.py
@@ -5373,7 +5373,7 @@ def meta__scaled_dot_product_flash_attention_for_cpu_backward(
scale: Optional[float] = None,
):
# cpus's grad layout is different from cuda's,
# i.e. (batch_size, seq_len，num_heads, head_dim）
# i.e. (batch_size, seq_len,num_heads, head_dim)
batch_size = query.size(0)
num_heads = query.size(1)
head_dim = query.size(3)
2 changes: 1 addition & 1 deletion torch/_numpy/_funcs_impl.py
@@ -2008,7 +2008,7 @@ def min_scalar_type(a: ArrayLike, /):
from ._dtypes import DType

if a.numel() > 1:
# numpy docs: "For non-scalar array a, returns the vector’s dtype unmodified."
# numpy docs: "For non-scalar array a, returns the vector's dtype unmodified."
return DType(a.dtype)

if a.dtype == torch.bool:
2 changes: 1 addition & 1 deletion torch/_refs/__init__.py
@@ -485,7 +485,7 @@ def _make_alias(fn, name):
"""
This function defines an alias of another function and sets its __name__ argument.
It also sets its __module__ argument to the module of the caller.
Note that when naïvely doing `alias = fn`, we have that `alias.__name__ == "fn"`, and
Note that when naively doing `alias = fn`, we have that `alias.__name__ == "fn"`, and
`alias.__module__ == fn.__module__`.
"""

2 changes: 1 addition & 1 deletion torch/_refs/nn/functional/__init__.py
@@ -600,7 +600,7 @@ def margin_ranking_loss(
margin: float = 0.0,
reduction: str = "mean",
) -> TensorLikeType:
# loss_without_reduction = max(0, −target * (input1 − input2) + margin)
# loss_without_reduction = max(0, -target * (input1 - input2) + margin)
if input1.ndim != input2.ndim or input1.ndim != target.ndim:
raise RuntimeError(
"margin_ranking_loss : All input tensors should have same dimension but got sizes: "