[BE]: Update flake8 to v6.1.0 and fix lints (pytorch#116591)
Updates flake8 to v6.1.0 and fixes a few lints using sed and some ruff tooling.
- Replace `assert(0)` with `raise AssertionError()`
- Remove extraneous parentheses (a short before/after sketch follows below), e.g.:
  - `assert(a == b)` -> `assert a == b`
  - `if(x > y or y < z):` -> `if x > y or y < z:`
  - `return('...')` -> `return '...'`
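
A minimal before/after sketch of these rewrites (the helper below is hypothetical, not taken from the diff):

```python
# Before: the patterns this commit rewrites
def first_positive_old(values):
    assert(len(values) > 0)       # extraneous parentheses around the assert expression
    for v in values:
        if(v > 0):                # parentheses fused to the `if` keyword
            return(v)             # parentheses around the return value
    assert(0)                     # bare assert(0) marking a "should not happen" branch

# After: the same logic in the style this commit enforces
def first_positive_new(values):
    assert len(values) > 0
    for v in values:
        if v > 0:
            return v
    raise AssertionError()        # explicit error instead of assert(0)
```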

Co-authored-by: Nikita Shulga <[email protected]>

Pull Request resolved: pytorch#116591
Approved by: https://github.com/albanD, https://github.com/malfet
Skylion007 authored and pytorchmergebot committed Jan 3, 2024
1 parent 09ee96b commit 3fe437b
Showing 62 changed files with 188 additions and 190 deletions.
2 changes: 0 additions & 2 deletions .flake8
@@ -8,8 +8,6 @@ max-line-length = 120
# E501 is not flexible enough, we're using B950 instead
ignore =
E203,E305,E402,E501,E721,E741,F405,F841,F999,W503,W504,C408,E302,W291,E303,
# fix these lints in the future
E275,
# shebang has extra meaning in fbcode lints, so I think it's not worth trying
# to line this up with executable bit
EXE001,
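Dropping `E275` from the `ignore` list means flake8 now enforces "missing whitespace after keyword", the check that flags forms such as `assert(cond)` and `if(cond):`. A rough way to confirm a file is clean locally is flake8's documented legacy Python API; a sketch, assuming flake8 >= 6.1.0 is installed and using a placeholder file path:

```python
# Sketch: count E275 violations in one file via flake8's legacy API.
# "some_test_file.py" is a placeholder, not a path from this commit.
from flake8.api import legacy as flake8

style_guide = flake8.get_style_guide(select=["E275"])
report = style_guide.check_files(["some_test_file.py"])
print("E275 violations:", report.total_errors)
```
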
6 changes: 3 additions & 3 deletions .lintrunner.toml
@@ -38,16 +38,16 @@ init_command = [
'python3',
'tools/linter/adapters/pip_init.py',
'--dry-run={{DRYRUN}}',
'flake8==6.0.0',
'flake8==6.1.0',
'flake8-bugbear==23.3.23',
'flake8-comprehensions==3.12.0',
'flake8-executable==2.1.3',
'flake8-logging-format==0.9.0',
'flake8-pyi==23.3.1',
'flake8-simplify==0.19.3',
'mccabe==0.7.0',
'pycodestyle==2.10.0',
'pyflakes==3.0.1',
'pycodestyle==2.11.1',
'pyflakes==3.1.0',
'torchfix==0.2.0',
]

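As a quick sanity check (sketch only) that a local environment has picked up the new pins, each of these packages exposes a `__version__` attribute:

```python
# Expected after this change, assuming the pinned versions are installed:
# flake8 6.1.0, pycodestyle 2.11.1, pyflakes 3.1.0
import flake8
import pycodestyle
import pyflakes

print("flake8", flake8.__version__)
print("pycodestyle", pycodestyle.__version__)
print("pyflakes", pyflakes.__version__)
```
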
2 changes: 1 addition & 1 deletion test/ao/sparsity/test_structured_sparsifier.py
@@ -981,7 +981,7 @@ def _test_update_mask_on_single_layer(self, expected_conv1, device):
pruner.prepare(model, config)
pruner.enable_mask_update = True
pruner.step()
assert pruner.groups[0]["module"].parametrizations.weight[0].mask[-1].item() is not False,\
assert pruner.groups[0]["module"].parametrizations.weight[0].mask[-1].item() is not False, \
"do not prune the least-norm filter"

# fusion step
2 changes: 1 addition & 1 deletion test/distributed/test_c10d_spawn.py
@@ -9,7 +9,7 @@
import torch.multiprocessing as mp
from torch.testing._internal.common_distributed import \
MultiProcessTestCase
from torch.testing._internal.common_utils import load_tests,\
from torch.testing._internal.common_utils import load_tests, \
NO_MULTIPROCESSING_SPAWN

# Torch distributed.nn is not available in windows
8 changes: 4 additions & 4 deletions test/functorch/common_utils.py
@@ -27,7 +27,7 @@ def loop(op, in_dims, out_dim, batch_size, *batched_args, **kwarg_values):
for idx in range(batch_size):
flat_args, args_spec = pytree.tree_flatten(batched_args)
flat_dims, dims_spec = pytree.tree_flatten(in_dims)
assert(args_spec == dims_spec)
assert args_spec == dims_spec
new_args = [a.select(in_dim, idx) if in_dim is not None else a for a, in_dim in zip(flat_args, flat_dims)]
out = op(*pytree.tree_unflatten(new_args, args_spec), **kwarg_values)
flat_out, out_spec = pytree.tree_flatten(out)
@@ -45,9 +45,9 @@ def loop2(op, in_dims1, in_dims2, out_dim1, out_dim2, batch_size1, batch_size2,
flat_args, args_spec = pytree.tree_flatten(batched_args)
flat_dims1, dims_spec1 = pytree.tree_flatten(in_dims1)
flat_dims2, dims_spec2 = pytree.tree_flatten(in_dims2)
assert(args_spec == dims_spec1)
assert(args_spec == dims_spec2)
assert(len(flat_dims1) == len(flat_dims2))
assert args_spec == dims_spec1
assert args_spec == dims_spec2
assert len(flat_dims1) == len(flat_dims2)
for idx1 in range(batch_size1):
out_split = []
arg_split = [a.select(in_dim1, idx1) if in_dim1 is not None else a for a, in_dim1 in zip(flat_args, flat_dims1)]
2 changes: 1 addition & 1 deletion test/fx/test_z3_gradual_types.py
@@ -7,7 +7,7 @@
from torch.fx.experimental.migrate_gradual_types.constraint_generator import ConstraintGenerator
from torch.fx.experimental.migrate_gradual_types.constraint_transformation import transform_constraint
from torch.fx.experimental.migrate_gradual_types.operation import op_precision, op_matching, op_consistency
from torch.fx.experimental.migrate_gradual_types.transform_to_z3 import transform_all_constraints,\
from torch.fx.experimental.migrate_gradual_types.transform_to_z3 import transform_all_constraints, \
evaluate_conditional_with_constraints
from torch.fx.experimental.migrate_gradual_types.z3_types import tensor_type, D, z3_dyn
from torch.fx.experimental.rewriter import RewritingTracer
6 changes: 3 additions & 3 deletions test/jit/test_autodiff_subgraph_slicing.py
@@ -154,9 +154,9 @@ def func(input0: torch.Tensor, input1: torch.Tensor) -> Tuple[torch.Tensor, List
output_ref = func(input0, input1)
for i in range(2):
output = jit_f(input0, input1)
assert(output_ref[0].requires_grad == output[0].requires_grad)
assert(output_ref[1][0].requires_grad == output[1][0].requires_grad)
assert(output_ref[1][1].requires_grad == output[1][1].requires_grad)
assert output_ref[0].requires_grad == output[0].requires_grad
assert output_ref[1][0].requires_grad == output[1][0].requires_grad
assert output_ref[1][1].requires_grad == output[1][1].requires_grad

@unittest.skip("disable until we property handle tensor lists with undefined gradients")
def test_differentiable_graph_ops_requires_grad(self):
14 changes: 7 additions & 7 deletions test/jit/test_scriptmod_ann.py
@@ -35,7 +35,7 @@ def forward(self, x: int):

with warnings.catch_warnings(record=True) as w:
self.checkModule(M(), (1,))
assert(len(w) == 0)
assert len(w) == 0

def test_annotated_nonempty_container(self):
class M(torch.nn.Module):
@@ -49,7 +49,7 @@ def forward(self, x: List[int]):

with warnings.catch_warnings(record=True) as w:
self.checkModule(M(), ([1, 2, 3],))
assert(len(w) == 0)
assert len(w) == 0

def test_annotated_empty_tensor(self):
class M(torch.nn.Module):
Expand All @@ -63,7 +63,7 @@ def forward(self, x: torch.Tensor):

with warnings.catch_warnings(record=True) as w:
self.checkModule(M(), (torch.rand(2, 3),))
assert(len(w) == 0)
assert len(w) == 0

def test_annotated_with_jit_attribute(self):
class M(torch.nn.Module):
Expand All @@ -77,7 +77,7 @@ def forward(self, x: List[int]):

with warnings.catch_warnings(record=True) as w:
self.checkModule(M(), ([1, 2, 3],))
assert(len(w) == 0)
assert len(w) == 0

def test_annotated_class_level_annotation_only(self):
class M(torch.nn.Module):
Expand All @@ -94,7 +94,7 @@ def forward(self, y: List[int]):

with warnings.catch_warnings(record=True) as w:
self.checkModule(M(), ([1, 2, 3],))
assert(len(w) == 0)
assert len(w) == 0


def test_annotated_class_level_annotation_and_init_annotation(self):
Expand All @@ -112,7 +112,7 @@ def forward(self, y: List[int]):

with warnings.catch_warnings(record=True) as w:
self.checkModule(M(), ([1, 2, 3],))
assert(len(w) == 0)
assert len(w) == 0

def test_annotated_class_level_jit_annotation(self):
class M(torch.nn.Module):
Expand All @@ -129,7 +129,7 @@ def forward(self, y: List[int]):

with warnings.catch_warnings(record=True) as w:
self.checkModule(M(), ([1, 2, 3],))
assert(len(w) == 0)
assert len(w) == 0

def test_annotated_empty_list(self):
class M(torch.nn.Module):
18 changes: 9 additions & 9 deletions test/mobile/test_bytecode.py
@@ -147,7 +147,7 @@ class testVariousModelVersions(TestCase):
def test_get_model_bytecode_version(self):
def check_model_version(model_path, expect_version):
actual_version = _get_model_bytecode_version(model_path)
assert(actual_version == expect_version)
assert actual_version == expect_version
for version, model_info in SCRIPT_MODULE_BYTECODE_PKL.items():
model_path = pytorch_test_dir / "cpp" / "jit" / model_info["model_name"]
check_model_version(model_path, version)
@@ -174,7 +174,7 @@ def test_bytecode_values_for_all_backport_functions(self):

current_to_version = current_from_version - 1
backport_success = _backport_for_mobile(input_model_path, tmp_output_model_path_backport, current_to_version)
assert(backport_success)
assert backport_success

expect_bytecode_pkl = SCRIPT_MODULE_BYTECODE_PKL[current_to_version]["bytecode_pkl"]

@@ -187,7 +187,7 @@ def test_bytecode_values_for_all_backport_functions(self):
acutal_result_clean = "".join(output.split())
expect_result_clean = "".join(expect_bytecode_pkl.split())
isMatch = fnmatch.fnmatch(acutal_result_clean, expect_result_clean)
assert(isMatch)
assert isMatch

current_from_version -= 1
shutil.rmtree(tmpdirname)
@@ -254,7 +254,7 @@ def test_backport_bytecode_from_file_to_file(self):
script_module_v5_path,
tmp_backport_model_path,
maximum_checked_in_model_version - 1)
assert(success)
assert success

buf = io.StringIO()
torch.utils.show_pickle.main(
@@ -266,7 +266,7 @@ def test_backport_bytecode_from_file_to_file(self):
acutal_result_clean = "".join(output.split())
expect_result_clean = "".join(expected_result.split())
isMatch = fnmatch.fnmatch(acutal_result_clean, expect_result_clean)
assert(isMatch)
assert isMatch

# Load model v4 and run forward method
mobile_module = _load_for_lite_interpreter(str(tmp_backport_model_path))
@@ -291,7 +291,7 @@ def test_backport_bytecode_from_file_to_buffer(self):
# Check version of the model v4 from backport
bytesio = io.BytesIO(script_module_v4_buffer)
backport_version = _get_model_bytecode_version(bytesio)
assert(backport_version == maximum_checked_in_model_version - 1)
assert backport_version == maximum_checked_in_model_version - 1

# Load model v4 from backport and run forward method
bytesio = io.BytesIO(script_module_v4_buffer)
@@ -306,8 +306,8 @@ def test_get_model_ops_and_info(self):
# TODO update this to be more in the style of the above tests after a backport from 6 -> 5 exists
script_module_v6 = pytorch_test_dir / "cpp" / "jit" / "script_module_v6.ptl"
ops_v6 = _get_model_ops_and_info(script_module_v6)
assert(ops_v6["aten::add.int"].num_schema_args == 2)
assert(ops_v6["aten::add.Scalar"].num_schema_args == 2)
assert ops_v6["aten::add.int"].num_schema_args == 2
assert ops_v6["aten::add.Scalar"].num_schema_args == 2

def test_get_mobile_model_contained_types(self):
class MyTestModule(torch.nn.Module):
@@ -322,7 +322,7 @@ def forward(self, x):
buffer = io.BytesIO(script_module._save_to_buffer_for_lite_interpreter())
buffer.seek(0)
type_list = _get_mobile_model_contained_types(buffer)
assert(len(type_list) >= 0)
assert len(type_list) >= 0

if __name__ == '__main__':
run_tests()
2 changes: 1 addition & 1 deletion test/mobile/test_lite_script_module.py
@@ -82,7 +82,7 @@ def forward(self, x, y, z):
buffer = io.BytesIO(exported_module)
buffer.seek(0)

assert(b"callstack_debug_map.pkl" in exported_module)
assert b"callstack_debug_map.pkl" in exported_module

mobile_module = _load_for_lite_interpreter(buffer)
with self.assertRaisesRegex(RuntimeError, r"Module hierarchy:top\(B\)::<unknown>.A0\(A\)::forward.aten::mul"):
4 changes: 2 additions & 2 deletions test/nn/test_dropout.py
@@ -65,8 +65,8 @@ def test_native_dropout_corner_case(self):
o_ref = torch.dropout(x_ref, p, train)
o.sum().backward()
o_ref.sum().backward()
assert(o.equal(o_ref))
assert(x.grad.equal(x_ref.grad))
assert o.equal(o_ref)
assert x.grad.equal(x_ref.grad)

def test_invalid_dropout_p(self):
v = torch.ones(1)
2 changes: 1 addition & 1 deletion test/optim/test_optim.py
@@ -166,7 +166,7 @@ def _test_basic_cases_template(
pass
elif constructor_accepts_maximize:

def four_arg_constructor(weight, bias, maximize, foreach):
def four_arg_constructor(weight, bias, maximize, foreach): # noqa: F811
self.assertFalse(foreach)
return constructor(weight, bias, maximize)

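For context, `F811` is pyflakes' "redefinition of unused name" warning; the updated pyflakes appears to report it even for deliberate, condition-dependent redefinitions like the one above, hence the inline suppression. A minimal sketch of the pattern (names are hypothetical, not from this diff):

```python
# A name is bound once and then deliberately rebound under a condition;
# pyflakes reports the rebinding as F811 (redefinition of unused name).
def transform(x):
    return x * 2

use_identity = True
if use_identity:
    def transform(x):  # noqa: F811 -- intentional, condition-dependent redefinition
        return x

print(transform(3))  # -> 3 when use_identity is True
```
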
12 changes: 6 additions & 6 deletions test/quantization/core/test_quantized_op.py
@@ -2127,7 +2127,7 @@ def test_qtopk(self):

quantized_out = torch.topk(qX, k, dim=dim, largest=larg, sorted=sort)

assert(len(unquantized_out) == len(quantized_out))
assert len(unquantized_out) == len(quantized_out)
torch.testing.assert_close(quantized_out[0].dequantize(), unquantized_out[0])
torch.testing.assert_close(quantized_out[1], unquantized_out[1])

@@ -3813,7 +3813,7 @@ def test_qlinear(self):
post_op = 'none'
cases = itertools.product(batch_size_list, input_channels_list, output_channels_list,
use_bias_list, use_multi_dim_input_list, use_channelwise_list)
for batch_size, input_channels, output_channels, use_bias,\
for batch_size, input_channels, output_channels, use_bias, \
use_multi_dim_input, use_channelwise in cases:
self._test_qlinear_impl(batch_size, input_channels, output_channels,
use_bias, post_op, use_multi_dim_input, use_channelwise)
@@ -3830,7 +3830,7 @@ def test_qlinear_relu(self):
post_op = 'relu'
cases = itertools.product(batch_size_list, input_channels_list, output_channels_list,
use_bias_list, use_multi_dim_input_list, use_channelwise_list)
for batch_size, input_channels, output_channels, use_bias,\
for batch_size, input_channels, output_channels, use_bias, \
use_multi_dim_input, use_channelwise in cases:
self._test_qlinear_impl(batch_size, input_channels, output_channels,
use_bias, post_op, use_multi_dim_input, use_channelwise)
@@ -4101,7 +4101,7 @@ def test_qlinear_unpack(self, W, use_channelwise):
qparams=hu.qparams(dtypes=torch.qint8)))
@override_qengines
def test_qlinear_qnnpack_free_memory_and_unpack(self, W):
assert(qengine_is_qnnpack)
assert qengine_is_qnnpack
W, (W_scale, W_zp, torch_type) = W
qlinear_prepack = torch.ops.quantized.linear_prepack
qlinear_unpack = torch.ops.quantized.linear_unpack
@@ -4140,7 +4140,7 @@ def test_qlinear_leaky_relu(self):
cases = itertools.product(batch_size_list, input_channels_list, output_channels_list,
use_bias_list, use_multi_dim_input_list,
use_channelwise_list, negative_slopes_list)
for batch_size, input_channels, output_channels, use_bias,\
for batch_size, input_channels, output_channels, use_bias, \
use_multi_dim_input, use_channelwise, neg_slope in cases:
self._test_qlinear_impl(batch_size, input_channels, output_channels,
use_bias, post_op, use_multi_dim_input,
@@ -4159,7 +4159,7 @@ def test_qlinear_tanh(self):
cases = itertools.product(batch_size_list, input_channels_list,
output_channels_list, use_bias_list,
use_multi_dim_input_list, use_channelwise_list)
for batch_size, input_channels, output_channels, use_bias,\
for batch_size, input_channels, output_channels, use_bias, \
use_multi_dim_input, use_channelwise in cases:
self._test_qlinear_impl(batch_size, input_channels, output_channels,
use_bias, post_op, use_multi_dim_input,
2 changes: 1 addition & 1 deletion test/quantization/core/test_workflow_ops.py
@@ -879,7 +879,7 @@ def _test_backward_per_channel_cachemask_impl(self, device):
Y_prime.backward(dout)
np.testing.assert_allclose(
dX.cpu().detach().numpy(), X.grad.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)
assert(X.grad.dtype == float_type)
assert X.grad.dtype == float_type


def test_backward_per_channel_cachemask_cpu(self):
2 changes: 1 addition & 1 deletion test/quantization/eager/test_quantize_eager_qat.py
@@ -839,7 +839,7 @@ def compose(functions):
return reduce(lambda f, g: lambda x: f(g(x)), functions[::-1], lambda x: x)

if not use_relu:
def relu_op(x):
def relu_op(x): # noqa: F811
return x

if freeze_bn:
4 changes: 2 additions & 2 deletions test/test_autograd.py
@@ -4319,7 +4319,7 @@ def get_id():
]
for thread, ranges in threads:
for range in ranges:
assert(len(range) == 3)
assert len(range) == 3
events.append(
FunctionEvent(
id=range[2],
@@ -4340,7 +4340,7 @@ def get_id():
def get_children_ids(event):
return [child.id for child in event.cpu_children]

assert([get_children_ids(event) for event in events] == res)
assert [get_children_ids(event) for event in events] == res

def test_profiler_aggregation_table(self):
"""
2 changes: 1 addition & 1 deletion test/test_bundled_images.py
@@ -30,7 +30,7 @@ def save_and_load(sm):
def bundle_jpeg_image(img_tensor, quality):
# turn NCHW to HWC
if img_tensor.dim() == 4:
assert(img_tensor.size(0) == 1)
assert img_tensor.size(0) == 1
img_tensor = img_tensor[0].permute(1, 2, 0)
pixels = img_tensor.numpy()
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), quality]
2 changes: 1 addition & 1 deletion test/test_cuda.py
@@ -1388,7 +1388,7 @@ def test_grad_scale_will_not_overflow(self):
scaler.scale(l).backward()
scaler.step(optimizer)
scaler.update()
assert(scaler._scale != float('inf') and scaler._scale != float('nan'))
assert scaler._scale != float('inf') and scaler._scale != float('nan')

def test_grad_scaling_clipping(self):
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
(Diff truncated; remaining changed files not shown.)
