[BE]: Update ruff to 0.4.1 (pytorch#124549)
Update ruff to 0.4.1.
This version fixes a lot of false negatives and false positives, is 20-40% faster, and includes various other bug fixes.

Below is a before-and-after table showing the execution time of ruff's linter and formatter in milliseconds, courtesy of https://astral.sh/blog/ruff-v0.4.0:

| Repository                                         | Linter (v0.3) | Linter (v0.4) | Formatter (v0.3) | Formatter (v0.4) |
|----------------------------------------------------|---------------|---------------|------------------|------------------|
| [pytorch/pytorch](https://github.com/pytorch/pytorch) | 328.7         | 251.8         | 351.1            | 274.9            |
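
These numbers can be sanity-checked locally by timing the same two subcommands against a pytorch/pytorch checkout. A minimal sketch (an illustration, not part of this PR), assuming ruff 0.4.x is on PATH and the working directory is the repository root; timings will vary by machine:

```python
import subprocess
import time

# Time one pass of the linter and one of the formatter in check mode
# (no files are rewritten). For stable numbers, repeat the runs and
# average, or use a dedicated benchmarking tool such as hyperfine.
for cmd in (["ruff", "check", "."], ["ruff", "format", "--check", "."]):
    start = time.perf_counter()
    subprocess.run(cmd, check=False)  # findings are expected; don't raise
    elapsed_ms = (time.perf_counter() - start) * 1000
    print(f"{' '.join(cmd)}: {elapsed_ms:.1f} ms")
```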

Pull Request resolved: pytorch#124549
Approved by: https://github.com/ezyang
Skylion007 authored and pytorchmergebot committed Apr 21, 2024
1 parent f34905f commit 5a1216b
Showing 70 changed files with 204 additions and 387 deletions.
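
Most of the changes below are mechanical applications of ruff's pyupgrade-family fixes: `str.format()` calls become f-strings (rule UP032) and the redundant `"r"` mode argument to `open()` is dropped (rule UP015), alongside a few unused-import removals (rule F401); rewritten lines that end up over the length limit carry `# noqa: B950` suppressions. A small illustrative sketch of the two recurring patterns (example code, not from the repository):

```python
# Example code only: minimal before/after pairs for the two patterns
# that account for most of this diff.

name, version = "ruff", "0.4.1"

# UP032: str.format() calls are rewritten as f-strings.
before = "updating {} to {}".format(name, version)
after = f"updating {name} to {version}"
assert before == after

# UP015: the "r" mode argument to open() is redundant (text-read is the
# default), so it is removed:
#     with open("config.txt", "r") as f:   # before
#     with open("config.txt") as f:        # after
#         data = f.read()
```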
2 changes: 1 addition & 1 deletion .lintrunner.toml
```diff
@@ -2168,7 +2168,7 @@ init_command = [
     'python3',
     'tools/linter/adapters/pip_init.py',
     '--dry-run={{DRYRUN}}',
-    'ruff==0.3.0',
+    'ruff==0.4.1',
 ]
 is_formatter = true
```
4 changes: 1 addition & 3 deletions benchmarks/distributed/pipeline/pipe.py
```diff
@@ -187,9 +187,7 @@ def get_last_device(model):
         cur_loss = total_loss / log_interval
         elapsed = time.time() - start_time
         print(
-            "| batch {:5d} | wps {:5.2f} | loss {:5.2f} | ppl {:8.2f}".format(
-                i, word_counter / elapsed, cur_loss, math.exp(cur_loss)
-            )
+            f"| batch {i:5d} | wps {word_counter / elapsed:5.2f} | loss {cur_loss:5.2f} | ppl {math.exp(cur_loss):8.2f}"
         )
         word_counter = 0
         total_loss = 0
```
4 changes: 1 addition & 3 deletions benchmarks/operator_benchmark/benchmark_caffe2.py
```diff
@@ -185,9 +185,7 @@ def generate_c2_test_from_ops(ops_metadata, bench_op, tags):
         op = bench_op()
         op.init(**test_attrs)
         test_name = op.test_name("short")
-        input_config = "Shapes: {}, Type: {}, Args: {}".format(
-            op_metadata.input_dims, op_metadata.input_types, str(op_metadata.args)
-        )
+        input_config = f"Shapes: {op_metadata.input_dims}, Type: {op_metadata.input_types}, Args: {str(op_metadata.args)}"
         test_config = TestConfig(test_name, input_config, tags, run_backward=False)
         if op is not None:
             create_caffe2_op_test_case(op, test_config)
```
2 changes: 1 addition & 1 deletion scripts/analysis/format_test_csv.py
```diff
@@ -34,7 +34,7 @@

 out.writerow([hash, args.log_url, ""])

-with open(args.file, "r") as f:
+with open(args.file) as f:
     reader = csv.DictReader(f)
     for row in reader:
         if row["status"] not in {"failed", "error"}:
```
1 change: 0 additions & 1 deletion scripts/compile_tests/common.py
```diff
@@ -14,7 +14,6 @@
     warnings.warn(
         "lxml was not found. `pip install lxml` to make this script run much faster"
     )
-from download_reports import download_reports


 def open_test_results(directory):
```
1 change: 0 additions & 1 deletion scripts/compile_tests/download_reports.py
```diff
@@ -1,4 +1,3 @@
-import enum
 import json
 import os
 import pprint
```
2 changes: 1 addition & 1 deletion scripts/compile_tests/update_failures.py
```diff
@@ -48,7 +48,7 @@ def patch_file(

     # These are hand written skips
     extra_dynamo_skips = set()
-    with open(filename, "r") as f:
+    with open(filename) as f:
         start = False
         for text in f.readlines():
             text = text.strip()
```
10 changes: 4 additions & 6 deletions scripts/diagnose_protobuf.py
```diff
@@ -71,13 +71,11 @@
 https://github.com/google/protobuf/releases/
 """

-VERSION_MISMATCH = """
-Your python protobuf is of version {py_ver} but your native protoc version is of
-version {native_ver}. This will cause the installation to produce incompatible
+VERSION_MISMATCH = f"""
+Your python protobuf is of version {python_version} but your native protoc version is of
+version {native_version}. This will cause the installation to produce incompatible
 protobuf files. This is bad in general - consider installing the same version.
-""".format(
-    py_ver=python_version, native_ver=native_version
-)
+"""

 # Now, give actual recommendations
 if not python_protobuf_installed:
```
2 changes: 1 addition & 1 deletion scripts/export/update_schema.py
```diff
@@ -47,7 +47,7 @@
 if next_version is not None and next_version != commit.result["SCHEMA_VERSION"]:
     raise RuntimeError(
         f"Schema version is not updated from {commit.base['SCHEMA_VERSION']} to {next_version}.\n"
-        + f"Please either:\n"
+        + "Please either:\n"
         + " 1. update schema.py to not break compatibility.\n"
         + " or 2. bump the schema version to the expected value.\n"
         + " or 3. use --force-unsafe to override schema.yaml (not recommended).\n "
```
2 changes: 1 addition & 1 deletion scripts/model_zoo/update-caffe2-models.py
```diff
@@ -110,7 +110,7 @@ def generate_models():
         caffe2_model_dir = sc._caffe2_model_dir(model)
         onnx_model_dir, onnx_models_dir = sc._onnx_model_dir(model)
         subprocess.check_call(["echo", model])
-        with open(os.path.join(caffe2_model_dir, "value_info.json"), "r") as f:
+        with open(os.path.join(caffe2_model_dir, "value_info.json")) as f:
             value_info = f.read()
         subprocess.check_call(
             [
```
8 changes: 2 additions & 6 deletions scripts/model_zoo/update-models-from-caffe2.py
```diff
@@ -90,9 +90,7 @@ def download_onnx_model(model_name, zoo_dir, use_cache=True, only_local=False):
         try:
             download_file.close()
             print(
-                "Downloading ONNX model {} from {} and save in {} ...\n".format(
-                    model_name, url, download_file.name
-                )
+                f"Downloading ONNX model {model_name} from {url} and save in {download_file.name} ...\n"
             )
             urlretrieve(url, download_file.name)
             with tarfile.open(download_file.name) as t:
@@ -300,9 +298,7 @@ def onnx_verify(onnx_model, inputs, ref_outputs):
             )

             print(f"Deleteing old ONNX {onnx_model_name} model...")
-            for f in glob.glob(
-                os.path.join(onnx_model_dir, "model*".format(onnx_model_name))
-            ):
+            for f in glob.glob(os.path.join(onnx_model_dir, "model*".format())):
                 os.remove(f)

             print(f"Serializing generated ONNX {onnx_model_name} model ...")
```
4 changes: 2 additions & 2 deletions scripts/release_notes/apply_categories.py
```diff
@@ -9,12 +9,12 @@
 category_csv = "results/category_data.csv"
 commitlist_csv = "results/commitlist.csv"

-with open(category_csv, "r") as category_data:
+with open(category_csv) as category_data:
     reader = csv.DictReader(category_data, commitlist.commit_fields)
     rows = list(reader)
     category_map = {row["commit_hash"]: row["category"] for row in rows}

-with open(commitlist_csv, "r") as commitlist_data:
+with open(commitlist_csv) as commitlist_data:
     reader = csv.DictReader(commitlist_data, commitlist.commit_fields)
     commitlist_rows = list(reader)
```
1 change: 0 additions & 1 deletion scripts/release_notes/classifier.py
```diff
@@ -11,7 +11,6 @@
 import pandas as pd
 import torch
 import torch.nn as nn
-import torch.nn.functional as F
 import torchtext
 from torchtext.functional import to_tensor
 from tqdm import tqdm
```
1 change: 0 additions & 1 deletion scripts/release_notes/commitlist.py
```diff
@@ -13,7 +13,6 @@
     features_to_dict,
     frontend_categories,
     get_commit_data_cache,
-    get_features,
     run,
     topics,
 )
```
2 changes: 1 addition & 1 deletion scripts/release_notes/common.py
```diff
@@ -321,7 +321,7 @@ def get(self, commit):
         return self.data[commit]

     def read_from_disk(self):
-        with open(self.path, "r") as f:
+        with open(self.path) as f:
             data = json.load(f)
             data = {commit: dict_to_features(dct) for commit, dct in data.items()}
             return data
```
5 changes: 2 additions & 3 deletions scripts/release_notes/namespace_check.py
```diff
@@ -5,7 +5,6 @@
 import torch

 # Import all utils so that getattr below can find them
-from torch.utils import bottleneck, checkpoint, model_zoo

 all_submod_list = [
     "",
@@ -69,10 +68,10 @@ def run(args, submod):
     if not path.exists(new_filename):
         raise RuntimeError("New version data not collected")

-    with open(prev_filename, "r") as f:
+    with open(prev_filename) as f:
         prev_content = set(json.load(f))

-    with open(new_filename, "r") as f:
+    with open(new_filename) as f:
         new_content = set(json.load(f))

     if not args.show_all:
```
22 changes: 7 additions & 15 deletions test/cpp_api_parity/functional_impl_check.py
```diff
@@ -158,9 +158,7 @@ def camel_case_to_snake_case(camel_case_str):
         return test_params_dict["cpp_function_call"].split("(")[0].replace("F::", "")
     else:
         raise RuntimeError(
-            "`cpp_options_args` or `cpp_function_call` entry must be present in test params dict:\n{}".format(
-                pprint.pformat(test_params_dict)
-            )
+            f"`cpp_options_args` or `cpp_function_call` entry must be present in test params dict:\n{pprint.pformat(test_params_dict)}"  # noqa: B950
         )
@@ -181,9 +179,7 @@ def compute_cpp_function_call(test_params_dict, arg_dict, functional_name):
         )
     else:
         raise RuntimeError(
-            "`cpp_options_args` or `cpp_function_call` entry must be present in test params dict:\n{}".format(
-                pprint.pformat(test_params_dict)
-            )
+            f"`cpp_options_args` or `cpp_function_call` entry must be present in test params dict:\n{pprint.pformat(test_params_dict)}"  # noqa: B950
         )
@@ -221,12 +217,10 @@ def write_test_to_test_class(
         or "cpp_function_call" in test_params_dict
     ), (
         "To enable C++ API parity test, "
-        "`cpp_options_args` or `cpp_function_call` entry must be present in test params dict:\n{}. \n"
+        f"`cpp_options_args` or `cpp_function_call` entry must be present in test params dict:\n{pprint.pformat(test_params_dict)}. \n"  # noqa: B950
         "If you are interested in adding the C++ API parity test, please see:\n"
         "NOTE [How to check NN module / functional API parity between Python and C++ frontends]. \n"
         "If not, please add `test_cpp_api_parity=False` to the test params dict and file an issue about this."
-    ).format(
-        pprint.pformat(test_params_dict)
-    )
+    )

     assert not (
@@ -241,16 +235,14 @@

     assert hasattr(
         torch.nn.functional, functional_name
-    ), "`torch.nn.functional` doesn't have function `{}`. (Discovered while processing\n{}.)".format(
-        functional_name, pprint.pformat(test_params_dict)
-    )
+    ), f"`torch.nn.functional` doesn't have function `{functional_name}`. (Discovered while processing\n{pprint.pformat(test_params_dict)}.)"  # noqa: B950

     functional_full_name = "F::" + functional_name

     assert functional_full_name in parity_table["torch::nn::functional"], (
-        "Please add `{}` entry to `torch::nn::functional` section of `test/cpp_api_parity/parity-tracker.md`. "
-        "(Discovered while processing\n{}.)"
-    ).format(functional_full_name, pprint.pformat(test_params_dict))
+        f"Please add `{functional_full_name}` entry to `torch::nn::functional` section of `test/cpp_api_parity/parity-tracker.md`. "
+        f"(Discovered while processing\n{pprint.pformat(test_params_dict)}.)"
+    )

     for device in devices:
         test_params = process_test_params_for_functional(
```
6 changes: 3 additions & 3 deletions test/cpp_api_parity/utils.py
```diff
@@ -376,9 +376,9 @@ def decorate_test_fn(test_fn, test_cuda, has_impl_parity, device):

 def generate_error_msg(name, cpp_value, python_value):
     return (
-        "Parity test failed: {} in C++ has value: {}, "
-        "which does not match the corresponding value in Python: {}.\n{}"
-    ).format(name, cpp_value, python_value, MESSAGE_HOW_TO_FIX_CPP_PARITY_TEST_FAILURE)
+        f"Parity test failed: {name} in C++ has value: {cpp_value}, "
+        f"which does not match the corresponding value in Python: {python_value}.\n{MESSAGE_HOW_TO_FIX_CPP_PARITY_TEST_FAILURE}"
+    )


 def try_remove_folder(folder_path):
```
4 changes: 1 addition & 3 deletions test/distributions/test_distributions.py
```diff
@@ -4741,9 +4741,7 @@ def test_entropy_shape(self):
                     expected_shape = (
                         dist.batch_shape if dist.batch_shape else torch.Size()
                     )
-                    message = "{} example {}/{}, shape mismatch. expected {}, actual {}".format(
-                        Dist.__name__, i + 1, len(params), expected_shape, actual_shape
-                    )
+                    message = f"{Dist.__name__} example {i + 1}/{len(params)}, shape mismatch. expected {expected_shape}, actual {actual_shape}"  # noqa: B950
                     self.assertEqual(actual_shape, expected_shape, msg=message)
                 except NotImplementedError:
                     continue
```
11 changes: 3 additions & 8 deletions test/quantization/core/test_workflow_ops.py
```diff
@@ -924,18 +924,13 @@ def _test_learnable_backward_per_channel(self, X_base, device, scale_base, zero_

             self.assertTrue(
                 torch.allclose(dX_expected, dX_actual, rtol=tolerance, atol=tolerance),
-                "Expected dX={} to match X.grad={}, X={}, s={}, z={}, dout={}, n_bits={}".format(
-                    dX_expected, dX_actual, X_curr, scale_curr, zero_point_curr, dout, n_bits))
+                f"Expected dX={dX_expected} to match X.grad={dX_actual}, X={X_curr}, s={scale_curr}, z={zero_point_curr}, dout={dout}, n_bits={n_bits}")  # noqa: B950
             self.assertTrue(
                 torch.allclose(dScale_expected * grad_factor, dScale_actual, rtol=tolerance, atol=tolerance),
-                "Expected dScale={} to match scale.grad={}, X={}, s={}, z={}, dout={}, n_bits={}".format(
-                    dScale_expected * grad_factor, dScale_actual,
-                    X_curr, scale_curr, zero_point_curr, dout, n_bits))
+                f"Expected dScale={dScale_expected * grad_factor} to match scale.grad={dScale_actual}, X={X_curr}, s={scale_curr}, z={zero_point_curr}, dout={dout}, n_bits={n_bits}")  # noqa: B950
             self.assertTrue(
                 torch.allclose(dZeroPoint_expected * grad_factor, dZeroPoint_actual, rtol=tolerance, atol=tolerance),
-                "Expected dZeroPoint={} to match zero_point.grad={}, X={}, s={}, z={}, dout={}, n_bits={}".format(
-                    dZeroPoint_expected * grad_factor, dZeroPoint_actual,
-                    X_curr, scale_curr, zero_point_curr, dout, n_bits))
+                f"Expected dZeroPoint={dZeroPoint_expected * grad_factor} to match zero_point.grad={dZeroPoint_actual}, X={X_curr}, s={scale_curr}, z={zero_point_curr}, dout={dout}, n_bits={n_bits}")  # noqa: B950
             X_curr.grad.data.zero_()
             scale_curr.grad.data.zero_()
             zero_point_curr.grad.data.zero_()
```
4 changes: 1 addition & 3 deletions test/test_autograd.py
```diff
@@ -4696,9 +4696,7 @@ def test_profiler_aggregation_lstm(self):
         )  # make it us which is profiler default
         print("Total time based on python measurements: ", _format_time(total_time_us))
         print(
-            "CPU time measurement python side overhead: {:.2f}%".format(
-                (total_time_us / prof.self_cpu_time_total - 1.0) * 100.0
-            )
+            f"CPU time measurement python side overhead: {(total_time_us / prof.self_cpu_time_total - 1.0) * 100.0:.2f}%"
         )

         if sys.platform != "win32":
```
3 changes: 1 addition & 2 deletions test/test_cuda.py
```diff
@@ -1359,8 +1359,7 @@ def cast(val, to_type):
                 output_method = getattr(args[0], op)(*args[1:], **add_kwargs)
                 if isinstance(output_method, torch.Tensor):
                     self.assertTrue(out_type == output_method.dtype,
-                                    "autocast for torch.{} produced {}, should produce torch.{}"
-                                    .format(op, output_method.dtype, out_type))
+                                    f"autocast for torch.{op} produced {output_method.dtype}, should produce torch.{out_type}")

                 self.assertTrue((output is not None) or (output_method is not None),
                                 f"{op} not found as an attribute on either Tensor or the requested module {module}")
```
3 changes: 1 addition & 2 deletions test/test_dataloader.py
```diff
@@ -2135,8 +2135,7 @@ def fail(reason):
         elif exit_method == 'worker_kill':
             if isinstance(loader_p.exception, RuntimeError):
                 if 'DataLoader worker (pid' not in str(loader_p.exception):
-                    fail('loader process did not raise expected exception, but had {}'.format(
-                        loader_p.exception))
+                    fail(f'loader process did not raise expected exception, but had {loader_p.exception}')
             elif isinstance(loader_p.exception, ConnectionRefusedError):
                 # Sometimes, when the worker is being killed and is freeing its
                 # resources, the unpickling in loader process will be met an
```
18 changes: 8 additions & 10 deletions test/test_mps.py
```diff
@@ -1158,19 +1158,17 @@ def __exit__(self, exec_type, exec_value, traceback):
         if caching_allocator_discrepancy and not driver_discrepancy:
             # Just raises a warning if the leak is not validated by the driver API
             msg = ("MPS caching allocator reports a memory leak not "
-                   "verified by the driver API in {}! "
-                   "Caching allocator allocated memory was {} and is now reported as {}. "
-                   "MPS driver allocated memory was {} and is now {}.").format(
-                self.name, self.caching_allocator_before,
-                caching_allocator_mem_allocated, self.driver_before, driver_mem_allocated)
+                   f"verified by the driver API in {self.name}! "
+                   f"Caching allocator allocated memory was {self.caching_allocator_before} "
+                   f"and is now reported as {caching_allocator_mem_allocated}. "
+                   f"MPS driver allocated memory was {self.driver_before} and is now {driver_mem_allocated}.")
             warnings.warn(msg)
         elif caching_allocator_discrepancy and driver_discrepancy:
             # A caching allocator discrepancy validated by the driver API is a failure
-            msg = ("MPS driver API confirmed a leak in {}! "
-                   "Caching allocator allocated memory was {} and is now reported as {}. "
-                   "MPS driver allocated memory was {} and is now {}.").format(
-                self.name, self.caching_allocator_before, caching_allocator_mem_allocated,
-                self.driver_before, driver_mem_allocated)
+            msg = (f"MPS driver API confirmed a leak in {self.name}! "
+                   f"Caching allocator allocated memory was {self.caching_allocator_before} "
+                   f"and is now reported as {caching_allocator_mem_allocated}. "
+                   f"MPS driver allocated memory was {self.driver_before} and is now {driver_mem_allocated}.")

             raise RuntimeError(msg)
```
(Diff truncated: the remaining changed files are not shown here.)
