Enable possibly-undefined error code (pytorch#118533)
Fixes pytorch#118129

Suppressions were added automatically with the following script:

```
import re

# Read the saved mypy output, one reported error per line.
with open("error_file.txt", "r") as f:
    errors = f.readlines()

# Map each file to {line number: error code} for every reported error.
error_lines = {}
for error in errors:
    match = re.match(r"(.*):(\d+):\d+: error:.*\[(.*)\]", error)
    if match:
        file_path, line_number, error_type = match.groups()
        if file_path not in error_lines:
            error_lines[file_path] = {}
        error_lines[file_path][int(line_number)] = error_type

# Append a "# type: ignore[<code>]" suppression to each offending line,
# rewriting each affected file in place.
for file_path, lines in error_lines.items():
    with open(file_path, "r") as f:
        code = f.readlines()
    for line_number, error_type in sorted(lines.items(), key=lambda x: x[0], reverse=True):
        code[line_number - 1] = code[line_number - 1].rstrip() + f"  # type: ignore[{error_type}]\n"
    with open(file_path, "w") as f:
        f.writelines(code)
```
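
For reference, the regex in the script assumes mypy's standard error format with column numbers enabled (`show_column_numbers = True` is already set in mypy.ini). A hypothetical line from `error_file.txt` would look like:

```
torch/_inductor/utils.py:334:5: error: Name "result" may be undefined  [possibly-undefined]
```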

Signed-off-by: Edward Z. Yang <[email protected]>

Co-authored-by: Catherine Lee <[email protected]>
Pull Request resolved: pytorch#118533
Approved by: https://github.com/Skylion007, https://github.com/zou3519
clee2000 authored and pytorchmergebot committed Jan 30, 2024
1 parent e332653 commit 4f5785b
Showing 94 changed files with 200 additions and 197 deletions.
1 change: 1 addition & 0 deletions mypy.ini
@@ -13,6 +13,7 @@ show_column_numbers = True
check_untyped_defs = True
follow_imports = normal
local_partial_types = True
enable_error_code = possibly-undefined

# do not reenable this:
# https://github.com/pytorch/pytorch/pull/60006#issuecomment-866130657
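
For context, `possibly-undefined` makes mypy flag any name that is bound on only some control-flow paths before it is read. Below is a minimal sketch of the pattern (hypothetical code, not taken from this PR) and the suppression style applied throughout this change:

```python
import random


def maybe_use() -> None:
    if random.random() > 0.5:
        value = 1
    # Without the trailing comment, mypy would report:
    #   Name "value" may be undefined  [possibly-undefined]
    print(value)  # type: ignore[possibly-undefined]
```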
1 change: 1 addition & 0 deletions test/typing/pass/creation_ops.py
@@ -1,3 +1,4 @@
# mypy: disable-error-code="possibly-undefined"
# flake8: noqa
import torch
from torch.testing._internal.common_utils import TEST_NUMPY
1 change: 1 addition & 0 deletions test/typing/reveal/tensor_constructors.py
@@ -1,3 +1,4 @@
# mypy: disable-error-code="possibly-undefined"
# flake8: noqa
import torch
from torch.testing._internal.common_utils import TEST_NUMPY
2 changes: 1 addition & 1 deletion torch/__init__.py
@@ -515,7 +515,7 @@ def fn(a):
sym_sqrt = current_module._sym_sqrt
__all__.append("sym_sqrt")

del fn, name, sym_name, current_module
del fn, name, sym_name, current_module # type: ignore[possibly-undefined]


def sym_ite(b, t, f):
2 changes: 1 addition & 1 deletion torch/_decomp/decompositions.py
@@ -2832,7 +2832,7 @@ def _rnn_helper(
final_hiddens.append(bwd_hidden)

if bidirectional:
input = torch.cat([fwd_inp, bwd_inp], fwd_inp.dim() - 1)
input = torch.cat([fwd_inp, bwd_inp], fwd_inp.dim() - 1) # type: ignore[possibly-undefined]
else:
input = fwd_inp

4 changes: 2 additions & 2 deletions torch/_dynamo/convert_frame.py
@@ -163,7 +163,7 @@ def _fn(*args, **kwargs):
random.setstate(py_rng_state)
torch.random.set_rng_state(torch_rng_state)
if torch.cuda.is_available():
torch.cuda.set_rng_state(cuda_rng_state)
torch.cuda.set_rng_state(cuda_rng_state) # type: ignore[possibly-undefined]
torch.fx.graph_module._forward_from_src = prior_fwd_from_src
assert (
guards.check()
@@ -568,7 +568,7 @@ def log_bytecode(prefix, name, filename, line_no, code):
code.co_name,
code.co_filename,
code.co_firstlineno,
out_code,
out_code, # type: ignore[possibly-undefined]
)

for hook in _bytecode_hooks.values():
2 changes: 1 addition & 1 deletion torch/_dynamo/debug_utils.py
@@ -46,7 +46,7 @@
"//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu",
"//deeplearning/fbgemm/fbgemm_gpu:sparse_ops",
]
cur_target = libfb.py.build_info.BuildInfo.get_build_rule().replace("fbcode:", "//")
cur_target = libfb.py.build_info.BuildInfo.get_build_rule().replace("fbcode:", "//") # type: ignore[possibly-undefined]
extra_imports = "\n".join([f'torch.ops.load_library("{x}")' for x in extra_deps])


2 changes: 1 addition & 1 deletion torch/_dynamo/eval_frame.py
@@ -1430,7 +1430,7 @@ def graph_with_interpreter(*args):
example_fake_inputs,
graph_captured_input,
graph_captured_result,
result_traced,
result_traced, # type: ignore[possibly-undefined]
flat_args_dynamic_dims,
)
# Store constraints and inputs as metadata for user passes, e.g. turn constraints to runtime check
2 changes: 1 addition & 1 deletion torch/_dynamo/symbolic_convert.py
@@ -1115,7 +1115,7 @@ def POP_FINALLY(self, inst):
tos = self.pop()
_ = self.pop()
if preserve_tos:
self.push(tos)
self.push(tos) # type: ignore[possibly-undefined]

def FOR_ITER(self, inst):
it = self.pop().realize()
2 changes: 1 addition & 1 deletion torch/_dynamo/test_minifier_common.py
@@ -118,7 +118,7 @@ def _maybe_subprocess_run(self, args, *, isolate, cwd=None):
finally:
log.removeHandler(log_handler)
if cwd is not None:
os.chdir(prev_cwd)
os.chdir(prev_cwd) # type: ignore[possibly-undefined]
# Make sure we don't leave buggy compiled frames lying
# around
torch._dynamo.reset()
4 changes: 2 additions & 2 deletions torch/_dynamo/utils.py
@@ -773,7 +773,7 @@ def preserve_rng_state():
with torch.utils._python_dispatch._disable_current_modes():
torch.random.set_rng_state(rng_state)
if torch.cuda.is_available():
torch.cuda.set_rng_state(cuda_rng_state)
torch.cuda.set_rng_state(cuda_rng_state) # type: ignore[possibly-undefined]


def is_jit_model(model0):
@@ -892,7 +892,7 @@ def timed(model, example_inputs, times=1):
result = model(*example_inputs)
synchronize()
t1 = time.perf_counter()
return result, t1 - t0
return result, t1 - t0 # type: ignore[possibly-undefined]


def check_is_cuda(gm, example_inputs):
2 changes: 1 addition & 1 deletion torch/_functorch/autograd_function.py
@@ -199,7 +199,7 @@ def wrap_outputs_maintaining_identity(
result.append(unwrapped_input_to_orig_input[id(output)])
continue
if out_dims_specified:
result.append(wrap_fn(output, flat_out_dims[i])) # type: ignore[index]
result.append(wrap_fn(output, flat_out_dims[i])) # type: ignore[possibly-undefined, index]
else:
result.append(wrap_fn(output))

2 changes: 1 addition & 1 deletion torch/_higher_order_ops/triton_kernel_wrap.py
@@ -163,7 +163,7 @@ def parse_ttir(ttir, kwargs):
return None

try:
import lark
import lark # type: ignore[import-not-found]
from lark import Lark, Transformer, v_args
except ModuleNotFoundError:
warnings.warn(
10 changes: 5 additions & 5 deletions torch/_inductor/autotune_process.py
@@ -440,25 +440,25 @@ def benchmark(
output_tensor = self.output_tensor_meta.to_tensor()

if debug:
create_tensor_elapse = time.time() - start_ts
create_tensor_elapse = time.time() - start_ts # type: ignore[possibly-undefined]
start_ts = time.time()

fn = self.make_run_fn(*input_tensors, output_tensor=output_tensor)

if debug:
load_elapse = time.time() - start_ts
load_elapse = time.time() - start_ts # type: ignore[possibly-undefined]
start_ts = time.time()

out = do_bench(fn)
torch.cuda.synchronize() # shake out any CUDA errors

if debug:
bench_elapse = time.time() - start_ts
bench_elapse = time.time() - start_ts # type: ignore[possibly-undefined]
log.debug(
"InChildProcess %s: load %f, create tensor %f, bench %f",
str(self),
load_elapse,
create_tensor_elapse,
load_elapse, # type: ignore[possibly-undefined]
create_tensor_elapse, # type: ignore[possibly-undefined]
bench_elapse,
)
self.cleanup_run_fn()
4 changes: 2 additions & 2 deletions torch/_inductor/codegen/cuda/cutlass_epilogue_gen.py
@@ -99,7 +99,7 @@ def ir_to_evt_string(
result = pnode.inner_fn(index)
# each epilogue node results in a single "using" statement and may refer to the previous steps by name
formatter.aliases[node.name] = result
res = formatter.getvalue(result)
res = formatter.getvalue(result) # type: ignore[possibly-undefined]
if _MAGIC_SYMPY_ERROR_STRING in res:
raise CUTLASSEVTOpNotImplementedError(
"sympy / indexing expressions not yet supported in EVT fusion"
@@ -266,7 +266,7 @@ def ir_to_evt_argument_string(
if node.name is not None:
formatter.aliases[node.name] = result

res: str = formatter.getvalue(result)
res: str = formatter.getvalue(result) # type: ignore[possibly-undefined]
if _MAGIC_SYMPY_ERROR_STRING in res:
raise CUTLASSEVTOpNotImplementedError(
"sympy / indexing expressions not yet supported in EVT fusion"
2 changes: 1 addition & 1 deletion torch/_inductor/codegen/wrapper.py
@@ -155,7 +155,7 @@ def get_cpp_op_schema(kernel: torch._ops.OpOverload) -> str:
cpp_return_value = f"std::tuple<{tuple_returns}>"

cpp_arg_type = [f"{convert_arg_type(arg)} {arg.name}" for arg in args]
return f"{cpp_return_value}({', '.join(cpp_arg_type)})"
return f"{cpp_return_value}({', '.join(cpp_arg_type)})" # type: ignore[possibly-undefined]


# TODO: Move to a well known place
4 changes: 2 additions & 2 deletions torch/_inductor/comm_analysis.py
@@ -209,7 +209,7 @@ def estimate_nccl_collective_runtime(snode: "BaseSchedulerNode") -> float:
nsteps = nRanks - 1

# Convert bus BW to algorithm BW (tensor bytes / algoBW = actual execution time)
ratio = (1.0 * nRanks) / nsteps
ratio = (1.0 * nRanks) / nsteps # type: ignore[possibly-undefined]
bandwidth = busBw * ratio
# Convert GB/s to GB/ns
bandwidth_GB_per_ns = bandwidth / 1e9
@@ -236,7 +236,7 @@ def estimate_nccl_collective_runtime(snode: "BaseSchedulerNode") -> float:
if nNodes > 1:
netOverhead = 1.0 # getNetOverhead(comm);
intraLat = max(intraLat, netOverhead)
latency += (nsteps - nInterSteps) * intraLat + nInterSteps * interLat
latency += (nsteps - nInterSteps) * intraLat + nInterSteps * interLat # type: ignore[possibly-undefined]
# Convert us to ns
latency_ns = latency * 1e3

8 changes: 4 additions & 4 deletions torch/_inductor/fx_passes/group_batch_fusion.py
@@ -170,9 +170,9 @@ def fuse(self, graph: torch.fx.GraphModule, subset: List[torch.fx.Node]):
input, weight = node.args
bias = None
batch_nodes.append(node)
batch_inputs.append(input)
batch_weights.append(weight)
batch_biases.append(bias)
batch_inputs.append(input) # type: ignore[possibly-undefined]
batch_weights.append(weight) # type: ignore[possibly-undefined]
batch_biases.append(bias) # type: ignore[possibly-undefined]

with graph.inserting_before(subset[-1]):
fused_inputs = decompose_stack(graph, batch_inputs)
@@ -191,7 +191,7 @@ def fuse(self, graph: torch.fx.GraphModule, subset: List[torch.fx.Node]):
new_bias_add = graph.call_function(
aten.add, args=((batch_biases[i], new_mm))
)
new_mm_cont = new_bias_add if has_bias else new_mm
new_mm_cont = new_bias_add if has_bias else new_mm # type: ignore[possibly-undefined]
original_mm.replace_all_uses_with(new_mm_cont)
new_mm_cont.meta.update(original_mm.meta)
graph.erase_node(original_mm)
4 changes: 2 additions & 2 deletions torch/_inductor/fx_passes/mkldnn_fusion.py
@@ -283,7 +283,7 @@ def fn(match, *args, **kwargs):
L[aten.mul](out, negative_slope),
)
if lowp_dtype:
out = L[prims.convert_element_type.default](out, dtype=dtype2)
out = L[prims.convert_element_type.default](out, dtype=dtype2) # type: ignore[possibly-undefined]
return out

return fn
@@ -324,7 +324,7 @@ def fn(match, *args, **kwargs):
out = L[prims.convert_element_type.default](out, dtype=torch.float)
out = L[aten.clamp_max](L[aten.clamp_min](out, min_value), max_value)
if lowp_dtype:
out = L[prims.convert_element_type.default](out, dtype=dtype2)
out = L[prims.convert_element_type.default](out, dtype=dtype2) # type: ignore[possibly-undefined]
return out

return fn
2 changes: 1 addition & 1 deletion torch/_inductor/fx_passes/pre_grad.py
@@ -105,7 +105,7 @@ def pre_grad_passes(gm: torch.fx.GraphModule, example_inputs):

gm_after_fx_passes = gm.__copy__()
numeric_check_if_enabled(
gm_before_fx_passes,
gm_before_fx_passes, # type: ignore[possibly-undefined]
gm_after_fx_passes,
example_inputs,
config.fx_passes_numeric_check.get("num_iterations", 1),
10 changes: 5 additions & 5 deletions torch/_inductor/fx_passes/quantization.py
@@ -1360,7 +1360,7 @@ def qconv_weight_prepack(match: Match, *args, **kwargs):
graph.erase_node(conv_node)
# Erase the dequant pattern
if dtype == torch.bfloat16:
graph.erase_node(convert_to_bf16)
graph.erase_node(convert_to_bf16) # type: ignore[possibly-undefined]
# Erase the dequant pattern
graph.erase_node(mul_node)
graph.erase_node(sub_node)
@@ -1369,7 +1369,7 @@ def qconv_weight_prepack(match: Match, *args, **kwargs):
if clone_node is not None:
graph.erase_node(clone_node)
if dtype == torch.bfloat16:
graph.erase_node(weight_to_bf16_node)
graph.erase_node(weight_to_bf16_node) # type: ignore[possibly-undefined]
graph.erase_node(dequant_per_channel)
counters["inductor"]["qconv2d_weight_prepack_matcher_count"] += 1
counters["inductor"]["qconv2d_weight_prepack_matcher_nodes"] += len(
@@ -1697,14 +1697,14 @@ def qlinear_weight_prepack(match: Match, *args, **kwargs):
if input_contiguous:
graph.erase_node(output_reshape_node)
elif not input_contiguous and bias:
graph.erase_node(output_add_node_for_bias)
graph.erase_node(output_add_node_for_bias) # type: ignore[possibly-undefined]
graph.erase_node(linear_node)
if input_dim_exceeds_two:
if input_contiguous:
graph.erase_node(act_reshape_node)
else:
graph.erase_node(act_expand_node)
graph.erase_node(wgt_expand_node)
graph.erase_node(wgt_expand_node) # type: ignore[possibly-undefined]
if dtype == torch.bfloat16:
graph.erase_node(activation_to_bf16_node)
# Erase the dequant pattern
@@ -1714,7 +1714,7 @@ def qlinear_weight_prepack(match: Match, *args, **kwargs):
# Erase the dequant per channel pattern
graph.erase_node(t_node)
if dtype == torch.bfloat16:
graph.erase_node(weight_to_bf16_node)
graph.erase_node(weight_to_bf16_node) # type: ignore[possibly-undefined]
graph.erase_node(dequant_per_channel)

counters["inductor"]["qlinear_weight_prepack_matcher_count"] += 1
2 changes: 1 addition & 1 deletion torch/_inductor/graph.py
@@ -857,7 +857,7 @@ def debug(msg):
):
debug("fallback_handler")
result = fallback_handler(n.target, add_to_fallback_set=False)(
*args, **kwargs
*args, **kwargs # type: ignore[possibly-undefined]
)
elif n.op == "call_function" and n.target in layout_constraints:
debug("layout_constraints")
6 changes: 3 additions & 3 deletions torch/_inductor/lowering.py
@@ -607,7 +607,7 @@ def register_pointwise(
fn,
override_return_dtype=override_return_dtype,
override_fn_when_input_bool=override_fn_when_input_bool,
override_fn_when_cuda_float64=fn_libdevice if use_libdevice_for_f64 else None,
override_fn_when_cuda_float64=fn_libdevice if use_libdevice_for_f64 else None, # type: ignore[possibly-undefined]
allow_alpha=allow_alpha,
)
fn = register_lowering(
@@ -3630,8 +3630,8 @@ def accumulate(grad, out, index_ranges):
out = right_reflect[i]
index_range = (xyz[i], dhw[i] - padding_right[i], dhw[i] - 1)

outs.append(out)
index_ranges.append(index_range)
outs.append(out) # type: ignore[possibly-undefined]
index_ranges.append(index_range) # type: ignore[possibly-undefined]

grad = accumulate(grad, outs, index_ranges)

2 changes: 1 addition & 1 deletion torch/_inductor/pattern_matcher.py
@@ -1196,7 +1196,7 @@ def apply(self, graph: torch.fx.GraphModule) -> int:
if (
self.prevent_match_across_mutations
and is_match(m)
and len(set(map(get_mutation_region_id_partial, m.nodes))) != 1
and len(set(map(get_mutation_region_id_partial, m.nodes))) != 1 # type: ignore[possibly-undefined]
):
continue
if os.environ.get("TORCHINDUCTOR_PATTERN_MATCH_DEBUG") == node.name:
10 changes: 5 additions & 5 deletions torch/_inductor/scheduler.py
@@ -1038,7 +1038,7 @@ def fuse(cls, producer, consumer):
else:
fused_nodes.append(node)

return cls(producer.scheduler, fused_nodes, prev_node_1, prev_node_2)
return cls(producer.scheduler, fused_nodes, prev_node_1, prev_node_2) # type: ignore[possibly-undefined]

def __init__(
self,
@@ -2256,13 +2256,13 @@ def codegen(self):

if node.is_template():
node, *epilogue = node.get_nodes()
self.get_backend(device).codegen_template(node, epilogue)
self.get_backend(device).codegen_template(node, epilogue) # type: ignore[possibly-undefined]
elif node.is_extern():
self.codegen_extern_call(node)
elif node.is_foreach():
self.get_backend(device).codegen_foreach(node)
self.get_backend(device).codegen_foreach(node) # type: ignore[possibly-undefined]
elif isinstance(node, (FusedSchedulerNode, SchedulerNode)):
self.get_backend(device).codegen_nodes(node.get_nodes())
self.get_backend(device).codegen_nodes(node.get_nodes()) # type: ignore[possibly-undefined]
else:
assert isinstance(node, NopKernelSchedulerNode)
node.allocate()
@@ -2271,7 +2271,7 @@ def codegen(self):
V.graph.wrapper_code.generate_inf_and_nan_checker(node)

if config.triton.debug_sync_kernel:
self.get_backend(device).codegen_sync()
self.get_backend(device).codegen_sync() # type: ignore[possibly-undefined]

self.available_buffer_names.update(node.get_names())

2 changes: 1 addition & 1 deletion torch/_inductor/utils.py
@@ -331,7 +331,7 @@ def timed(
synchronize(device)
t1 = time.perf_counter()
# GC the result after timing
assert result is not None
assert result is not None # type: ignore[possibly-undefined]
return t1 - t0

