Revert "Enable possibly-undefined error code (pytorch#118533)"
This reverts commit 4f13f69.

Reverted pytorch#118533 on behalf of https://github.com/clee2000 due to: sorry, I'm trying to figure out a codev merge conflict; if this works I'll be back to rebase and merge ([comment](pytorch#118533 (comment)))
pytorchmergebot committed Jan 30, 2024
1 parent 6511811 commit 40ece2e
Showing 94 changed files with 197 additions and 200 deletions.
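For background on the check being switched off: pytorch#118533 added enable_error_code = possibly-undefined to mypy.ini, which makes mypy flag any read of a variable that is assigned on only some control-flow paths, and it added per-line # type: ignore[possibly-undefined] suppressions where PyTorch relies on such patterns. A minimal sketch of the kind of code the check flags (hypothetical function, not from the PyTorch tree):

    def scale(flag: bool) -> int:
        if flag:
            value = 10
        # With the check enabled, mypy reports roughly:
        #   error: Name "value" may be undefined  [possibly-undefined]
        return value

The same check can be tried without editing mypy.ini by passing mypy's --enable-error-code possibly-undefined flag on the command line.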
mypy.ini (1 change: 0 additions & 1 deletion)
@@ -13,7 +13,6 @@ show_column_numbers = True
check_untyped_defs = True
follow_imports = normal
local_partial_types = True
-enable_error_code = possibly-undefined

# do not reenable this:
# https://github.com/pytorch/pytorch/pull/60006#issuecomment-866130657
test/typing/pass/creation_ops.py (1 change: 0 additions & 1 deletion)
@@ -1,4 +1,3 @@
-# mypy: disable-error-code="possibly-undefined"
# flake8: noqa
import torch
from torch.testing._internal.common_utils import TEST_NUMPY
test/typing/reveal/tensor_constructors.py (1 change: 0 additions & 1 deletion)
@@ -1,4 +1,3 @@
-# mypy: disable-error-code="possibly-undefined"
# flake8: noqa
import torch
from torch.testing._internal.common_utils import TEST_NUMPY
torch/__init__.py (2 changes: 1 addition & 1 deletion)
@@ -515,7 +515,7 @@ def fn(a):
sym_sqrt = current_module._sym_sqrt
__all__.append("sym_sqrt")

-del fn, name, sym_name, current_module # type: ignore[possibly-undefined]
+del fn, name, sym_name, current_module


def sym_ite(b, t, f):
torch/_decomp/decompositions.py (2 changes: 1 addition & 1 deletion)
@@ -2832,7 +2832,7 @@ def _rnn_helper(
final_hiddens.append(bwd_hidden)

if bidirectional:
-input = torch.cat([fwd_inp, bwd_inp], fwd_inp.dim() - 1) # type: ignore[possibly-undefined]
+input = torch.cat([fwd_inp, bwd_inp], fwd_inp.dim() - 1)
else:
input = fwd_inp

torch/_dynamo/convert_frame.py (4 changes: 2 additions & 2 deletions)
@@ -163,7 +163,7 @@ def _fn(*args, **kwargs):
random.setstate(py_rng_state)
torch.random.set_rng_state(torch_rng_state)
if torch.cuda.is_available():
-torch.cuda.set_rng_state(cuda_rng_state) # type: ignore[possibly-undefined]
+torch.cuda.set_rng_state(cuda_rng_state)
torch.fx.graph_module._forward_from_src = prior_fwd_from_src
assert (
guards.check()
@@ -568,7 +568,7 @@ def log_bytecode(prefix, name, filename, line_no, code):
code.co_name,
code.co_filename,
code.co_firstlineno,
-out_code, # type: ignore[possibly-undefined]
+out_code,
)

for hook in _bytecode_hooks.values():
torch/_dynamo/debug_utils.py (2 changes: 1 addition & 1 deletion)
@@ -46,7 +46,7 @@
"//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu",
"//deeplearning/fbgemm/fbgemm_gpu:sparse_ops",
]
-cur_target = libfb.py.build_info.BuildInfo.get_build_rule().replace("fbcode:", "//") # type: ignore[possibly-undefined]
+cur_target = libfb.py.build_info.BuildInfo.get_build_rule().replace("fbcode:", "//")
extra_imports = "\n".join([f'torch.ops.load_library("{x}")' for x in extra_deps])


torch/_dynamo/eval_frame.py (2 changes: 1 addition & 1 deletion)
@@ -1430,7 +1430,7 @@ def graph_with_interpreter(*args):
example_fake_inputs,
graph_captured_input,
graph_captured_result,
-result_traced, # type: ignore[possibly-undefined]
+result_traced,
flat_args_dynamic_dims,
)
# Store constraints and inputs as metadata for user passes, e.g. turn constraints to runtime check
torch/_dynamo/symbolic_convert.py (2 changes: 1 addition & 1 deletion)
@@ -1115,7 +1115,7 @@ def POP_FINALLY(self, inst):
tos = self.pop()
_ = self.pop()
if preserve_tos:
-self.push(tos) # type: ignore[possibly-undefined]
+self.push(tos)

def FOR_ITER(self, inst):
it = self.pop().realize()
torch/_dynamo/test_minifier_common.py (2 changes: 1 addition & 1 deletion)
@@ -118,7 +118,7 @@ def _maybe_subprocess_run(self, args, *, isolate, cwd=None):
finally:
log.removeHandler(log_handler)
if cwd is not None:
-os.chdir(prev_cwd) # type: ignore[possibly-undefined]
+os.chdir(prev_cwd)
# Make sure we don't leave buggy compiled frames lying
# around
torch._dynamo.reset()
torch/_dynamo/utils.py (4 changes: 2 additions & 2 deletions)
@@ -773,7 +773,7 @@ def preserve_rng_state():
with torch.utils._python_dispatch._disable_current_modes():
torch.random.set_rng_state(rng_state)
if torch.cuda.is_available():
-torch.cuda.set_rng_state(cuda_rng_state) # type: ignore[possibly-undefined]
+torch.cuda.set_rng_state(cuda_rng_state)


def is_jit_model(model0):
@@ -892,7 +892,7 @@ def timed(model, example_inputs, times=1):
result = model(*example_inputs)
synchronize()
t1 = time.perf_counter()
-return result, t1 - t0 # type: ignore[possibly-undefined]
+return result, t1 - t0


def check_is_cuda(gm, example_inputs):
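Most of the # type: ignore[possibly-undefined] comments removed in this revert cover the same shape of code as the preserve_rng_state hunk above: a variable is bound under one torch.cuda.is_available() check and read under a later, matching check, and mypy's flow analysis cannot connect the two conditions, so the read is reported even though it is safe at runtime. A simplified sketch of that pattern (not the actual torch/_dynamo/utils.py implementation):

    import torch

    def run_with_preserved_rng(fn):
        # Capture RNG state up front; CUDA state only exists when CUDA is available.
        cpu_rng_state = torch.random.get_rng_state()
        if torch.cuda.is_available():
            cuda_rng_state = torch.cuda.get_rng_state()
        try:
            return fn()
        finally:
            torch.random.set_rng_state(cpu_rng_state)
            if torch.cuda.is_available():
                # Flagged as possibly-undefined: cuda_rng_state is bound only
                # when the earlier branch ran, which mypy cannot prove here.
                torch.cuda.set_rng_state(cuda_rng_state)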
torch/_functorch/autograd_function.py (2 changes: 1 addition & 1 deletion)
@@ -199,7 +199,7 @@ def wrap_outputs_maintaining_identity(
result.append(unwrapped_input_to_orig_input[id(output)])
continue
if out_dims_specified:
-result.append(wrap_fn(output, flat_out_dims[i])) # type: ignore[possibly-undefined, index]
+result.append(wrap_fn(output, flat_out_dims[i])) # type: ignore[index]
else:
result.append(wrap_fn(output))

torch/_higher_order_ops/triton_kernel_wrap.py (2 changes: 1 addition & 1 deletion)
@@ -163,7 +163,7 @@ def parse_ttir(ttir, kwargs):
return None

try:
-import lark # type: ignore[import-not-found]
+import lark
from lark import Lark, Transformer, v_args
except ModuleNotFoundError:
warnings.warn(
torch/_inductor/autotune_process.py (10 changes: 5 additions & 5 deletions)
@@ -440,25 +440,25 @@ def benchmark(
output_tensor = self.output_tensor_meta.to_tensor()

if debug:
-create_tensor_elapse = time.time() - start_ts # type: ignore[possibly-undefined]
+create_tensor_elapse = time.time() - start_ts
start_ts = time.time()

fn = self.make_run_fn(*input_tensors, output_tensor=output_tensor)

if debug:
-load_elapse = time.time() - start_ts # type: ignore[possibly-undefined]
+load_elapse = time.time() - start_ts
start_ts = time.time()

out = do_bench(fn)
torch.cuda.synchronize() # shake out any CUDA errors

if debug:
-bench_elapse = time.time() - start_ts # type: ignore[possibly-undefined]
+bench_elapse = time.time() - start_ts
log.debug(
"InChildProcess %s: load %f, create tensor %f, bench %f",
str(self),
-load_elapse, # type: ignore[possibly-undefined]
-create_tensor_elapse, # type: ignore[possibly-undefined]
+load_elapse,
+create_tensor_elapse,
bench_elapse,
)
self.cleanup_run_fn()
torch/_inductor/codegen/cuda/cutlass_epilogue_gen.py (4 changes: 2 additions & 2 deletions)
@@ -99,7 +99,7 @@ def ir_to_evt_string(
result = pnode.inner_fn(index)
# each epilogue node results in a single "using" statement and may refer to the previous steps by name
formatter.aliases[node.name] = result
-res = formatter.getvalue(result) # type: ignore[possibly-undefined]
+res = formatter.getvalue(result)
if _MAGIC_SYMPY_ERROR_STRING in res:
raise CUTLASSEVTOpNotImplementedError(
"sympy / indexing expressions not yet supported in EVT fusion"
@@ -266,7 +266,7 @@ def ir_to_evt_argument_string(
if node.name is not None:
formatter.aliases[node.name] = result

-res: str = formatter.getvalue(result) # type: ignore[possibly-undefined]
+res: str = formatter.getvalue(result)
if _MAGIC_SYMPY_ERROR_STRING in res:
raise CUTLASSEVTOpNotImplementedError(
"sympy / indexing expressions not yet supported in EVT fusion"
torch/_inductor/codegen/wrapper.py (2 changes: 1 addition & 1 deletion)
@@ -155,7 +155,7 @@ def get_cpp_op_schema(kernel: torch._ops.OpOverload) -> str:
cpp_return_value = f"std::tuple<{tuple_returns}>"

cpp_arg_type = [f"{convert_arg_type(arg)} {arg.name}" for arg in args]
return f"{cpp_return_value}({', '.join(cpp_arg_type)})" # type: ignore[possibly-undefined]
return f"{cpp_return_value}({', '.join(cpp_arg_type)})"


# TODO: Move to a well known place
torch/_inductor/comm_analysis.py (4 changes: 2 additions & 2 deletions)
@@ -209,7 +209,7 @@ def estimate_nccl_collective_runtime(snode: "BaseSchedulerNode") -> float:
nsteps = nRanks - 1

# Convert bus BW to algorithm BW (tensor bytes / algoBW = actual execution time)
-ratio = (1.0 * nRanks) / nsteps # type: ignore[possibly-undefined]
+ratio = (1.0 * nRanks) / nsteps
bandwidth = busBw * ratio
# Convert GB/s to GB/ns
bandwidth_GB_per_ns = bandwidth / 1e9
@@ -236,7 +236,7 @@ def estimate_nccl_collective_runtime(snode: "BaseSchedulerNode") -> float:
if nNodes > 1:
netOverhead = 1.0 # getNetOverhead(comm);
intraLat = max(intraLat, netOverhead)
-latency += (nsteps - nInterSteps) * intraLat + nInterSteps * interLat # type: ignore[possibly-undefined]
+latency += (nsteps - nInterSteps) * intraLat + nInterSteps * interLat
# Convert us to ns
latency_ns = latency * 1e3

torch/_inductor/fx_passes/group_batch_fusion.py (8 changes: 4 additions & 4 deletions)
@@ -170,9 +170,9 @@ def fuse(self, graph: torch.fx.GraphModule, subset: List[torch.fx.Node]):
input, weight = node.args
bias = None
batch_nodes.append(node)
-batch_inputs.append(input) # type: ignore[possibly-undefined]
-batch_weights.append(weight) # type: ignore[possibly-undefined]
-batch_biases.append(bias) # type: ignore[possibly-undefined]
+batch_inputs.append(input)
+batch_weights.append(weight)
+batch_biases.append(bias)

with graph.inserting_before(subset[-1]):
fused_inputs = decompose_stack(graph, batch_inputs)
@@ -191,7 +191,7 @@ def fuse(self, graph: torch.fx.GraphModule, subset: List[torch.fx.Node]):
new_bias_add = graph.call_function(
aten.add, args=((batch_biases[i], new_mm))
)
-new_mm_cont = new_bias_add if has_bias else new_mm # type: ignore[possibly-undefined]
+new_mm_cont = new_bias_add if has_bias else new_mm
original_mm.replace_all_uses_with(new_mm_cont)
new_mm_cont.meta.update(original_mm.meta)
graph.erase_node(original_mm)
torch/_inductor/fx_passes/mkldnn_fusion.py (4 changes: 2 additions & 2 deletions)
@@ -283,7 +283,7 @@ def fn(match, *args, **kwargs):
L[aten.mul](out, negative_slope),
)
if lowp_dtype:
-out = L[prims.convert_element_type.default](out, dtype=dtype2) # type: ignore[possibly-undefined]
+out = L[prims.convert_element_type.default](out, dtype=dtype2)
return out

return fn
@@ -324,7 +324,7 @@ def fn(match, *args, **kwargs):
out = L[prims.convert_element_type.default](out, dtype=torch.float)
out = L[aten.clamp_max](L[aten.clamp_min](out, min_value), max_value)
if lowp_dtype:
-out = L[prims.convert_element_type.default](out, dtype=dtype2) # type: ignore[possibly-undefined]
+out = L[prims.convert_element_type.default](out, dtype=dtype2)
return out

return fn
torch/_inductor/fx_passes/pre_grad.py (2 changes: 1 addition & 1 deletion)
@@ -105,7 +105,7 @@ def pre_grad_passes(gm: torch.fx.GraphModule, example_inputs):

gm_after_fx_passes = gm.__copy__()
numeric_check_if_enabled(
-gm_before_fx_passes, # type: ignore[possibly-undefined]
+gm_before_fx_passes,
gm_after_fx_passes,
example_inputs,
config.fx_passes_numeric_check.get("num_iterations", 1),
torch/_inductor/fx_passes/quantization.py (10 changes: 5 additions & 5 deletions)
@@ -1360,7 +1360,7 @@ def qconv_weight_prepack(match: Match, *args, **kwargs):
graph.erase_node(conv_node)
# Erase the dequant pattern
if dtype == torch.bfloat16:
-graph.erase_node(convert_to_bf16) # type: ignore[possibly-undefined]
+graph.erase_node(convert_to_bf16)
# Erase the dequant pattern
graph.erase_node(mul_node)
graph.erase_node(sub_node)
@@ -1369,7 +1369,7 @@ def qconv_weight_prepack(match: Match, *args, **kwargs):
if clone_node is not None:
graph.erase_node(clone_node)
if dtype == torch.bfloat16:
-graph.erase_node(weight_to_bf16_node) # type: ignore[possibly-undefined]
+graph.erase_node(weight_to_bf16_node)
graph.erase_node(dequant_per_channel)
counters["inductor"]["qconv2d_weight_prepack_matcher_count"] += 1
counters["inductor"]["qconv2d_weight_prepack_matcher_nodes"] += len(
@@ -1697,14 +1697,14 @@ def qlinear_weight_prepack(match: Match, *args, **kwargs):
if input_contiguous:
graph.erase_node(output_reshape_node)
elif not input_contiguous and bias:
-graph.erase_node(output_add_node_for_bias) # type: ignore[possibly-undefined]
+graph.erase_node(output_add_node_for_bias)
graph.erase_node(linear_node)
if input_dim_exceeds_two:
if input_contiguous:
graph.erase_node(act_reshape_node)
else:
graph.erase_node(act_expand_node)
-graph.erase_node(wgt_expand_node) # type: ignore[possibly-undefined]
+graph.erase_node(wgt_expand_node)
if dtype == torch.bfloat16:
graph.erase_node(activation_to_bf16_node)
# Erase the dequant pattern
@@ -1714,7 +1714,7 @@ def qlinear_weight_prepack(match: Match, *args, **kwargs):
# Erase the dequant per channel pattern
graph.erase_node(t_node)
if dtype == torch.bfloat16:
-graph.erase_node(weight_to_bf16_node) # type: ignore[possibly-undefined]
+graph.erase_node(weight_to_bf16_node)
graph.erase_node(dequant_per_channel)

counters["inductor"]["qlinear_weight_prepack_matcher_count"] += 1
torch/_inductor/graph.py (2 changes: 1 addition & 1 deletion)
@@ -845,7 +845,7 @@ def debug(msg):
):
debug("fallback_handler")
result = fallback_handler(n.target, add_to_fallback_set=False)(
-*args, **kwargs # type: ignore[possibly-undefined]
+*args, **kwargs
)
elif n.op == "call_function" and n.target in layout_constraints:
debug("layout_constraints")
torch/_inductor/lowering.py (6 changes: 3 additions & 3 deletions)
@@ -607,7 +607,7 @@ def register_pointwise(
fn,
override_return_dtype=override_return_dtype,
override_fn_when_input_bool=override_fn_when_input_bool,
-override_fn_when_cuda_float64=fn_libdevice if use_libdevice_for_f64 else None, # type: ignore[possibly-undefined]
+override_fn_when_cuda_float64=fn_libdevice if use_libdevice_for_f64 else None,
allow_alpha=allow_alpha,
)
fn = register_lowering(
@@ -3630,8 +3630,8 @@ def accumulate(grad, out, index_ranges):
out = right_reflect[i]
index_range = (xyz[i], dhw[i] - padding_right[i], dhw[i] - 1)

-outs.append(out) # type: ignore[possibly-undefined]
-index_ranges.append(index_range) # type: ignore[possibly-undefined]
+outs.append(out)
+index_ranges.append(index_range)

grad = accumulate(grad, outs, index_ranges)

torch/_inductor/pattern_matcher.py (2 changes: 1 addition & 1 deletion)
@@ -1196,7 +1196,7 @@ def apply(self, graph: torch.fx.GraphModule) -> int:
if (
self.prevent_match_across_mutations
and is_match(m)
-and len(set(map(get_mutation_region_id_partial, m.nodes))) != 1 # type: ignore[possibly-undefined]
+and len(set(map(get_mutation_region_id_partial, m.nodes))) != 1
):
continue
if os.environ.get("TORCHINDUCTOR_PATTERN_MATCH_DEBUG") == node.name:
torch/_inductor/scheduler.py (10 changes: 5 additions & 5 deletions)
@@ -1038,7 +1038,7 @@ def fuse(cls, producer, consumer):
else:
fused_nodes.append(node)

-return cls(producer.scheduler, fused_nodes, prev_node_1, prev_node_2) # type: ignore[possibly-undefined]
+return cls(producer.scheduler, fused_nodes, prev_node_1, prev_node_2)

def __init__(
self,
@@ -2256,13 +2256,13 @@ def codegen(self):

if node.is_template():
node, *epilogue = node.get_nodes()
-self.get_backend(device).codegen_template(node, epilogue) # type: ignore[possibly-undefined]
+self.get_backend(device).codegen_template(node, epilogue)
elif node.is_extern():
self.codegen_extern_call(node)
elif node.is_foreach():
-self.get_backend(device).codegen_foreach(node) # type: ignore[possibly-undefined]
+self.get_backend(device).codegen_foreach(node)
elif isinstance(node, (FusedSchedulerNode, SchedulerNode)):
-self.get_backend(device).codegen_nodes(node.get_nodes()) # type: ignore[possibly-undefined]
+self.get_backend(device).codegen_nodes(node.get_nodes())
else:
assert isinstance(node, NopKernelSchedulerNode)
node.allocate()
@@ -2271,7 +2271,7 @@ def codegen(self):
V.graph.wrapper_code.generate_inf_and_nan_checker(node)

if config.triton.debug_sync_kernel:
-self.get_backend(device).codegen_sync() # type: ignore[possibly-undefined]
+self.get_backend(device).codegen_sync()

self.available_buffer_names.update(node.get_names())

torch/_inductor/utils.py (2 changes: 1 addition & 1 deletion)
@@ -331,7 +331,7 @@ def timed(
synchronize(device)
t1 = time.perf_counter()
# GC the result after timing
-assert result is not None # type: ignore[possibly-undefined]
+assert result is not None
return t1 - t0


(The remaining changed files are not rendered on this page.)
