[pytorch][codegen] migrate gen_variable_factories.py to the new data model (pytorch#47818)

Summary:
Pull Request resolved: pytorch#47818

This is another relatively small codegen migration.

Ideally we should use CppSignature.decl() to generate the C++ function declaration.
We didn't, because this header needs an 'at::' prefix on types defined in the ATen namespace.

E.g.:
- standard declaration:
```
Tensor eye(int64_t n, int64_t m, const TensorOptions & options={})
```

- expected:
```
at::Tensor eye(int64_t n, int64_t m, const at::TensorOptions & options = {})
```

Kept the hacky fully_qualified_type() helper to stay compatible with the old codegen (a quick sketch of its behavior follows the list below).

We could clean this up later by:
- using these types from the torch namespace - but this is a user-facing header file,
  and it's not clear whether that would cause problems;
- updating the cpp.argument_type() method to take an optional namespace argument.
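
For reference, here is a minimal standalone sketch of what that helper does. It reuses the two regexes from gen_variable_factories.py but is a simplified illustration, not the code from the diff below:

```python
import re

# Same patterns as tools/autograd/gen_variable_factories.py.
OPTIONAL_TYPE_PATTERN = re.compile(r"c10::optional<(.+)>")
TYPE_PATTERN = re.compile(r"(?:const\s+)?([A-Z]\w+)")

def fully_qualified_type(argument_type: str) -> str:
    # Prefix capitalized (ATen) type names with 'at::', preserving a leading
    # 'const ' and an enclosing c10::optional<...> wrapper.
    opt = OPTIONAL_TYPE_PATTERN.match(argument_type)
    inner = opt.group(1) if opt else argument_type
    m = TYPE_PATTERN.match(inner)
    if m:
        i = m.start(1)
        inner = f'{inner[:i]}at::{inner[i:]}'
    return f'c10::optional<{inner}>' if opt else inner

print(fully_qualified_type("const TensorOptions &"))      # const at::TensorOptions &
print(fully_qualified_type("c10::optional<ScalarType>"))  # c10::optional<at::ScalarType>
print(fully_qualified_type("int64_t"))                    # int64_t (lower-case types are left alone)
```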

Confirmed byte-for-byte compatible with the old codegen:
```
Run it before and after this PR:
  .jenkins/pytorch/codegen-test.sh <baseline_output_dir>
  .jenkins/pytorch/codegen-test.sh <test_output_dir>

Then run diff to compare the generated files:
  diff -Naur <baseline_output_dir> <test_output_dir>
```

Test Plan: Imported from OSS

Reviewed By: bhosmer

Differential Revision: D24909478

Pulled By: ljk53

fbshipit-source-id: a0ceaa60cc765c526908fee39f151cd7ed5ec923
ljk53 authored and facebook-github-bot committed Nov 14, 2020
1 parent 4779553 commit 07e98d2
Showing 3 changed files with 59 additions and 54 deletions.
1 change: 1 addition & 0 deletions mypy-strict.ini
```diff
@@ -33,6 +33,7 @@ files = tools/codegen/gen.py,
     tools/autograd/gen_annotated_fn_args.py,
     tools/autograd/gen_python_functions.py,
     tools/autograd/gen_trace_type.py,
+    tools/autograd/gen_variable_factories.py,
     torch/utils/benchmark/utils/common.py,
     torch/utils/benchmark/utils/timer.py,
     torch/utils/benchmark/utils/valgrind_wrapper/*.py,
```
2 changes: 1 addition & 1 deletion tools/autograd/gen_autograd.py
```diff
@@ -191,7 +191,7 @@ def is_operator_selected_for_training(decl):
     # Generate variable_factories.h
     from .gen_variable_factories import gen_variable_factories
     # Some non-selectable ops (e.g. prim ops) need factory methods so we pass in `full_aten_decls` here.
-    gen_variable_factories(out, full_aten_decls, template_path)
+    gen_variable_factories(out, native_functions_path, template_path)
 
 
 def gen_autograd_python(aten_path, native_functions_path, out, autograd_dir):
```
110 changes: 57 additions & 53 deletions tools/autograd/gen_variable_factories.py
```diff
@@ -3,77 +3,81 @@
 # This writes one file: variable_factories.h
 
 import re
+from typing import Optional, List
 
-from .utils import CodeTemplate, write
-
-
-FUNCTION_TEMPLATE = CodeTemplate("""\
-inline at::Tensor ${name}(${formals}) {
-  at::Tensor tensor = ([&]() {
-    at::AutoNonVariableTypeMode non_var_type_mode(true);
-    return at::${name}(${actuals});
-  })();
-  at::Tensor result =
-    autograd::make_variable(std::move(tensor), /*requires_grad=*/${requires_grad});
-  return result;
-}
-""")
-
+from tools.codegen.api.types import *
+import tools.codegen.api.cpp as cpp
+import tools.codegen.api.python as python
+from tools.codegen.gen import with_native_function, parse_native_yaml, FileManager, mapMaybe
+from tools.codegen.model import *
 
 OPTIONAL_TYPE_PATTERN = re.compile(r"c10::optional<(.+)>")
 TYPE_PATTERN = re.compile(r"(?:const\s+)?([A-Z]\w+)")
 
 
-def fully_qualified_type(argument_type):
-    def maybe_optional_type(t, opt_match):
-        return 'c10::optional<{}>'.format(t) if opt_match else t
+# Add 'at::' to types defined in ATen namespace, e.g. Tensor, TensorList, IntArrayRef and etc.
+# TODO: maybe update the cpp argument API to take optional namespace argument?
+def fully_qualified_type(argument_type: str) -> str:
+    def maybe_optional_type(type: str, is_opt: bool) -> str:
+        return f'c10::optional<{type}>' if is_opt else type
 
     opt_match = OPTIONAL_TYPE_PATTERN.match(argument_type)
+    is_opt = opt_match is not None
     if opt_match:
         argument_type = argument_type[opt_match.start(1):opt_match.end(1)]
     match = TYPE_PATTERN.match(argument_type)
     if match is None:
-        return maybe_optional_type(argument_type, opt_match)
+        return maybe_optional_type(argument_type, is_opt)
     index = match.start(1)
-    qualified_type = "{}at::{}".format(argument_type[:index], argument_type[index:])
-    return maybe_optional_type(qualified_type, opt_match)
+    qualified_type = f'{argument_type[:index]}at::{argument_type[index:]}'
+    return maybe_optional_type(qualified_type, is_opt)
 
+def gen_variable_factories(out: str, native_yaml_path: str, template_path: str) -> None:
+    native_functions = parse_native_yaml(native_yaml_path)
+    fm = FileManager(install_dir=out, template_dir=template_path, dry_run=False)
+    fm.write_with_template('variable_factories.h', 'variable_factories.h', lambda: {
+        'generated_comment': '@' + f'generated from {fm.template_dir}/variable_factories.h',
+        'function_definitions': list(mapMaybe(process_function, native_functions)),
+    })
 
+@with_native_function
+def process_function(f: NativeFunction) -> Optional[str]:
+    name = cpp.name(f.func)
+    has_tensor_options = python.has_tensor_options(f)
+    is_factory = has_tensor_options or name.endswith("_like")
 
-def gen_variable_factories(out, declarations, template_path):
-    function_definitions = []
-    for decl in declarations:
-        has_tensor_options = any(a["simple_type"] == "TensorOptions" for a in decl["arguments"])
-        is_namespace_fn = 'namespace' in decl['method_of']
-        if (has_tensor_options or decl["name"].endswith("_like")) and is_namespace_fn:
-            function_definitions.append(
-                process_function(
-                    decl,
-                    has_tensor_options,
-                )
-            )
-    write(out,
-          "variable_factories.h",
-          CodeTemplate.from_file(template_path + "/variable_factories.h"),
-          {"function_definitions": function_definitions})
+    if Variant.function not in f.variants or not is_factory:
+        return None
 
+    sig = CppSignatureGroup.from_schema(f.func, method=False).signature
+    formals: List[str] = []
+    exprs: List[str] = []
+    requires_grad = 'false'
+    for arg in sig.arguments():
+        qualified_type = fully_qualified_type(arg.type)
+        if arg.default:
+            formals.append(f'{qualified_type} {arg.name} = {arg.default}')
+        else:
+            formals.append(f'{qualified_type} {arg.name}')
 
-def process_function(decl, has_tensor_options):
-    formals = []
-    actuals = []
-    for argument in decl["arguments"]:
-        type = fully_qualified_type(argument["type"])
-        default = " = {}".format(argument["default"]) if "default" in argument else ""
-        formals.append("{} {}{}".format(type, argument["name"], default))
-        actual = argument["name"]
-        if argument["simple_type"] == "TensorOptions":
+        if isinstance(arg.argument, TensorOptionsArguments):
             # note: we remove the requires_grad setting from the TensorOptions because
             # it is ignored anyways (and we actually have an assertion that it isn't set
             # which would fail otherwise). We handle requires_grad explicitly here
             # instead of passing it through to the kernel.
-            actual = "at::TensorOptions({}).requires_grad(c10::nullopt)".format(actual)
-        actuals.append(actual)
-    requires_grad = "options.requires_grad()" if has_tensor_options else "false"
+            exprs.append(f'at::TensorOptions({arg.name}).requires_grad(c10::nullopt)')
+            # Manually set the requires_grad bit on the result tensor.
+            requires_grad = f'{arg.name}.requires_grad()'
+        else:
+            exprs.append(arg.name)
 
-    return FUNCTION_TEMPLATE.substitute(
-        name=decl["name"], formals=formals, actuals=actuals, requires_grad=requires_grad
-    )
+    return f"""\
+inline at::Tensor {name}({', '.join(formals)}) {{
+  at::Tensor tensor = ([&]() {{
+    at::AutoNonVariableTypeMode non_var_type_mode(true);
+    return at::{name}({', '.join(exprs)});
+  }})();
+  at::Tensor result =
+    autograd::make_variable(std::move(tensor), /*requires_grad=*/{requires_grad});
+  return result;
+}}
+"""
```
