Skip to content

Commit

Permalink
quant doc: improve rendered documentation for backend_config_dict
Browse files Browse the repository at this point in the history
Summary:

This improves the documentation page for backend_config_dict to render
the configurations in a human readable format, such as

```
{
  'pattern': torch.nn.modules.pooling.AdaptiveAvgPool1d,
  'dtype_configs': [
    {
      'input_dtype': torch.quint8,
      'output_dtype': torch.quint8,
    },
    {
      'input_dtype': torch.float16,
      'weight_dtype': torch.float16,
      'bias_dtype': torch.float16,
      'output_dtype': torch.float16,
    },
  ],
  'observation_type': ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT,
},
```

The results are also now sorted alphabetically by the normalized name of
the root op in the pattern.

A couple of utility functions are created to help with this. If in the future
we convert backend_config_dict to use typed objects, we can move this logic
to the objects at that time.

Test plan:

```
cd docs
make html
cd build
python -m http.server
// renders correctly, example: https://gist.github.com/vkuzo/76adfc7c89e119c59813a733fa2cd56f
```

Pull Request resolved: pytorch#77535

Approved by: https://github.com/andrewor14
  • Loading branch information
vkuzo authored and pytorchmergebot committed May 18, 2022
1 parent e7cb44b commit c15fca1
Show file tree
Hide file tree
Showing 2 changed files with 126 additions and 2 deletions.
43 changes: 41 additions & 2 deletions docs/source/scripts/build_quantization_configs.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,9 +3,13 @@
These are for use in the documentation.
"""

import torch
from torch.ao.quantization.backend_config import get_native_backend_config_dict
from torch.ao.quantization.backend_config.utils import (
entry_to_pretty_str,
remove_boolean_dispatch_from_name,
)
import os.path
from pprint import pprint


# Create a directory for the images, if it doesn't exist
Expand All @@ -20,4 +24,39 @@
output_path = os.path.join(QUANTIZATION_BACKEND_CONFIG_IMAGE_PATH, "default_backend_config.txt")

with open(output_path, "w") as f:
pprint(get_native_backend_config_dict(), stream=f)
native_backend_config_dict = get_native_backend_config_dict()

configs = native_backend_config_dict['configs']

def _sort_key_func(entry):
pattern = entry['pattern']
while isinstance(pattern, tuple):
pattern = pattern[-1]

pattern = remove_boolean_dispatch_from_name(pattern)
if not isinstance(pattern, str):
# methods are already strings
pattern = torch.typename(pattern)

# we want
#
# torch.nn.modules.pooling.AdaptiveAvgPool1d
#
# and
#
# torch._VariableFunctionsClass.adaptive_avg_pool1d
#
# to be next to each other, so convert to all lower case
# and remove the underscores, and compare the last part
# of the string
pattern_str_normalized = pattern.lower().replace('_', '')
key = pattern_str_normalized.split('.')[-1]
return key

configs.sort(key=_sort_key_func)

entries = []
for entry in configs:
entries.append(entry_to_pretty_str(entry))
entries = ",\n".join(entries)
f.write(entries)
85 changes: 85 additions & 0 deletions torch/ao/quantization/backend_config/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@

import torch
import torch.nn as nn
import torch.nn.functional as F
from ..quantization_types import Pattern

def get_pattern_to_dtype_configs(
Expand Down Expand Up @@ -115,3 +116,87 @@ def extra_inputs_getter(pattern) -> List[Any]:
extra_inputs_getter_mapping[pattern] = extra_inputs_getter

return extra_inputs_getter_mapping

def remove_boolean_dispatch_from_name(p) -> Any:
    """
    Some ops have a default string representation such as
    '<function boolean_dispatch.<locals>.fn at 0x7ff1106bf280>',
    this function replaces them with the hardcoded function names.
    """
    # boolean-dispatched ops mapped to their hardcoded readable names;
    # matched by identity, same as a chain of `p is ...` checks
    dispatched_ops = (
        (F.fractional_max_pool2d, "torch.nn.functional.fractional_max_pool2d"),
        (F.fractional_max_pool3d, "torch.nn.functional.fractional_max_pool3d"),
        (F.max_pool1d, "torch.nn.functional.max_pool1d"),
        (F.max_pool2d, "torch.nn.functional.max_pool2d"),
        (F.max_pool3d, "torch.nn.functional.max_pool3d"),
        (F.adaptive_max_pool1d, "torch.nn.functional.adaptive_max_pool1d"),
        (F.adaptive_max_pool2d, "torch.nn.functional.adaptive_max_pool2d"),
        (F.adaptive_max_pool3d, "torch.nn.functional.adaptive_max_pool3d"),
    )
    for op, readable_name in dispatched_ops:
        if p is op:
            return readable_name
    # anything still carrying a boolean_dispatch repr has no entry above
    assert "boolean_dispatch" not in str(p), \
        f"{p} does not have a human readable representation in " + \
        "quantization documentation"
    return p

def pattern_to_human_readable(p) -> Any:
    """Convert a quantization pattern to a human readable representation."""
    if isinstance(p, str):
        # method names are already human readable
        return p
    if isinstance(p, tuple):
        # nested patterns, recurse into each element
        return tuple(map(pattern_to_human_readable, p))
    # single op: strip any boolean_dispatch repr
    return remove_boolean_dispatch_from_name(p)

# TODO(future PR): move backend_config_dict to use dataclass and move this logic to
# the corresponding __str__ function
def entry_to_pretty_str(entry) -> str:
    """
    Given a backend_config_dict entry, returns a string with the human readable
    representation of it, such as

    {
      'pattern': torch.nn.modules.pooling.AdaptiveAvgPool1d,
      'dtype_configs': [
        {
          'input_dtype': torch.quint8,
          'output_dtype': torch.quint8,
        },
      ],
      'observation_type': ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT,
    }

    Values are interpolated without quotes so that torch types render as
    e.g. `torch.quint8` rather than their default reprs.
    """
    s = "{\n"

    # always output the pattern first
    if "pattern" in entry:
        pattern_str = pattern_to_human_readable(entry["pattern"])

        s += f"  'pattern': {pattern_str},\n"

    # custom output for dtype_configs to make it look nice
    if "dtype_configs" in entry:
        s += "  'dtype_configs': [\n"
        for dtype_config in entry["dtype_configs"]:
            s += "    {\n"
            for k, v in dtype_config.items():
                s += f"      '{k}': {v},\n"
            s += "    },\n"
        s += "  ],\n"

    # custom output for num_tensor_args_to_observation_type to make it look nice
    if "num_tensor_args_to_observation_type" in entry:
        s += "  'num_tensor_args_to_observation_type': {\n"
        for k, v in entry["num_tensor_args_to_observation_type"].items():
            s += f"    {k}: {v},\n"
        s += "  },\n"

    # output all the other fields
    custom_handled_fields = [
        "pattern",
        "dtype_configs",
        "num_tensor_args_to_observation_type",
    ]
    for field_name in entry:
        if field_name in custom_handled_fields:
            continue
        s += f"  '{field_name}': {entry[field_name]},\n"

    s += "}"
    return s

0 comments on commit c15fca1

Please sign in to comment.