Enable xdoctest runner in CI for real this time (pytorch#83816)
Builds on pytorch#83317 and enables actually running the doctests in CI. The remaining task is to figure out what is causing the failures.

Pull Request resolved: pytorch#83816
Approved by: https://github.com/ezyang, https://github.com/malfet
Erotemic authored and pytorchmergebot committed Dec 29, 2022
1 parent fb4fc0d commit ad782ff
Showing 90 changed files with 456 additions and 262 deletions.
4 changes: 2 additions & 2 deletions .circleci/docker/requirements-ci.txt
@@ -179,9 +179,9 @@ pytest-rerunfailures
#Pinned versions:
#test that import:

xdoctest==1.0.2
xdoctest==1.1.0
#Description: runs doctests in pytest
#Pinned versions: 1.0.2
#Pinned versions: 1.1.0
#test that import:

pygments==2.12.0
2 changes: 1 addition & 1 deletion .github/requirements/pip-requirements-macOS.txt
@@ -19,4 +19,4 @@ pytest-shard==0.1.2
scipy==1.9.0
sympy==1.11.1
unittest-xml-reporting<=3.2.0,>=2.0.0
xdoctest==1.0.2
xdoctest==1.1.0
13 changes: 6 additions & 7 deletions test/run_doctests.sh
@@ -4,9 +4,12 @@ This script simply runs the torch doctests via the xdoctest runner.
This must be run from the root of the torch repo, as it needs the path to the
torch source code.
"
#xdoctest -m torch --style=google list
This script is provided as a developer convenience. On the CI the doctests are
invoked in 'run_test.py'
"
# To simply list tests
# xdoctest -m torch --style=google list

# Reference: https://stackoverflow.com/questions/59895/bash-script-dir
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
@@ -16,14 +19,10 @@ echo "TORCH_MODPATH = $TORCH_MODPATH"
if [[ ! -d "$TORCH_MODPATH" ]] ; then
echo "Could not find the path to the torch module"
else

# Next version of xdoctest will support environment variables that overlo


export XDOCTEST_GLOBAL_EXEC="from torch import nn\nimport torch.nn.functional as F\nimport torch"
export XDOCTEST_OPTIONS="+IGNORE_WHITESPACE"
# Note: google wont catch numpy style docstrings (a few exist) but it also wont fail
# on things not intended to be doctests.
export XDOCTEST_STYLE="google"
xdoctest "$TORCH_MODPATH" --style="$XDOCTEST_STYLE" --global-exec "$XDOCTEST_GLOBAL_EXEC" --options="$XDOCTEST_OPTIONS"
xdoctest torch "$TORCH_MODPATH" --style="$XDOCTEST_STYLE" --global-exec "$XDOCTEST_GLOBAL_EXEC" --options="$XDOCTEST_OPTIONS"
fi
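
Note: this developer script and the CI path in run_test.py (next file in this diff) both drive xdoctest. A minimal sketch of the equivalent programmatic call, assuming xdoctest 1.1.0's top-level doctest_module API (the exact invocation in run_test.py may differ):

import os
import torch
import xdoctest

# Point xdoctest at the installed torch package, as run_doctests() does.
pkgpath = os.path.dirname(torch.__file__)

xdoctest_config = {
    # Pre-import the names most docstring examples assume are in scope.
    'global_exec': r'\n'.join([
        'from torch import nn',
        'import torch.nn.functional as F',
        'import torch',
    ]),
    'analysis': 'static',   # 'auto' would also collect doctests from compiled modules
    'style': 'google',      # google-style parsing ignores the few numpy-style docstrings
    'options': '+IGNORE_WHITESPACE',
}

# command='list' only parses doctests and checks syntax; command='all' runs them.
xdoctest.doctest_module(pkgpath, command='all', argv=[], config=xdoctest_config)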
29 changes: 26 additions & 3 deletions test/run_test.py
@@ -659,10 +659,9 @@ def run_doctests(test_module, test_directory, options):
import pathlib
pkgpath = pathlib.Path(torch.__file__).parent

#
enabled = {
# TODO: expose these options to the user
# Temporary disable all feature-conditional tests
# For now disable all feature-conditional tests
# 'lapack': 'auto',
# 'cuda': 'auto',
# 'cuda1': 'auto',
@@ -671,6 +670,9 @@ def run_doctests(test_module, test_directory, options):
'cuda': 0,
'cuda1': 0,
'qengine': 0,
'autograd_profiler': 0,
'cpp_ext': 0,
'monitor': 0,
}

# Resolve "auto" based on a test to determine if the feature is available.
@@ -707,13 +709,34 @@ def run_doctests(test_module, test_directory, options):
if enabled['qengine']:
os.environ['TORCH_DOCTEST_QENGINE'] = '1'

if enabled['autograd_profiler']:
os.environ['TORCH_DOCTEST_AUTOGRAD_PROFILER'] = '1'

if enabled['cpp_ext']:
os.environ['TORCH_DOCTEST_CPP_EXT'] = '1'

if enabled['monitor']:
os.environ['TORCH_DOCTEST_MONITOR'] = '1'

if 0:
# TODO: could try to enable some of these
os.environ['TORCH_DOCTEST_QUANTIZED_DYNAMIC'] = '1'
os.environ['TORCH_DOCTEST_ANOMOLY'] = '1'
os.environ['TORCH_DOCTEST_AUTOGRAD'] = '1'
os.environ['TORCH_DOCTEST_HUB'] = '1'
os.environ['TORCH_DOCTEST_DATALOADER'] = '1'
os.environ['TORCH_DOCTEST_ONNX'] = '1'
os.environ['TORCH_DOCTEST_FUTURES'] = '1'

pkgpath = os.path.dirname(torch.__file__)

xdoctest_config = {
'global_exec': r'\n'.join([
'from torch import nn',
'import torch.nn.functional as F',
'import torch',
]),
'analysis': 'static', # set to "auto" to test doctests in compiled modules
'style': 'google',
'options': '+IGNORE_WHITESPACE',
}
@@ -1016,7 +1039,7 @@ def parse_args():
)
parser.add_argument(
"--xdoctest-command",
default='list',
default='all',
help=(
"Control the specific doctest action. "
"Use 'list' to simply parse doctests and check syntax. "
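
The TORCH_DOCTEST_* environment variables set above gate feature-conditional examples inside torch docstrings. A sketch of the mechanism, assuming xdoctest's "+REQUIRES(env:...)" directive (the docstring below is illustrative, not copied from torch):

def cuda_example():
    """
    Example::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
        >>> import torch
        >>> torch.ones(3, device='cuda').sum().item()
        3.0
    """

# run_doctests() sets os.environ['TORCH_DOCTEST_CUDA'] = '1' only when the
# 'cuda' feature is enabled, so the example runs on CUDA workers and is
# skipped (not failed) everywhere else.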
4 changes: 2 additions & 2 deletions torch/__init__.py
@@ -427,7 +427,7 @@ def is_tensor(obj):
obj (Object): Object to test
Example::
>>> x=torch.tensor([1,2,3])
>>> x = torch.tensor([1, 2, 3])
>>> torch.is_tensor(x)
True
@@ -627,10 +627,10 @@ def use_deterministic_algorithms(mode, *, warn_only=False):
Example::
>>> # xdoctest: +SKIP
>>> torch.use_deterministic_algorithms(True)
# Forward mode nondeterministic error
>>> # xdoctest: +SKIP
>>> torch.randn(10, device='cuda').kthvalue(0)
...
RuntimeError: kthvalue CUDA does not have a deterministic implementation...
21 changes: 14 additions & 7 deletions torch/_functorch/eager_transforms.py
@@ -251,6 +251,7 @@ def vjp(func: Callable, *primals, has_aux: bool = False):
Case 2: Using ``vjp`` inside ``torch.no_grad`` context manager:
>>> # xdoctest: +SKIP(failing)
>>> with torch.no_grad():
>>> vjp(f)(x)
@@ -1286,6 +1287,7 @@ def grad(func: Callable, argnums: argnums_t = 0, has_aux: bool = False) -> Calla
Example of using ``grad``:
>>> # xdoctest: +SKIP
>>> from torch.func import grad
>>> x = torch.randn([])
>>> cos_x = grad(lambda x: torch.sin(x))(x)
@@ -1297,6 +1299,7 @@ def grad(func: Callable, argnums: argnums_t = 0, has_aux: bool = False) -> Calla
When composed with ``vmap``, ``grad`` can be used to compute per-sample-gradients:
>>> # xdoctest: +SKIP
>>> from torch.func import grad, vmap
>>> batch_size, feature_size = 3, 5
>>>
@@ -1317,6 +1320,7 @@ def grad(func: Callable, argnums: argnums_t = 0, has_aux: bool = False) -> Calla
Example of using ``grad`` with ``has_aux`` and ``argnums``:
>>> # xdoctest: +SKIP
>>> from torch.func import grad
>>> def my_loss_func(y, y_pred):
>>> loss_per_sample = (0.5 * y_pred - y) ** 2
@@ -1327,13 +1331,14 @@ def grad(func: Callable, argnums: argnums_t = 0, has_aux: bool = False) -> Calla
>>> y_true = torch.rand(4)
>>> y_preds = torch.rand(4, requires_grad=True)
>>> out = fn(y_true, y_preds)
>>> > output is ((grads w.r.t y_true, grads w.r.t y_preds), (y_pred, loss_per_sample))
>>> # > output is ((grads w.r.t y_true, grads w.r.t y_preds), (y_pred, loss_per_sample))
.. note::
Using PyTorch ``torch.no_grad`` together with ``grad``.
Case 1: Using ``torch.no_grad`` inside a function:
>>> # xdoctest: +SKIP
>>> def f(x):
>>> with torch.no_grad():
>>> c = x ** 2
Expand All @@ -1343,6 +1348,7 @@ def grad(func: Callable, argnums: argnums_t = 0, has_aux: bool = False) -> Calla
Case 2: Using ``grad`` inside ``torch.no_grad`` context manager:
>>> # xdoctest: +SKIP
>>> with torch.no_grad():
>>> grad(f)(x)
@@ -1433,11 +1439,12 @@ def functionalize(func: Callable, *, remove: str = 'mutations') -> Callable:
Example::
>>> # xdoctest: +SKIP
>>> import torch
>>> from torch.fx.experimental.proxy_tensor import make_fx
>>> from torch.func import functionalize
>>>
>>> A function that uses mutations and views, but only on intermediate tensors.
>>> # A function that uses mutations and views, but only on intermediate tensors.
>>> def f(a):
... b = a + 1
... c = b.view(-1)
@@ -1490,17 +1497,17 @@ def forward(self, a_1):
return view_copy_1
>>> A function that mutates its input tensor
>>> # A function that mutates its input tensor
>>> def f(a):
... b = a.view(-1)
... b.add_(1)
... return a
...
>>> f_no_mutations_and_views_traced = make_fx(functionalize(f, remove='mutations_and_views'))(inpt)
>>>
>>> All mutations and views have been removed,
>>> but there is an extra copy_ in the graph to correctly apply the mutation to the input
>>> after the function has completed.
>>> #
>>> # All mutations and views have been removed,
>>> # but there is an extra copy_ in the graph to correctly apply the mutation to the input
>>> # after the function has completed.
>>> print(f_no_mutations_and_views_traced.code)
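
A note on the docstring edits in this file: several example lines were bare prose after the ">>> " prompt (e.g. ">>> A function that mutates its input tensor"). Once the doctests actually execute, everything after ">>> " is parsed as Python, so such lines raise a SyntaxError; prefixing them with "#" turns them into comments. A small illustrative sketch (hypothetical functions, not from torch):

def broken_docstring():
    """
    >>> A function that mutates its input tensor
    >>> x = 1
    """
    # The first example line above is parsed as Python source and fails to compile.

def fixed_docstring():
    """
    >>> # A function that mutates its input tensor
    >>> x = 1
    """
    # The prose is now a comment, so the example parses and runs cleanly.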
1 change: 1 addition & 0 deletions torch/_functorch/fx_minifier.py
@@ -69,6 +69,7 @@ def minifier(fail_f: fx.GraphModule, inps, module_fails, dump_state: Callable =
2. Delta Debugging: Tries replacing half of the graph with inputs. If fails,
tries replacing quarter of the graph, etc.
>>> # xdoctest: +SKIP(failing)
>>> failing_function = fx.symbolic_trace(f)
>>> minimize(failing_function, [torch.randn(5)], lambda fx_g, inps: fx_g(*inps))
3 changes: 3 additions & 0 deletions torch/_namedtensor_internals.py
@@ -122,10 +122,12 @@ def update_names(tensor, names, rename_map, inplace):
For example,
```
>>> # xdoctest: +SKIP
>>> x = torch.empty(2, 3, 5, 7, names=('N', 'C', 'H', 'W'))
>>> x.rename('...', 'height', 'width').names
('N', 'C', 'height', 'width')
>>> # xdoctest: +SKIP
>>> x.rename('batch', '...', 'width').names
('batch', 'C', 'H', 'width')
@@ -136,6 +138,7 @@ def update_names(tensor, names, rename_map, inplace):
For example,
```
>>> # xdoctest: +SKIP
>>> x = torch.empty(2, 3, 5, 7, names=('N', 'C', 'H', 'W'))
>>> x.rename(W='width', H='height').names
('N', 'C', 'height', 'width')
1 change: 1 addition & 0 deletions torch/_prims_common/__init__.py
@@ -1496,6 +1496,7 @@ def compute_required_storage_length(
>>> compute_required_storage_length(t.shape, t.stride(), t.storage_offset())
200
>>> # xdoctest: +SKIP(failing)
>>> t2 = torch.empty_strided((1, 2, 3), (5, 7, 11))
>>> size = compute_required_storage_length(t2.shape, t2.stride(), t2.storage_offset())
>>> size == t.storage().size()
1 change: 0 additions & 1 deletion torch/_tensor_str.py
@@ -215,7 +215,6 @@ def _vector_str(self, indent, summarize, formatter1, formatter2=None):
elements_per_line = max(
1, int(math.floor((PRINT_OPTS.linewidth - indent) / (element_length)))
)
# char_per_line = element_length * elements_per_line # unused

def _val_formatter(val, formatter1=formatter1, formatter2=formatter2):
if formatter2 is not None:
1 change: 1 addition & 0 deletions torch/_vmap_internals.py
@@ -9,6 +9,7 @@
in_dims_t = Union[int, Tuple]
out_dims_t = Union[int, Tuple[int, ...]]


# Checks that all args-to-be-batched have the same batch dim size
def _validate_and_get_batch_size(
flat_in_dims: List[Optional[int]], flat_args: List
@@ -19,9 +19,9 @@ class LinearReLU(nnqd.Linear):
Examples::
>>> # xdoctest: +SKIP
>>> m = nn.intrinsic.quantized.dynamic.LinearReLU(20, 30)
>>> input = torch.randn(128, 20)
>>> # xdoctest: +SKIP
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 30])
1 change: 1 addition & 0 deletions torch/ao/nn/intrinsic/quantized/modules/linear_relu.py
@@ -56,6 +56,7 @@ class LinearLeakyReLU(nnq.Linear):
Same as torch.nn.quantized.Linear
+ negative_slope
Examples::
>>> # xdoctest: +SKIP
>>> m = nn.intrinsic.LinearLeakyReLU(20, 30, 0.01)
>>> input = torch.randn(128, 20)
>>> output = m(input)
13 changes: 7 additions & 6 deletions torch/ao/nn/quantized/dynamic/modules/conv.py
@@ -15,6 +15,7 @@

__all__ = ['Conv1d', 'Conv2d', 'Conv3d', 'ConvTranspose1d', 'ConvTranspose2d', 'ConvTranspose3d']


class Conv1d(nnq.Conv1d):
r"""A dynamically quantized conv module with floating point tensors as inputs and outputs.
@@ -31,9 +32,9 @@ class Conv1d(nnq.Conv1d):
Examples::
>>> # xdoctest: +SKIP
>>> m = nn.quantized.dynamic.Conv1d(16, 33, 3, stride=2)
>>> input = torch.randn(20, 16, 100)
>>> # xdoctest: +SKIP
>>> output = m(input)
"""
@@ -102,14 +103,14 @@ class Conv2d(nnq.Conv2d):
Examples::
>>> # xdoctest: +SKIP
>>> # With square kernels and equal stride
>>> m = nn.quantized.dynamic.Conv2d(16, 33, 3, stride=2)
>>> # non-square kernels and unequal stride and with padding
>>> m = nn.quantized.dynamic.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
>>> # non-square kernels and unequal stride and with padding and dilation
>>> m = nn.quantized.dynamic.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))
>>> input = torch.randn(20, 16, 50, 100)
>>> # xdoctest: +SKIP
>>> output = m(input)
"""
@@ -167,14 +168,14 @@ class Conv3d(nnq.Conv3d):
Examples::
>>> # xdoctest: +SKIP
>>> # With square kernels and equal stride
>>> m = nn.quantized.dynamic.Conv3d(16, 33, 3, stride=2)
>>> # non-square kernels and unequal stride and with padding
>>> m = nn.quantized.dynamic.Conv3d(16, 33, (3, 5, 5), stride=(1, 2, 2), padding=(1, 2, 2))
>>> # non-square kernels and unequal stride and with padding and dilation
>>> m = nn.quantized.dynamic.Conv3d(16, 33, (3, 5, 5), stride=(1, 2, 2), padding=(1, 2, 2), dilation=(1, 2, 2))
>>> input = torch.randn(20, 16, 56, 56, 56)
>>> # xdoctest: +SKIP
>>> output = m(input)
"""
@@ -233,8 +234,8 @@ class ConvTranspose1d(nnq.ConvTranspose1d):
Examples::
>>> # With square kernels and equal stride
>>> # xdoctest: +SKIP
>>> # With square kernels and equal stride
>>> m = nndq.ConvTranspose1d(16, 33, 3, stride=2)
>>> # non-square kernels and unequal stride and with padding
>>> m = nndq.ConvTranspose1d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
@@ -294,11 +295,11 @@ class ConvTranspose2d(nnq.ConvTranspose2d):
Examples::
>>> # xdoctest: +SKIP
>>> # With square kernels and equal stride
>>> m = nnq.ConvTranspose2d(16, 33, 3, stride=2)
>>> # non-square kernels and unequal stride and with padding
>>> m = nnq.ConvTranspose2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
>>> # xdoctest: +SKIP
>>> output = m(input)
>>> # exact output size can be also specified as an argument
>>> downsample = nnq.Conv2d(16, 16, 3, stride=2, padding=1)
@@ -355,11 +356,11 @@ class ConvTranspose3d(nnq.ConvTranspose3d):
Examples::
>>> # xdoctest: +SKIP
>>> # With cubic kernels and equal stride
>>> m = nnq.ConvTranspose3d(16, 33, 3, stride=2)
>>> # non-cubic kernels and unequal stride and with padding
>>> m = nnq.ConvTranspose3d(16, 33, (3, 3, 5), stride=(2, 1, 1), padding=(4, 2, 2))
>>> # xdoctest: +SKIP
>>> output = m(input)
>>> # exact output size can be also specified as an argument
>>> downsample = nnq.Conv3d(16, 16, 3, stride=2, padding=1)
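
The recurring change in this file moves the ">>> # xdoctest: +SKIP" directive to the top of each "Examples::" block. My reading of xdoctest's directive scoping (hedged, not an authoritative statement of its semantics): a directive on a line of its own applies to the remainder of the block, while an inline directive applies to a single statement, so placing it first skips the whole example instead of letting the earlier statements run and potentially fail. Illustrative sketch:

def skip_partial():
    """
    Examples::

        >>> m = nn.quantized.dynamic.Conv1d(16, 33, 3, stride=2)  # still executed
        >>> input = torch.randn(20, 16, 100)                      # still executed
        >>> # xdoctest: +SKIP
        >>> output = m(input)                                     # skipped from here on
    """

def skip_whole_example():
    """
    Examples::

        >>> # xdoctest: +SKIP
        >>> m = nn.quantized.dynamic.Conv1d(16, 33, 3, stride=2)  # entire example skipped
        >>> input = torch.randn(20, 16, 100)
        >>> output = m(input)
    """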
(Diff for the remaining changed files not shown.)
