# Owner(s): ["module: inductor"]
import logging
import unittest

import torch
import torch._dynamo as torchdynamo
import torch._inductor.config as torchinductor_config

from torch.testing._internal.common_utils import IS_LINUX, TestCase
from torch.testing._internal.inductor_utils import HAS_CUDA
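

# A small two-layer MLP used as the model under test for torch.compile.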
class MLP(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.l1 = torch.nn.Linear(1, 6)
        self.l2 = torch.nn.Linear(6, 1)

    def forward(self, x=None):
        x = torch.relu(self.l1(x))
        x = torch.relu(self.l2(x))
        return x
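

# Plain function passed to torch.compile in test_compile_invalid_options below.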
def _test_f(x):
    return x * x
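

# Smoke tests that compile and run small workloads end to end through Dynamo and Inductor.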
class SmokeTest(TestCase):
    @unittest.skipIf(not HAS_CUDA, "Triton is not available")
    def test_mlp(self):
        torchdynamo.config.log_level = logging.INFO
        torchdynamo.config.verbose = True
        torchinductor_config.debug = True

        mlp = torch.compile(MLP().cuda())
        for _ in range(3):
            mlp(torch.randn(1, device="cuda"))

        torchdynamo.config.verbose = False
        torchinductor_config.debug = False
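
    # Both decorator forms of torch.compile should work: bare and with a mode argument.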
    @unittest.skipIf(not HAS_CUDA, "Triton is not available")
    def test_compile_decorator(self):
        @torch.compile
        def foo(x):
            return torch.sin(x) + x.min()

        @torch.compile(mode="reduce-overhead")
        def bar(x):
            return x * x

        for _ in range(3):
            foo(torch.full((3, 4), 0.7, device="cuda"))
            bar(torch.rand((2, 2), device="cuda"))
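
    # Passing an unknown mode string should raise a RuntimeError.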
    def test_compile_invalid_options(self):
        with self.assertRaises(RuntimeError):
            opt_f = torch.compile(_test_f, mode="ha")
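

# Run only on Linux when a CUDA device with compute capability major version > 5 is available.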
if __name__ == "__main__":
    from torch._dynamo.test_case import run_tests

    if IS_LINUX and torch.cuda.is_available():
        if torch.cuda.get_device_properties(0).major > 5:
            run_tests()