[BE] Use assertEqual in MultiKernel tests (pytorch#127725)
Pull Request resolved: pytorch#127725
Approved by: https://github.com/lezcano
ghstack dependencies: pytorch#131044, pytorch#127724
peterbell10 authored and pytorchmergebot committed Jul 26, 2024
1 parent 9ae288f commit c92f2a1
Showing 1 changed file with 6 additions and 11 deletions.
test/inductor/test_multi_kernel.py
@@ -83,7 +83,7 @@ def test_softmax(self, expect_multi_kernel=True):
         # One for the first pass and one for the second pass.
         # We mainly care about the wrapper for the final pass here.
         wrapper_code = wrapper_code[-1]
-        self.assertTrue(torch.allclose(ref, act))
+        self.assertEqual(ref, act)
         if expect_multi_kernel:
             self.assertTrue(_contains_multi_kernel_code(wrapper_code))
         else:
@@ -142,9 +142,7 @@ def test_layernorm(self):
         x = torch.rand(2, 1024).cuda()
         ref = ln(x)
         act = torch.compile(ln)(x)
-        self.assertTrue(
-            torch.allclose(ref, act, atol=1e-4, rtol=1e-4), f"ref:\n{ref}\nact:\n{act}"
-        )
+        self.assertEqual(ref, act, atol=1e-4, rtol=1e-4)
 
     def test_inplace_update(self):
         """
@@ -158,7 +156,7 @@ def f(x, y):
         y = torch.rand(1024, 1024).cuda()
         ref = f(x, y)
         act = torch.compile(f)(x, y)
-        self.assertTrue(torch.allclose(ref, act))
+        self.assertEqual(ref, act)
 
     def test_transformer_snippet(self):
         model = TransformerSnippet().cuda()
@@ -179,10 +177,7 @@ def f(*x):
         # inductor random number implementation is different to eager.
         # We should fallback to eager if we want to test accuracy.
         if config.fallback_random:
-            self.assertTrue(
-                torch.allclose(ref, act, atol=1e-4, rtol=1e-4),
-                f"ref:\n{ref}\nact:\n{act}",
-            )
+            self.assertEqual(ref, act, atol=1e-4, rtol=1e-4)
 
     def test_transformer_snippet_with_fallback_random(self):
         """
@@ -236,7 +231,7 @@ def f(x, y):
 
         ref = f(x, y_ref)
         act = torch.compile(f)(x, y)
-        self.assertTrue(torch.allclose(y_ref, y))
+        self.assertEqual(y_ref, y)
 
     def test_reduction_scratch_buffer(self, force_multi_kernel=1):
         """
@@ -261,7 +256,7 @@ def f(x):
         ref = f(x)
         with config.patch("triton.multi_kernel", force_multi_kernel):
             act = torch.compile(f)(x)
-        self.assertTrue(torch.allclose(ref, act))
+        self.assertEqual(ref, act)
 
     def test_split_scan(self, force_multi_kernel=1):
         def f(x):

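For reference, a minimal standalone sketch (not part of the commit) of what the swap buys. It assumes a test class derived from torch.testing._internal.common_utils.TestCase, whose assertEqual understands tensors, applies dtype-aware default tolerances (overridable via atol/rtol, exactly as the hunks above pass them), and reports the mismatched values on failure; that is why the hand-built f"ref:\n{ref}\nact:\n{act}" messages in the deleted lines are no longer needed. The class and test names below are hypothetical.

import torch
from torch.testing._internal.common_utils import TestCase, run_tests


class AssertEqualExample(TestCase):
    def test_close_tensors(self):
        ref = torch.rand(8)
        act = ref + 1e-5  # emulate small numerical drift from a compiled kernel

        # Old pattern (the deleted lines): a bare boolean check. On failure
        # it reports only "False is not true" unless a message is formatted
        # by hand.
        self.assertTrue(torch.allclose(ref, act, atol=1e-4, rtol=1e-4))

        # New pattern: TestCase.assertEqual applies the same tolerances and,
        # on mismatch, reports the differences between the two tensors.
        self.assertEqual(ref, act, atol=1e-4, rtol=1e-4)


if __name__ == "__main__":
    run_tests()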