Skip to content

Commit

Permalink
Skip PooledEmbeddingModulesTest until FailedHealthCheck is fixed (pyt…
Browse files Browse the repository at this point in the history
…orch#1999)

Summary:
Pull Request resolved: pytorch#1999

Hypothesis version 6.83.2 onwards introduces
`HealthCheck.differing_executors` that causes tests
in`permute_pooled_embedding_test.py` to fail with error:

`The method PooledEmbeddingModulesTest.setUp was called from multiple different executors. This may lead to flaky tests and nonreproducible errors when replaying from database`.

Currently, we're using the latest version of hypothesis on CI:

https://github.com/pytorch/FBGEMM/actions/runs/6084855480/job/16515052387

Current hypothesis on FBCode is 6.70.1 which does not have
`HealthCheck.differing_executors`.

Reviewed By: shintaro-iwasaki

Differential Revision: D49020046

fbshipit-source-id: 8ab1350411260c771baf05efe607f91c12df2385
  • Loading branch information
spcyppt committed Sep 6, 2023
1 parent 6f0abb0 commit 0baa308
Showing 1 changed file with 11 additions and 6 deletions.
17 changes: 11 additions & 6 deletions fbgemm_gpu/test/permute_pooled_embedding_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,11 @@

typed_gpu_unavailable: Tuple[bool, str] = gpu_unavailable

if getattr(HealthCheck, "not_a_test_method", False):
suppressed_list: List[HealthCheck] = [HealthCheck.not_a_test_method]
else:
suppressed_list = []

INTERN_MODULE = "fbgemm_gpu.permute_pooled_embedding_modules"
FIXED_EXTERN_API = {
"PermutePooledEmbeddings": {
Expand Down Expand Up @@ -68,13 +73,13 @@ def forward(self, x: Tensor) -> Tensor:

# @parameterized_class([{"device_type": "cpu"}, {"device_type": "cuda"}])
class PooledEmbeddingModulesTest(unittest.TestCase):
@settings(deadline=10000, suppress_health_check=[HealthCheck.not_a_test_method])
@settings(deadline=10000, suppress_health_check=suppressed_list)
# pyre-fixme[56]: Pyre was not able to infer the type of argument
@given(device_type=cpu_and_maybe_gpu())
def setUp(self, device_type: torch.device) -> None:
self.device = device_type

@unittest.skipIf(*typed_gpu_unavailable)
@unittest.skipIf(True, "Skip until FailedHealthCheck is fixed")
def test_permutation(self) -> None:
net = Net().to(self.device)

Expand All @@ -84,7 +89,7 @@ def test_permutation(self) -> None:
[6, 7, 8, 9, 0, 1, 5, 2, 3, 4],
)

@unittest.skipIf(*typed_gpu_unavailable)
@unittest.skipIf(True, "Skip until FailedHealthCheck is fixed")
def test_permutation_autograd(self) -> None:
net = Net().to(self.device)

Expand Down Expand Up @@ -117,7 +122,7 @@ def test_compatibility(self) -> None:
f"{FWD_COMPAT_MSG}",
)

@unittest.skipIf(*typed_gpu_unavailable)
@unittest.skipIf(True, "Skip until FailedHealthCheck is fixed")
def test_pooled_table_batched_embedding(self) -> None:
num_emb_bags = 5
num_embeddings = 10
Expand Down Expand Up @@ -160,7 +165,7 @@ def test_pooled_table_batched_embedding(self) -> None:
ref_permuted_pooled_emb.to(self.device), permuted_pooled_emb
)

@unittest.skipIf(*typed_gpu_unavailable)
@unittest.skipIf(True, "Skip until FailedHealthCheck is fixed")
def test_permutation_autograd_meta(self) -> None:
"""
Test that permute_pooled_embeddings_autograd works with meta tensor and
Expand All @@ -175,7 +180,7 @@ def test_permutation_autograd_meta(self) -> None:
assert output_meta.shape == output_cpu.shape
assert input.shape == output_meta.shape

@unittest.skipIf(*typed_gpu_unavailable)
@unittest.skipIf(True, "Skip until FailedHealthCheck is fixed")
def test_duplicate_permutations(self) -> None:
embs_dims = [2, 3, 1, 4]
permute = [3, 0, 2, 0, 1, 3]
Expand Down

0 comments on commit 0baa308

Please sign in to comment.