[READY TO MERGE] Enable tests that use DataLoader with multiple workers on Windows (#6745)

* Don't import TEST_CUDA for test_dataloader on Windows

* test_partial_workers is stuck on Windows

yf225 authored and ezyang committed Jun 7, 2018
1 parent 89ea6ac commit c84b97b
Showing 2 changed files with 6 additions and 22 deletions.
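A note on the first commit-message bullet: below is a minimal sketch of what "don't import TEST_CUDA on Windows" can look like, assuming TEST_CUDA normally comes from a shared test helper (the helper name common_nn is an assumption, not shown on this page):

    import sys

    IS_WINDOWS = sys.platform == 'win32'

    if IS_WINDOWS:
        # Assumed workaround: probe CUDA directly rather than importing the
        # shared helper, whose import-time work is what misbehaves on Windows.
        import torch
        TEST_CUDA = torch.cuda.is_available()
    else:
        from common_nn import TEST_CUDA  # hypothetical shared helper

The diff below keeps the @unittest.skipIf(not TEST_CUDA, ...) guards intact, so however TEST_CUDA is obtained, CUDA-only tests still skip cleanly on CPU-only machines.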
16 changes: 1 addition & 15 deletions test/test_dataloader.py
@@ -344,7 +344,6 @@ def test_sequential_pin_memory(self):
             self.assertTrue(input.is_pinned())
             self.assertTrue(target.is_pinned())
 
-    @unittest.skipIf(IS_WINDOWS, "FIXME: Intermittent CUDA out-of-memory error")
     def test_multiple_dataloaders(self):
         loader1_it = iter(DataLoader(self.dataset, num_workers=1))
         loader2_it = iter(DataLoader(self.dataset, num_workers=2))
@@ -355,7 +354,6 @@ def test_multiple_dataloaders(self):
         next(loader1_it)
         next(loader2_it)
 
-    @unittest.skipIf(IS_WINDOWS, "FIXME: Intermittent CUDA out-of-memory error")
     @unittest.skip("temporarily disable until flaky failures are fixed")
     def test_segfault(self):
         p = ErrorTrackingProcess(target=_test_segfault)
@@ -373,7 +371,6 @@ def test_segfault(self):
         finally:
             p.terminate()
 
-    @unittest.skipIf(IS_WINDOWS, "FIXME: Intermittent CUDA out-of-memory error")
     def test_timeout(self):
         p = ErrorTrackingProcess(target=_test_timeout)
         p.start()
@@ -386,7 +383,6 @@ def test_timeout(self):
         finally:
             p.terminate()
 
-    @unittest.skipIf(IS_WINDOWS, "FIXME: Intermittent CUDA out-of-memory error")
     def test_worker_seed(self):
         num_workers = 6
         dataset = SynchronizedSeedDataset(num_workers, num_workers)
@@ -396,7 +392,6 @@ def test_worker_seed(self):
             seeds.add(batch[0])
         self.assertEqual(len(seeds), num_workers)
 
-    @unittest.skipIf(IS_WINDOWS, "FIXME: Intermittent CUDA out-of-memory error")
     def test_worker_init_fn(self):
         dataset = SeedDataset(4)
         dataloader = DataLoader(dataset, batch_size=2, num_workers=2,
@@ -411,19 +406,15 @@ def test_shuffle(self):
     def test_shuffle_batch(self):
         self._test_shuffle(DataLoader(self.dataset, batch_size=2, shuffle=True))
 
-    @unittest.skipIf(IS_WINDOWS, "FIXME: Intermittent CUDA out-of-memory error")
     def test_sequential_workers(self):
         self._test_sequential(DataLoader(self.dataset, num_workers=4))
 
-    @unittest.skipIf(IS_WINDOWS, "FIXME: Intermittent CUDA out-of-memory error")
     def test_seqential_batch_workers(self):
         self._test_sequential(DataLoader(self.dataset, batch_size=2, num_workers=4))
 
-    @unittest.skipIf(IS_WINDOWS, "FIXME: Intermittent CUDA out-of-memory error")
     def test_shuffle_workers(self):
         self._test_shuffle(DataLoader(self.dataset, shuffle=True, num_workers=4))
 
-    @unittest.skipIf(IS_WINDOWS, "FIXME: Intermittent CUDA out-of-memory error")
     def test_shuffle_batch_workers(self):
         self._test_shuffle(DataLoader(self.dataset, batch_size=2, shuffle=True, num_workers=4))
 
@@ -446,12 +437,10 @@ def _test_batch_sampler(self, **kwargs):
             self.assertEqual(len(input), 3)
             self.assertEqual(input, self.data[offset:offset + 3])
 
-    @unittest.skipIf(IS_WINDOWS, "FIXME: Intermittent CUDA out-of-memory error")
     def test_batch_sampler(self):
         self._test_batch_sampler()
         self._test_batch_sampler(num_workers=4)
 
-    @unittest.skipIf(IS_WINDOWS, "FIXME: Intermittent CUDA out-of-memory error")
     @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
     def test_shuffle_pin_memory(self):
         loader = DataLoader(self.dataset, batch_size=2, shuffle=True, num_workers=4, pin_memory=True)
@@ -478,11 +467,10 @@ def __len__(self):
     def test_error(self):
         self._test_error(DataLoader(ErrorDataset(100), batch_size=2, shuffle=True))
 
-    @unittest.skipIf(IS_WINDOWS, "FIXME: Intermittent CUDA out-of-memory error")
     def test_error_workers(self):
         self._test_error(DataLoader(ErrorDataset(41), batch_size=2, shuffle=True, num_workers=4))
 
-    @unittest.skipIf(IS_WINDOWS, "FIXME: Intermittent CUDA out-of-memory error")
+    @unittest.skipIf(IS_WINDOWS, "FIXME: stuck test")
     @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
     def test_partial_workers(self):
         "check that workers exit even if the iterator is not exhausted"
@@ -637,7 +625,6 @@ class TestStringDataLoader(TestCase):
     def setUp(self):
         self.dataset = StringDataset()
 
-    @unittest.skipIf(IS_WINDOWS, "FIXME: Intermittent CUDA out-of-memory error")
     @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
     def test_shuffle_pin_memory(self):
         loader = DataLoader(self.dataset, batch_size=2, shuffle=True, num_workers=4, pin_memory=True)
@@ -721,7 +708,6 @@ def _run_ind_worker_queue_test(self, batch_size, num_workers):
             if current_worker_idx == num_workers:
                 current_worker_idx = 0
 
-    @unittest.skipIf(IS_WINDOWS, "FIXME: Intermittent CUDA out-of-memory error")
     def test_ind_worker_queue(self):
         for batch_size in (8, 16, 32, 64):
             for num_workers in range(1, 6):
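For context on why these tests were Windows-skipped in the first place: Windows has no fork(), so DataLoader workers are spawned processes that re-import the main module. Below is a self-contained sketch of the pattern the re-enabled tests rely on (illustrative only, not taken from this diff):

    import torch
    from torch.utils.data import DataLoader, TensorDataset

    def main():
        dataset = TensorDataset(torch.arange(100, dtype=torch.float32))
        # On Windows each of the 4 workers is a freshly spawned process.
        loader = DataLoader(dataset, batch_size=2, shuffle=True, num_workers=4)
        for (batch,) in loader:
            pass  # batches are assembled in the worker processes

    if __name__ == '__main__':  # required on Windows: children re-import this file
        main()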
12 changes: 5 additions & 7 deletions test/test_utils.py
@@ -17,7 +17,6 @@
 from torch.utils.trainer import Trainer
 from torch.utils.trainer.plugins import *
 from torch.utils.trainer.plugins.plugin import Plugin
-from torch.utils.serialization import load_lua
 from torch.autograd._functions.utils import prepare_onnx_paddings
 from torch.autograd._functions.utils import check_onnx_broadcast
 from common import IS_WINDOWS, IS_PPC
@@ -281,7 +280,6 @@ def test_multi_keep(self):
         dataiter = iter(dataloader)
         self.assertEqual(len(list(dataiter)), 2)
 
-    @unittest.skipIf(IS_WINDOWS, "FIXME: Intermittent CUDA out-of-memory error")
     def test_multi_drop(self):
         dataloader = torch.utils.data.DataLoader(self.dataset,
                                                  batch_size=self.batch_size,
@@ -593,14 +591,14 @@ def _check_autograd_summary(self, output):
             'Distance between autograd prof output and end of output not in [6, 100] lines', output))
 
     def _check_cuda(self, output):
-        if torch.cuda.is_available():
+        if HAS_CUDA:
             results = re.search('CUDA mode', output)
             self.assertIsNotNone(results, self._fail_msg('Should tell users CUDA', output))
         else:
             results = re.search('CUDA mode', output)
             self.assertIsNone(results, self._fail_msg('Should not tell users about CUDA', output))
 
-    @unittest.skipIf(torch.cuda.is_available(), 'CPU-only test')
+    @unittest.skipIf(HAS_CUDA, 'CPU-only test')
     def test_bottleneck_cpu_only(self):
         rc, out, err = self._run_bottleneck('bottleneck/test.py')
         self.assertEqual(rc, 0, 'Run failed with\n{}'.format(err))
@@ -611,8 +609,7 @@ def test_bottleneck_cpu_only(self):
         self._check_cprof_summary(out)
         self._check_cuda(out)
 
-    @unittest.skipIf(IS_WINDOWS, "FIXME: Intermittent CUDA out-of-memory error")
-    @unittest.skipIf(not torch.cuda.is_available(), 'No CUDA')
+    @unittest.skipIf(not HAS_CUDA, 'No CUDA')
     def test_bottleneck_cuda(self):
         rc, out, err = self._run_bottleneck('bottleneck/test_cuda.py')
         self.assertEqual(rc, 0, 'Run failed with\n{}'.format(err))
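The hunks above swap torch.cuda.is_available() for a HAS_CUDA flag, presumably evaluated once and shared across the test file (its import falls outside this diff's context lines, so where it lives is an assumption). A sketch of that pattern:

    import unittest
    import torch

    # Evaluated once at import time; every skipIf below reuses the cached
    # result instead of touching the CUDA runtime again.
    HAS_CUDA = torch.cuda.is_available()

    class ExampleTest(unittest.TestCase):
        @unittest.skipIf(not HAS_CUDA, 'No CUDA')
        def test_needs_cuda(self):
            self.assertTrue(torch.zeros(1).cuda().is_cuda)

    if __name__ == '__main__':
        unittest.main()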
@@ -747,6 +744,7 @@ def try_check_onnx_broadcast(dims1, dims2, expect_broadcast, expect_fail):
     try_check_onnx_broadcast(dims1, dims2, True, False)
 
 
-TestLuaReader.init()
 if __name__ == '__main__':
+    from torch.utils.serialization import load_lua
+    TestLuaReader.init()
     run_tests()
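The final hunk moves the load_lua import and TestLuaReader.init() under the __main__ guard, so merely importing test_utils.py no longer runs them; this matters because spawned worker processes on Windows re-import the main module, repeating any module-level side effects. A generic sketch of that spawn behaviour (standard Python multiprocessing, not specific to this commit):

    import multiprocessing as mp

    print('module imported')  # with spawn, this prints again in the child

    def work():
        print('hello from the worker')

    if __name__ == '__main__':
        # One-time setup belongs here: it runs only in the parent process.
        p = mp.Process(target=work)
        p.start()
        p.join()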
