unaligned_dataset.py (from a fork of junyanz/pytorch-CycleGAN-and-pix2pix)
import os.path

import torchvision.transforms as transforms
from PIL import Image

from data.base_dataset import BaseDataset
from data.image_folder import make_dataset
class UnalignedDataset(BaseDataset):
    """Unpaired dataset for CycleGAN-style training.

    Images are drawn independently from two directories,
    '<dataroot>/<phase>A' and '<dataroot>/<phase>B'; no pairing between
    the two domains is assumed.
    """

    def initialize(self, opt):
        self.opt = opt
        self.root = opt.dataroot
        self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A')
        self.dir_B = os.path.join(opt.dataroot, opt.phase + 'B')

        self.A_paths = sorted(make_dataset(self.dir_A))
        self.B_paths = sorted(make_dataset(self.dir_B))
        self.A_size = len(self.A_paths)
        self.B_size = len(self.B_paths)

        transform_list = []
        if opt.resize_or_crop == 'resize_and_crop':
            osize = [opt.loadSize, opt.loadSize]
            # transforms.Scale was deprecated and renamed to transforms.Resize;
            # the behavior is unchanged.
            transform_list.append(transforms.Resize(osize, Image.BICUBIC))
        if opt.isTrain and not opt.no_flip:
            transform_list.append(transforms.RandomHorizontalFlip())
        if opt.resize_or_crop != 'no_resize':
            transform_list.append(transforms.RandomCrop(opt.fineSize))
        # Map [0, 1] tensors to [-1, 1], matching the tanh output range of the generator.
        transform_list += [transforms.ToTensor(),
                           transforms.Normalize((0.5, 0.5, 0.5),
                                                (0.5, 0.5, 0.5))]
        self.transform = transforms.Compose(transform_list)
    def __getitem__(self, index):
        # A and B are sampled independently; the index wraps around the
        # smaller set, so the two domains need not contain the same
        # number of images.
        A_path = self.A_paths[index % self.A_size]
        B_path = self.B_paths[index % self.B_size]
        A_img = Image.open(A_path).convert('RGB')
        B_img = Image.open(B_path).convert('RGB')
        A_img = self.transform(A_img)
        B_img = self.transform(B_img)
        return {'A': A_img, 'B': B_img,
                'A_paths': A_path, 'B_paths': B_path}
    def __len__(self):
        # One epoch visits every image in the larger of the two domains.
        return max(self.A_size, self.B_size)

    def name(self):
        return 'UnalignedDataset'
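
For reference, a minimal sketch of how this dataset might be driven directly, assuming the repo's data/ package is importable and that BaseDataset takes no constructor arguments, as in the upstream repo. The SimpleNamespace stands in for the repo's real option parser; its field names mirror exactly what initialize() reads, and the values shown are hypothetical.

from types import SimpleNamespace

if __name__ == '__main__':
    # Hypothetical options; the real repo builds this object with its own
    # TrainOptions parser. dataroot is expected to contain trainA/ and trainB/.
    opt = SimpleNamespace(
        dataroot='./datasets/horse2zebra',
        phase='train',
        resize_or_crop='resize_and_crop',
        loadSize=286,
        fineSize=256,
        isTrain=True,
        no_flip=False,
    )
    dataset = UnalignedDataset()
    dataset.initialize(opt)
    sample = dataset[0]
    # Each sample is a dict of two independently drawn, normalized 3xHxW tensors
    # plus the file paths they came from.
    print(sample['A'].shape, sample['B'].shape)  # torch.Size([3, 256, 256]) twice
    print(sample['A_paths'], sample['B_paths'])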