forked from zyf12389/GC-Net
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathread_data.py
60 lines (55 loc) · 2.27 KB
/
read_data.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
from torch.utils.data import Dataset, DataLoader
import pickle
import cv2
from python_pfm import readPFM
import numpy as np
import torch
class sceneDisp(Dataset):
    """Scene Flow stereo dataset: left/right RGB image pairs plus the left
    disparity map.

    Paths are loaded from pickled lists in the current working directory:
    'paths_left_<settype>.pkl', 'paths_right_<settype>.pkl',
    'disp_left_<settype>.pkl', 'disp_right_<settype>.pkl'.
    """

    def __init__(self, root_dir, settype, transform=None):
        """
        Args:
            root_dir (string): Directory with all the images (stored for
                compatibility; actual paths come from the pickle files).
            settype (string): Dataset split name, e.g. 'train' or 'test';
                selects which pickle files are loaded.
            transform (callable, optional): Optional transform applied to
                the left and right images of each sample.
        """
        self.root_dir = root_dir
        self.settype = settype
        self.transform = transform
        # `with` guarantees the pickle files are closed even if loading fails.
        with open('paths_left_' + settype + '.pkl', 'rb') as f:
            self.paths_left = pickle.load(f)
        with open('paths_right_' + settype + '.pkl', 'rb') as f:
            self.paths_right = pickle.load(f)
        with open('disp_left_' + settype + '.pkl', 'rb') as f:
            self.disp_left = pickle.load(f)
        with open('disp_right_' + settype + '.pkl', 'rb') as f:
            self.disp_right = pickle.load(f)
        # Rename each disparity path to use the RGB file's stem, so that
        # index i of the disparity lists matches index i of the image lists
        # (solves the inconsistency of index between rgb and depth).
        for i in range(len(self.paths_left)):
            stem = self.paths_left[i].split('/')[-1].split('.')[0]
            old_name = self.disp_left[i].split('/')[-1]
            self.disp_left[i] = self.disp_left[i].replace(old_name, stem + '.pfm')
            self.disp_right[i] = self.disp_right[i].replace(old_name, stem + '.pfm')

    def __len__(self):
        # Derive the length from the loaded path list instead of the
        # hard-coded split sizes (35454 train / 4370 test).  The original
        # implicitly returned None for any other settype, which makes
        # DataLoader raise a TypeError.
        return len(self.paths_left)

    def __getitem__(self, idx):
        """Return {'imL', 'imR', 'dispL'} for sample *idx*.

        Images are reshaped to (540, 960, 3); the disparity map is read from
        PFM and returned as a (1, 540, 960) array.
        """
        imageL = cv2.imread(self.paths_left[idx])
        imageR = cv2.imread(self.paths_right[idx])
        # cv2.imread returns None for a missing/unreadable file; fail loudly
        # here instead of with an opaque AttributeError on .reshape.
        if imageL is None or imageR is None:
            raise FileNotFoundError(
                'could not read image pair: %s / %s'
                % (self.paths_left[idx], self.paths_right[idx]))
        imageL = imageL.reshape(540, 960, 3)  # .transpose((2, 0, 1))
        imageR = imageR.reshape(540, 960, 3)  # .transpose((2, 0, 1))
        # NOTE(review): casting disparity to uint8 truncates values > 255 and
        # discards sub-pixel precision — kept as in the original; confirm
        # this is intended.
        dispL = readPFM(self.disp_left[idx])[0].astype(np.uint8).reshape(540, 960, 1).transpose((2, 0, 1))
        sample = {'imL': imageL, 'imR': imageR, 'dispL': dispL}
        if self.transform is not None:
            sample['imL'] = self.transform(sample['imL'])
            sample['imR'] = self.transform(sample['imR'])
        return sample