forked from qinzheng93/GeoTransformer
-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
reorganize code; support Kitti & ModelNet
- Loading branch information
1 parent
d14834c
commit 73e1439
Showing
311 changed files
with
10,989 additions
and
6,783 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1 @@ | ||
*.pth.tar filter=lfs diff=lfs merge=lfs -text |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,2 @@ | ||
.idea | ||
.vscode |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,26 @@ | ||
import os | ||
import os.path as osp | ||
import open3d as o3d | ||
import numpy as np | ||
import glob | ||
from tqdm import tqdm | ||
|
||
|
||
def main():
    """Voxel-downsample every KITTI odometry velodyne scan (sequences 00-10).

    Reads the raw ``.bin`` scans from ``sequences/<seq>/velodyne``, keeps only
    the xyz columns, voxel-downsamples them with a 0.3 m grid and stores each
    frame as ``downsampled/<seq>/<frame>.npy`` (float32, shape (N, 3)).
    """
    for i in range(11):
        seq_id = '{:02d}'.format(i)
        # Create the output directory up front; the original assumed it
        # already existed and np.save would fail on a fresh checkout.
        out_dir = osp.join('downsampled', seq_id)
        os.makedirs(out_dir, exist_ok=True)
        file_names = glob.glob(osp.join('sequences', seq_id, 'velodyne', '*.bin'))
        for file_name in tqdm(file_names):
            # basename/splitext instead of split('/') so the script also works
            # with Windows path separators.
            frame = osp.splitext(osp.basename(file_name))[0]
            new_file_name = osp.join(out_dir, frame + '.npy')
            # KITTI scans are float32 quadruples (x, y, z, reflectance);
            # drop the reflectance channel.
            points = np.fromfile(file_name, dtype=np.float32).reshape(-1, 4)
            points = points[:, :3]
            pcd = o3d.geometry.PointCloud()
            pcd.points = o3d.utility.Vector3dVector(points)
            pcd = pcd.voxel_down_sample(0.3)
            points = np.array(pcd.points).astype(np.float32)
            np.save(new_file_name, points)


if __name__ == '__main__':
    main()
Binary file not shown.
Binary file not shown.
Binary file not shown.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,47 @@ | ||
import h5py | ||
import numpy as np | ||
import pickle | ||
|
||
|
||
def dump_pickle(data, filename):
    """Serialize *data* to *filename* using pickle (binary mode)."""
    with open(filename, 'wb') as stream:
        pickle.dump(data, stream)
|
||
|
||
def process(subset):
    """Convert the ModelNet40 HDF5 archives of *subset* into pickle files.

    Reads every ``.h5`` file listed in
    ``modelnet40_ply_hdf5_2048/<subset>_files.txt``, gathers points, normals
    and labels, and builds one ``dict(points=..., normals=..., label=...)``
    per model. The ``'train'`` subset is randomly split 80/20 into
    ``train.pkl``/``val.pkl``; anything else is dumped whole to ``test.pkl``.
    """
    with open(f'modelnet40_ply_hdf5_2048/{subset}_files.txt') as f:
        lines = f.readlines()
    all_points = []
    all_normals = []
    all_labels = []
    for line in lines:
        filename = line.strip()
        # The listed paths usually carry a leading 'data/...' prefix; resolve
        # the basename against the local dataset directory.
        # NOTE(review): original path expression was corrupted — confirm the
        # txt layout against the downloaded archive.
        h5_name = filename.split('/')[-1]
        # Context manager so the HDF5 handle is closed (original leaked it).
        with h5py.File(f'modelnet40_ply_hdf5_2048/{h5_name}', 'r') as h5file:
            all_points.append(h5file['data'][:])
            all_normals.append(h5file['normal'][:])
            # np.int was removed in NumPy 1.24; use an explicit dtype.
            all_labels.append(h5file['label'][:].flatten().astype(np.int64))
    points = np.concatenate(all_points, axis=0)
    normals = np.concatenate(all_normals, axis=0)
    labels = np.concatenate(all_labels, axis=0)
    print(f'{subset} data loaded.')
    num_data = points.shape[0]
    all_data = [dict(points=points[i], normals=normals[i], label=labels[i]) for i in range(num_data)]
    if subset == 'train':
        # Random 80/20 train/validation split of the official training set.
        indices = np.random.permutation(num_data)
        num_train = int(num_data * 0.8)
        train_data = [all_data[i] for i in indices[:num_train].tolist()]
        dump_pickle(train_data, 'train.pkl')
        val_data = [all_data[i] for i in indices[num_train:].tolist()]
        dump_pickle(val_data, 'val.pkl')
    else:
        dump_pickle(all_data, 'test.pkl')
|
||
|
||
|
||
# Build train.pkl/val.pkl from the training archives and test.pkl from the
# evaluation archives.
for subset in ('train', 'test'):
    process(subset)
Binary file added
BIN
+2.57 KB
...former.3dmatch.stage4.gse.k3.max.oacl.stage2.sinkhorn/__pycache__/backbone.cpython-38.pyc
Binary file not shown.
Binary file added
BIN
+3.63 KB
...nsformer.3dmatch.stage4.gse.k3.max.oacl.stage2.sinkhorn/__pycache__/config.cpython-38.pyc
Binary file not shown.
Binary file added
BIN
+1.68 KB
...sformer.3dmatch.stage4.gse.k3.max.oacl.stage2.sinkhorn/__pycache__/dataset.cpython-38.pyc
Binary file not shown.
Binary file added
BIN
+5.5 KB
...ransformer.3dmatch.stage4.gse.k3.max.oacl.stage2.sinkhorn/__pycache__/loss.cpython-38.pyc
Binary file not shown.
Binary file added
BIN
+5.23 KB
...ansformer.3dmatch.stage4.gse.k3.max.oacl.stage2.sinkhorn/__pycache__/model.cpython-38.pyc
Binary file not shown.
87 changes: 87 additions & 0 deletions
87
experiments/geotransformer.3dmatch.stage4.gse.k3.max.oacl.stage2.sinkhorn/backbone.py
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,87 @@ | ||
import torch | ||
import torch.nn as nn | ||
from IPython import embed | ||
|
||
from geotransformer.modules.kpconv import ConvBlock, ResidualBlock, UnaryBlock, LastUnaryBlock, nearest_upsample | ||
|
||
|
||
class KPConvFPN(nn.Module):
    """KPConv encoder/decoder (feature-pyramid) backbone.

    Four encoder stages of KPConv Conv/Residual blocks progressively
    subsample the point cloud while doubling the channel width (and the
    convolution radius/sigma); two decoder stages nearest-upsample the
    coarse features and fuse them with the matching encoder features via
    channel concatenation (skip connections).

    Args:
        input_dim: channel width of the raw input features.
        output_dim: channel width of the finest returned feature map.
        init_dim: base channel width of the first stage.
        kernel_size: number of kernel points per KPConv block.
        init_radius: convolution radius at stage 1 (doubled per stage below).
        init_sigma: kernel influence distance at stage 1 (doubled per stage).
        group_norm: group count passed to the blocks' group normalization.
    """

    def __init__(self, input_dim, output_dim, init_dim, kernel_size, init_radius, init_sigma, group_norm):
        super(KPConvFPN, self).__init__()

        # Stage 1 (full resolution): input_dim -> init_dim * 2.
        self.encoder1_1 = ConvBlock(input_dim, init_dim, kernel_size, init_radius, init_sigma, group_norm)
        self.encoder1_2 = ResidualBlock(init_dim, init_dim * 2, kernel_size, init_radius, init_sigma, group_norm)

        # Stage 2: strided block subsamples; radius/sigma double afterwards.
        self.encoder2_1 = ResidualBlock(
            init_dim * 2, init_dim * 2, kernel_size, init_radius, init_sigma, group_norm, strided=True
        )
        self.encoder2_2 = ResidualBlock(
            init_dim * 2, init_dim * 4, kernel_size, init_radius * 2, init_sigma * 2, group_norm
        )
        self.encoder2_3 = ResidualBlock(
            init_dim * 4, init_dim * 4, kernel_size, init_radius * 2, init_sigma * 2, group_norm
        )

        # Stage 3: init_dim * 4 -> init_dim * 8 at quarter resolution.
        self.encoder3_1 = ResidualBlock(
            init_dim * 4, init_dim * 4, kernel_size, init_radius * 2, init_sigma * 2, group_norm, strided=True
        )
        self.encoder3_2 = ResidualBlock(
            init_dim * 4, init_dim * 8, kernel_size, init_radius * 4, init_sigma * 4, group_norm
        )
        self.encoder3_3 = ResidualBlock(
            init_dim * 8, init_dim * 8, kernel_size, init_radius * 4, init_sigma * 4, group_norm
        )

        # Stage 4 (coarsest): init_dim * 8 -> init_dim * 16.
        self.encoder4_1 = ResidualBlock(
            init_dim * 8, init_dim * 8, kernel_size, init_radius * 4, init_sigma * 4, group_norm, strided=True
        )
        self.encoder4_2 = ResidualBlock(
            init_dim * 8, init_dim * 16, kernel_size, init_radius * 8, init_sigma * 8, group_norm
        )
        self.encoder4_3 = ResidualBlock(
            init_dim * 16, init_dim * 16, kernel_size, init_radius * 8, init_sigma * 8, group_norm
        )

        # Decoders consume the concatenation [upsampled, skip]:
        # decoder3: init_dim * (16 + 8) = 24 channels in; decoder2: init_dim * (8 + 4) = 12 in.
        self.decoder3 = UnaryBlock(init_dim * 24, init_dim * 8, group_norm)
        self.decoder2 = LastUnaryBlock(init_dim * 12, output_dim)

    def forward(self, feats, data_dict):
        """Run the FPN over precomputed multi-scale neighborhood structure.

        Args:
            feats: input point features at the finest resolution.
            data_dict: dict with per-stage lists 'points', 'neighbors',
                'subsampling' and 'upsampling' (index tables produced by the
                KPConv preprocessing; semantics defined by the kpconv module).

        Returns:
            List of feature maps ordered finest-first:
            [stage-2 decoded, stage-3 decoded, stage-4 encoder output].
            Note: no stage-1 (full-resolution) features are returned.
        """
        feats_list = []

        points_list = data_dict['points']
        neighbors_list = data_dict['neighbors']
        subsampling_list = data_dict['subsampling']
        upsampling_list = data_dict['upsampling']

        # --- Encoder ---
        feats_s1 = feats
        feats_s1 = self.encoder1_1(feats_s1, points_list[0], points_list[0], neighbors_list[0])
        feats_s1 = self.encoder1_2(feats_s1, points_list[0], points_list[0], neighbors_list[0])

        # Strided first block of each stage maps stage i-1 points to stage i.
        feats_s2 = self.encoder2_1(feats_s1, points_list[1], points_list[0], subsampling_list[0])
        feats_s2 = self.encoder2_2(feats_s2, points_list[1], points_list[1], neighbors_list[1])
        feats_s2 = self.encoder2_3(feats_s2, points_list[1], points_list[1], neighbors_list[1])

        feats_s3 = self.encoder3_1(feats_s2, points_list[2], points_list[1], subsampling_list[1])
        feats_s3 = self.encoder3_2(feats_s3, points_list[2], points_list[2], neighbors_list[2])
        feats_s3 = self.encoder3_3(feats_s3, points_list[2], points_list[2], neighbors_list[2])

        feats_s4 = self.encoder4_1(feats_s3, points_list[3], points_list[2], subsampling_list[2])
        feats_s4 = self.encoder4_2(feats_s4, points_list[3], points_list[3], neighbors_list[3])
        feats_s4 = self.encoder4_3(feats_s4, points_list[3], points_list[3], neighbors_list[3])

        # --- Decoder with skip connections ---
        latent_s4 = feats_s4
        feats_list.append(feats_s4)

        latent_s3 = nearest_upsample(latent_s4, upsampling_list[2])
        latent_s3 = torch.cat([latent_s3, feats_s3], dim=1)
        latent_s3 = self.decoder3(latent_s3)
        feats_list.append(latent_s3)

        latent_s2 = nearest_upsample(latent_s3, upsampling_list[1])
        latent_s2 = torch.cat([latent_s2, feats_s2], dim=1)
        latent_s2 = self.decoder2(latent_s2)
        feats_list.append(latent_s2)

        # Collected coarse-to-fine; reverse so callers get finest-first.
        feats_list.reverse()

        return feats_list
Oops, something went wrong.