Skip to content

Commit

Permalink
Same size for pairs of images in HPatches preprocessing
Browse files Browse the repository at this point in the history
  • Loading branch information
rpautrat committed Nov 5, 2019
1 parent 90488b0 commit c2c4102
Show file tree
Hide file tree
Showing 2 changed files with 166 additions and 135 deletions.
230 changes: 138 additions & 92 deletions notebooks/visualize_hpatches.ipynb

Large diffs are not rendered by default.

71 changes: 28 additions & 43 deletions superpoint/datasets/patches_dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,53 +59,40 @@ def _preprocess(image):
**config['preprocessing'])
return tf.to_float(image)

def _preprocess_warped(zip_data):
    """Grayscale-convert and rescale a warped image by its paired ratio.

    `zip_data` is a dict with the raw RGB image under 'image' and the
    resize ratio (computed from the non-warped partner image) under
    'scale', so both images of an HPatches pair end up the same size.
    """
    img = zip_data['image']
    # Target spatial size = original (height, width) times the pair's scale.
    target_size = tf.to_float(tf.shape(img)[:2]) * zip_data['scale']
    # Pin the static rank/channels so rgb_to_grayscale accepts the tensor.
    tf.Tensor.set_shape(img, [None, None, 3])
    gray = tf.image.rgb_to_grayscale(img)
    resized = tf.image.resize_images(gray, tf.to_int32(target_size),
                                     method=tf.image.ResizeMethod.BILINEAR)
    return tf.to_float(resized)

def _warp_image(image):
    """Apply a randomly sampled homography to `image`.

    Returns a dict holding the warped image ('warped_im') and the
    sampled homography ('H') so downstream ops can use both.
    """
    homography = sample_homography(tf.shape(image)[:2])
    warped = tf.contrib.image.transform(image, homography,
                                        interpolation="BILINEAR")
    return {'warped_im': warped, 'H': homography}

# NOTE(review): this span is a unified-diff rendering that interleaves the
# pre-commit (removed) and post-commit (added) lines of this function without
# +/- markers, so it is NOT valid Python as shown (e.g. the first `tf.stack([`
# for `translation` is never closed before the next assignment). Comments
# below describe the apparent intent of each version; consult the repository
# file at commit c2c4102 for the authoritative post-commit code.
def _adapt_homography_to_preprocessing(zip_data):
image = zip_data['image']
# Ground-truth homography mapping between the pair, promoted to float32.
H = tf.cast(zip_data['homography'], tf.float32)
# Pre-commit line: size taken from the image tensor itself.
source_size = tf.cast(tf.shape(image)[:2], tf.float32)
# Post-commit line: sizes are precomputed and carried in the zipped dataset.
source_size = tf.cast(zip_data['shape'], tf.float32)
source_warped_size = tf.cast(zip_data['warped_shape'], tf.float32)
# Target (height, width) requested by the preprocessing config.
target_size = tf.cast(tf.convert_to_tensor(config['preprocessing']['resize']),
tf.float32)

# Compute the scaling ratio due to the resizing for both images
# Aspect-preserving scale: max over the per-axis ratios for each image.
s = tf.reduce_max(tf.divide(target_size, source_size))
up_scale = tf.diag(tf.stack([s, s, tf.constant(1.)]))
warped_s = tf.reduce_max(tf.divide(target_size, source_warped_size))
down_scale = tf.diag(tf.stack([1 / warped_s, 1 / warped_s, tf.constant(1.)]))

# Pre-commit branch logic (removed by this commit): chose which axis the
# resize fit, then offset only along the other axis.
fit_height = tf.greater(tf.divide(target_size[0], source_size[0]),
tf.divide(target_size[1], source_size[1]))

padding_y = tf.to_int32(((source_size[0] * s - target_size[0]) / tf.constant(2.0)))
padding_x = tf.to_int32(((source_size[1] * s - target_size[1]) / tf.constant(2.0)))

tx = tf.cond(fit_height, lambda: padding_x, lambda: tf.constant(0))
ty = tf.cond(fit_height, lambda: tf.constant(0), lambda: padding_y)
translation = tf.stack([tf.constant(1), tf.constant(0), tx,
tf.constant(0), tf.constant(1), ty,
# Compute the translation due to the crop for both images
# Post-commit replacement: center-crop offsets for the source image,
# applied as a NEGATIVE translation on the source side of H.
pad_y = tf.to_int32(((source_size[0] * s - target_size[0]) / tf.constant(2.0)))
pad_x = tf.to_int32(((source_size[1] * s - target_size[1]) / tf.constant(2.0)))
translation = tf.stack([tf.constant(1), tf.constant(0), -pad_x,
tf.constant(0), tf.constant(1), -pad_y,
tf.constant(0),tf.constant(0), tf.constant(1)])
translation = tf.to_float(tf.reshape(translation, [3,3]))
# Center-crop offsets for the warped image, applied as a POSITIVE
# translation on the warped side of H.
pad_y = tf.to_int32(((source_warped_size[0] * warped_s - target_size[0])
/ tf.constant(2.0)))
pad_x = tf.to_int32(((source_warped_size[1] * warped_s - target_size[1])
/ tf.constant(2.0)))
warped_translation = tf.stack([tf.constant(1), tf.constant(0), pad_x,
tf.constant(0), tf.constant(1), pad_y,
tf.constant(0),tf.constant(0), tf.constant(1)])
warped_translation = tf.to_float(tf.reshape(warped_translation, [3,3]))

# Pre-commit scale matrices (removed): both built from the source ratio `s`.
down_scale = tf.diag(tf.stack([1/s, 1/s, tf.constant(1.)]))
up_scale = tf.diag(tf.stack([s, s, tf.constant(1.)]))
# Pre-commit composition (removed):
H = up_scale @ H @ down_scale @ translation
# Post-commit composition: conjugate H by the per-image scale/crop transforms
# so it maps between the two PREPROCESSED images.
H = translation @ up_scale @ H @ down_scale @ warped_translation
return H

def _get_shape(image):
    """Return the dynamic spatial shape (height, width) of `image`."""
    full_shape = tf.shape(image)
    return full_shape[:2]

def _get_scale(shape):
    """Compute the aspect-preserving resize ratio for an image shape.

    Divides the configured target size (`config['preprocessing']['resize']`,
    read from the enclosing closure) by the image's (height, width) `shape`
    and keeps the larger per-axis ratio, so the resized image covers the
    target in both dimensions.
    """
    target = tf.convert_to_tensor(config['preprocessing']['resize'],
                                  dtype=tf.float32)
    # float32 / float32 already yields float32, so the original outer
    # tf.cast(..., tf.float32) was a redundant no-op and is dropped.
    ratios = tf.divide(target, tf.to_float(shape))
    return tf.reduce_max(ratios)

# NOTE(review): diff rendering — the lines below interleave removed and added
# pipeline statements and include a GitHub "Expand All @@ ... @@" hunk header,
# so this span is NOT valid Python as shown. Consult the repository file for
# the authoritative post-commit pipeline.
images = tf.data.Dataset.from_tensor_slices(files['image_paths'])
# Decode each image path into a uint8 tensor via the _read_image helper.
images = images.map(lambda path: tf.py_func(_read_image, [path], tf.uint8))
Expand All @@ -115,17 +102,15 @@ def _get_scale(shape):
[path],
tf.uint8))
if config['preprocessing']['resize']:
# Pre-commit lines (removed): homographies were zipped with the images.
homographies = tf.data.Dataset.zip({'image': images,
'homography': homographies})
# Post-commit lines: zip the homographies with the PRECOMPUTED shapes of
# both images so the homography can be adapted to the resize/crop.
shapes = images.map(_get_shape)
warped_shapes = warped_images.map(_get_shape)
homographies = tf.data.Dataset.zip({'homography': homographies,
'shape': shapes,
'warped_shape': warped_shapes})
homographies = homographies.map(_adapt_homography_to_preprocessing)
# Pre-commit lines (removed): warped images were resized by a per-pair
# scale via _preprocess_warped instead of the shared _preprocess.
images_shape = images.map(_get_shape)
scales = images_shape.map(_get_scale)
warped_images = tf.data.Dataset.zip({'image': warped_images,
'scale': scales})
warped_images = warped_images.map(_preprocess_warped)
else:
warped_images = warped_images.map(_preprocess)

images = images.map(_preprocess)
warped_images = warped_images.map(_preprocess)

# Normalize pixel intensities from [0, 255] to [0, 1].
images = images.map(lambda img: tf.to_float(img) / 255.)
warped_images = warped_images.map(lambda img: tf.to_float(img) / 255.)
Expand Down

0 comments on commit c2c4102

Please sign in to comment.