Merge pull request rpautrat#11 from rpautrat/improve_homographies
Improve homographies
rpautrat authored Jul 13, 2018
2 parents c430ca3 + 3547be7 commit 65aff36
Showing 7 changed files with 162 additions and 207 deletions.
214 changes: 69 additions & 145 deletions notebooks/detector_repeatability_hpatches.ipynb

Large diffs are not rendered by default.

86 changes: 53 additions & 33 deletions notebooks/visualize_random_homography.ipynb

Large diffs are not rendered by default.

8 changes: 4 additions & 4 deletions superpoint/configs/magic-point_coco_export.yaml
@@ -6,9 +6,9 @@ model:
     name: 'magic_point'
     batch_size: 50
     learning_rate: 0.001
-    detection_threshold: 0.015 # 1/65
+    detection_threshold: 0.001 # 0.015
     nms: 4
-    top_k: 300
+    top_k: 1000
     homography_adaptation:
         num: 100
         aggregation: 'sum'
@@ -19,8 +19,8 @@ model:
             scaling: true
             perspective: true
             scaling_amplitude: 0.1
-            perspective_amplitude_x: 0.15
-            perspective_amplitude_y: 0.15
+            perspective_amplitude_x: 0.1
+            perspective_amplitude_y: 0.1
             allow_artifacts: true
             patch_ratio: 0.5
     eval_iter: -1
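
With this export config, `detection_threshold` falls from 0.015 to 0.001 while `top_k` grows from 300 to 1000: the hard threshold becomes a loose gate and the ranking does the actual selection. A minimal NumPy sketch of how these two parameters typically interact when a probability map is turned into keypoints; the function name and signature are illustrative assumptions, and the NMS step applied in between is omitted:

    import numpy as np

    def extract_keypoints(prob_map, detection_threshold=0.001, top_k=1000):
        # Loose gate: keep every location above the (now very low) threshold.
        ys, xs = np.where(prob_map > detection_threshold)
        scores = prob_map[ys, xs]
        # Ranking does the real work: keep only the top_k highest scores.
        order = np.argsort(scores)[::-1][:top_k]
        return np.stack([ys[order], xs[order], scores[order]], axis=-1)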
12 changes: 6 additions & 6 deletions superpoint/configs/magic-point_coco_train.yaml
@@ -1,6 +1,6 @@
 data:
     name: 'coco'
-    labels: null # Complete with your export labels
+    labels: outputs/mp_synth-v7_ha2-100_no-topk # Complete with your export labels
     cache_in_memory: false
     validation_size: 192
     augmentation:
@@ -26,19 +26,19 @@ data:
             scaling: true
             perspective: true
             scaling_amplitude: 0.2
-            perspective_amplitude_x: 0.4
-            perspective_amplitude_y: 0.3
+            perspective_amplitude_x: 0.1
+            perspective_amplitude_y: 0.1
             patch_ratio: 0.85
-            max_angle: 3.14
+            max_angle: 1.57
             allow_artifacts: true
             valid_border_margin: 3
 model:
     name: 'magic_point'
     batch_size: 32
     eval_batch_size: 32
     learning_rate: 0.001
-    detection_threshold: 0.015 # 1/65
+    detection_threshold: 0.001 # 0.015
     nms: 4
-    top_k: 300
+    # top_k: 300
     train_iter: 200000
     validation_interval: 1000
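
`max_angle` is in radians: 3.14 (roughly pi, up to a half turn) drops to 1.57 (roughly pi/2, a quarter turn), halving the rotation range of the homographic augmentation, while commenting out `top_k` keeps every above-threshold detection when generating labels. A hedged sketch of the rotation sampling this parameter feeds (the repository does this in TensorFlow inside `sample_homography`; the NumPy names here are illustrative):

    import numpy as np

    max_angle = 1.57   # was 3.14: candidate rotations now span half the range
    n_angles = 25      # matches sample_homography's default
    angles = np.linspace(-max_angle, max_angle, n_angles)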
13 changes: 7 additions & 6 deletions superpoint/configs/magic-point_repeatability.yaml
@@ -3,17 +3,17 @@ data:
     dataset: 'hpatches' # 'coco' 'hpatches'
     alteration: 'v' # 'all' 'i' 'v'
     preprocessing:
-        resize: [240, 320] # False for coco
+        resize: [240, 320] # [240, 320] for HPatches and False for coco
 model:
     name: 'magic_point'
     batch_size: 50
     eval_batch_size: 50
     learning_rate: 0.001
     detection_threshold: 0.001
     nms: 4
-    top_k: 300
+    top_k: 1000
     homography_adaptation:
-        num: 100
+        num: 0
         aggregation: 'sum'
         filter_counts: 0
         homographies:
@@ -22,7 +22,8 @@ model:
             scaling: true
             perspective: true
             scaling_amplitude: 0.1
-            perspective_amplitude_x: 0.05
-            perspective_amplitude_y: 0.05
-            allow_artifacts: false
+            perspective_amplitude_x: 0.1
+            perspective_amplitude_y: 0.1
+            allow_artifacts: true
             patch_ratio: 0.5
     eval_iter: 1000
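
Setting `num: 0` disables homography adaptation for this benchmark, so repeatability is measured on the detector's raw predictions rather than on probabilities aggregated over warped copies. A pseudo-structure of what `num` controls, as a hedged sketch (the helpers `sample_random_homography`, `warp` and `unwarp` are hypothetical placeholders; the real loop lives in superpoint/models/homographies.py):

    def homography_adaptation_sketch(image, detector, num=0):
        probs = detector(image)                      # prediction on the unwarped image
        for _ in range(num):                         # num=0: the loop never runs
            H = sample_random_homography()           # hypothetical helper
            warped_probs = detector(warp(image, H))  # hypothetical helper
            probs += unwarp(warped_probs, H)         # aggregation: 'sum'
        return probs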
8 changes: 3 additions & 5 deletions superpoint/evaluations/detector_evaluation.py
@@ -157,7 +157,7 @@ def filter_keypoints(points, shape):

 def keep_true_keypoints(points, H, shape):
     """ Keep only the points whose warped coordinates by H
-    are still inside shape.. """
+    are still inside shape. """
     warped_points = warp_keypoints(points[:, [1, 0]], H)
     warped_points[:, [0, 1]] = warped_points[:, [1, 0]]
     mask = (warped_points[:, 0] >= 0) & (warped_points[:, 0] < shape[0]) &\
@@ -189,7 +189,6 @@ def select_k_best(points, k):
     warped_keypoints = np.stack([warped_keypoints[0],
                                  warped_keypoints[1],
                                  warped_prob], axis=-1)
-    warped_keypoints = select_k_best(warped_keypoints, keep_k_points) # TODO
     warped_keypoints = keep_true_keypoints(warped_keypoints, np.linalg.inv(H),
                                            data['prob'].shape)

@@ -198,12 +197,11 @@ def select_k_best(points, k):
     true_warped_keypoints = np.stack([true_warped_keypoints[:, 1],
                                       true_warped_keypoints[:, 0],
                                       prob], axis=-1)
-    true_warped_keypoints = select_k_best(true_warped_keypoints, keep_k_points) # TODO
     true_warped_keypoints = filter_keypoints(true_warped_keypoints, shape)

     # Keep only the keep_k_points best predictions
-    # warped_keypoints = select_k_best(warped_keypoints, keep_k_points)
-    # true_warped_keypoints = select_k_best(true_warped_keypoints, keep_k_points)
+    warped_keypoints = select_k_best(warped_keypoints, keep_k_points)
+    true_warped_keypoints = select_k_best(true_warped_keypoints, keep_k_points)

     # Compute the repeatability
     N1 = true_warped_keypoints.shape[0]
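
This change removes the two `# TODO` calls that selected the k best points before any filtering and enables the previously commented-out calls instead, so `select_k_best` now runs only after `keep_true_keypoints` and `filter_keypoints` have discarded points falling outside the shared view, and repeatability is computed over k points that are actually comparable. A minimal sketch of the selection itself, assuming rows of (y, x, prob) as built by the `np.stack` calls above (the real function may differ in details such as stripping the probability column):

    import numpy as np

    def select_k_best(points, k):
        # Sort rows by the probability column, then keep the k highest-scoring.
        sorted_points = points[points[:, 2].argsort()]
        return sorted_points[-k:]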
28 changes: 20 additions & 8 deletions superpoint/models/homographies.py
@@ -15,7 +15,7 @@
         'scaling': True,
         'perspective': True,
         'scaling_amplitude': 0.1,
-        'perspective_amplitude_x': 0.2,
+        'perspective_amplitude_x': 0.1,
         'perspective_amplitude_y': 0.1,
         'patch_ratio': 0.5,
         'max_angle': pi,
@@ -104,8 +104,8 @@ def step(i, probs, counts, images):

 def sample_homography(
         shape, perspective=True, scaling=True, rotation=True, translation=True,
-        n_scales=5, n_angles=25, scaling_amplitude=0.1, perspective_amplitude_x=0.2,
-        perspective_amplitude_y=0.1, patch_ratio=0.5, max_angle=pi,
+        n_scales=5, n_angles=25, scaling_amplitude=0.1, perspective_amplitude_x=0.1,
+        perspective_amplitude_y=0.1, patch_ratio=0.5, max_angle=pi/2,
         allow_artifacts=False, translation_overflow=0.):
     """Sample a random valid homography.
@@ -122,8 +122,15 @@ def sample_homography(
         scaling: A boolean that enables the random scaling of the patch.
         rotation: A boolean that enables the random rotation of the patch.
         translation: A boolean that enables the random translation of the patch.
-        n_scales: the number of tentative scales that are sampled when scaling.
-        n_angles: the number of tentatives angles that are sampled when rotating.
+        n_scales: The number of tentative scales that are sampled when scaling.
+        n_angles: The number of tentative angles that are sampled when rotating.
+        scaling_amplitude: Controls the amount of scale.
+        perspective_amplitude_x: Controls the perspective effect in x direction.
+        perspective_amplitude_y: Controls the perspective effect in y direction.
+        patch_ratio: Controls the size of the patches used to create the homography.
+        max_angle: Maximum angle used in rotations.
+        allow_artifacts: A boolean that enables artifacts when applying the homography.
+        translation_overflow: Amount of border artifacts caused by translation.

     Returns:
         A `Tensor` of shape `[1, 8]` corresponding to the flattened homography transform.
@@ -142,9 +149,14 @@
     if not allow_artifacts:
         perspective_amplitude_x = min(perspective_amplitude_x, margin)
         perspective_amplitude_y = min(perspective_amplitude_y, margin)
-    pts2 += tf.concat([tf.truncated_normal([4, 1], 0., perspective_amplitude_x/2),
-                       tf.truncated_normal([4, 1], 0., perspective_amplitude_y/2)],
-                      axis=1)
+    perspective_displacement = tf.truncated_normal([1], 0., perspective_amplitude_y)
+    h_displacement_left = tf.truncated_normal([1], 0., perspective_amplitude_x)
+    h_displacement_right = tf.truncated_normal([1], 0., perspective_amplitude_x)
+    pts2 += tf.stack([tf.concat([h_displacement_left, perspective_displacement], 0),
+                      tf.concat([h_displacement_left, -perspective_displacement], 0),
+                      tf.concat([h_displacement_right, perspective_displacement], 0),
+                      tf.concat([h_displacement_right, -perspective_displacement],
+                                0)])

     # Random scaling
     # sample several scales, check collision with borders, randomly pick a valid one
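
The rewritten perspective term is the heart of this commit: instead of adding an independent truncated-normal offset to each of the four corners, it samples one vertical displacement shared (with opposite signs) by the two corners on each side plus one horizontal displacement per side, so the quadrilateral deforms coherently like a camera tilt rather than jittering corner by corner. A NumPy sketch of the same displacement pattern, under the assumption that `truncnorm` stands in for `tf.truncated_normal` and that the rows follow the order of `pts2` in `sample_homography`:

    import numpy as np

    def truncnorm(std):
        # Redraw until within two standard deviations, like tf.truncated_normal.
        x = np.random.normal(0., std)
        while abs(x) > 2 * std:
            x = np.random.normal(0., std)
        return x

    perspective_amplitude_x = perspective_amplitude_y = 0.1

    perspective_displacement = truncnorm(perspective_amplitude_y)
    h_displacement_left = truncnorm(perspective_amplitude_x)
    h_displacement_right = truncnorm(perspective_amplitude_x)

    # One shared y-displacement per side pair and one x-displacement per side:
    # the four corners move together instead of independently.
    displacements = np.array([
        [h_displacement_left,  perspective_displacement],
        [h_displacement_left, -perspective_displacement],
        [h_displacement_right,  perspective_displacement],
        [h_displacement_right, -perspective_displacement],
    ])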
