kfence: shorten critical sections of alloc/free
Initializing memory and setting/checking the canary bytes is relatively
expensive, and doing so inside the meta->lock critical sections
unnecessarily extends the time spent with preemption and interrupts
disabled.

Reads of meta->addr and meta->size in kfence_guarded_alloc() and
kfence_guarded_free() don't require holding meta->lock as long as the
object has been removed from the freelist: only kfence_guarded_alloc()
sets meta->addr and meta->size, and it does so only after taking the
object off the freelist, which in turn requires that a preceding
kfence_guarded_free() returned it to the list (or that the object is
still in its initial state).

Therefore, move the reads of meta->addr and meta->size, including the
expensive memory initialization that uses them, out of the meta->lock
critical sections.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Marco Elver <[email protected]>
Acked-by: Alexander Potapenko <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Jann Horn <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
melver authored and torvalds committed Nov 6, 2021
1 parent f51733e commit 4933295
Showing 1 changed file with 21 additions and 17 deletions.
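
The change boils down to one pattern: do only the cheap metadata update while holding meta->lock, snapshot whatever the unlocked path still needs, and perform the expensive memory work after dropping the lock. The sketch below is a minimal user-space analogy of that pattern, not KFENCE code; the struct, field names, and pthread locking are illustrative assumptions standing in for the kernel's raw spinlock with interrupts disabled.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

struct object {
        pthread_mutex_t lock;
        void *addr;                /* analogous to meta->addr */
        size_t size;               /* analogous to meta->size */
        bool want_init_on_free;
        int state;                 /* OBJ_ALLOCATED or OBJ_FREED */
};

enum { OBJ_ALLOCATED, OBJ_FREED };

/* Before: the expensive memset() runs with the lock held. */
void obj_free_long_section(struct object *obj)
{
        pthread_mutex_lock(&obj->lock);
        if (obj->want_init_on_free)
                memset(obj->addr, 0, obj->size);   /* expensive, under the lock */
        obj->state = OBJ_FREED;
        pthread_mutex_unlock(&obj->lock);
}

/*
 * After: only the state transition stays under the lock; addr and size
 * cannot change while the object is allocated, so the memset() may run
 * after the unlock.
 */
void obj_free_short_section(struct object *obj)
{
        bool init;

        pthread_mutex_lock(&obj->lock);
        obj->state = OBJ_FREED;                    /* cheap metadata update */
        init = obj->want_init_on_free;             /* snapshot under the lock */
        pthread_mutex_unlock(&obj->lock);

        if (init)
                memset(obj->addr, 0, obj->size);   /* expensive, lock dropped */
}

The kfence_guarded_free() hunks below follow the same shape: metadata_update_state() stays inside the locked region, slab_want_init_on_free() is snapshotted into a local before the unlock, and the canary checks and memzero_explicit() now run after it.
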
mm/kfence/core.c
@@ -309,12 +309,19 @@ static inline bool set_canary_byte(u8 *addr)
 /* Check canary byte at @addr. */
 static inline bool check_canary_byte(u8 *addr)
 {
+        struct kfence_metadata *meta;
+        unsigned long flags;
+
         if (likely(*addr == KFENCE_CANARY_PATTERN(addr)))
                 return true;
 
         atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
-        kfence_report_error((unsigned long)addr, false, NULL, addr_to_metadata((unsigned long)addr),
-                            KFENCE_ERROR_CORRUPTION);
+
+        meta = addr_to_metadata((unsigned long)addr);
+        raw_spin_lock_irqsave(&meta->lock, flags);
+        kfence_report_error((unsigned long)addr, false, NULL, meta, KFENCE_ERROR_CORRUPTION);
+        raw_spin_unlock_irqrestore(&meta->lock, flags);
+
         return false;
 }

@@ -324,8 +331,6 @@ static __always_inline void for_each_canary(const struct kfence_metadata *meta,
         const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
         unsigned long addr;
 
-        lockdep_assert_held(&meta->lock);
-
         /*
          * We'll iterate over each canary byte per-side until fn() returns
          * false. However, we'll still iterate over the canary bytes to the
@@ -414,8 +419,9 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
         WRITE_ONCE(meta->cache, cache);
         meta->size = size;
         meta->alloc_stack_hash = alloc_stack_hash;
+        raw_spin_unlock_irqrestore(&meta->lock, flags);
 
-        for_each_canary(meta, set_canary_byte);
+        alloc_covered_add(alloc_stack_hash, 1);
 
         /* Set required struct page fields. */
         page = virt_to_page(meta->addr);
@@ -425,11 +431,8 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
         if (IS_ENABLED(CONFIG_SLAB))
                 page->s_mem = addr;
 
-        raw_spin_unlock_irqrestore(&meta->lock, flags);
-
-        alloc_covered_add(alloc_stack_hash, 1);
-
         /* Memory initialization. */
+        for_each_canary(meta, set_canary_byte);
 
         /*
          * We check slab_want_init_on_alloc() ourselves, rather than letting
@@ -454,6 +457,7 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z
 {
         struct kcsan_scoped_access assert_page_exclusive;
         unsigned long flags;
+        bool init;
 
         raw_spin_lock_irqsave(&meta->lock, flags);

@@ -481,6 +485,13 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z
                 meta->unprotected_page = 0;
         }
 
+        /* Mark the object as freed. */
+        metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
+        init = slab_want_init_on_free(meta->cache);
+        raw_spin_unlock_irqrestore(&meta->lock, flags);
+
+        alloc_covered_add(meta->alloc_stack_hash, -1);
+
         /* Check canary bytes for memory corruption. */
         for_each_canary(meta, check_canary_byte);

@@ -489,16 +500,9 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z
          * data is still there, and after a use-after-free is detected, we
          * unprotect the page, so the data is still accessible.
          */
-        if (!zombie && unlikely(slab_want_init_on_free(meta->cache)))
+        if (!zombie && unlikely(init))
                 memzero_explicit(addr, meta->size);
 
-        /* Mark the object as freed. */
-        metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
-
-        raw_spin_unlock_irqrestore(&meta->lock, flags);
-
-        alloc_covered_add(meta->alloc_stack_hash, -1);
-
         /* Protect to detect use-after-frees. */
         kfence_protect((unsigned long)addr);

