Skip to content

Commit

Permalink
kasan: save alloc stack traces for mempool
Browse files Browse the repository at this point in the history
Update kasan_mempool_unpoison_object to properly poison the redzone and
save alloc stack traces for kmalloc and slab pools.

As part of this change, split out and use an unpoison_slab_object helper
function from __kasan_slab_alloc.

[[email protected]: mark unpoison_slab_object() as static]
  Link: https://lkml.kernel.org/r/[email protected]
Link: https://lkml.kernel.org/r/05ad235da8347cfe14d496d01b2aaf074b4f607c.1703024586.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <[email protected]>
Signed-off-by: Nathan Chancellor <[email protected]>
Cc: Alexander Lobakin <[email protected]>
Cc: Alexander Potapenko <[email protected]>
Cc: Andrey Ryabinin <[email protected]>
Cc: Breno Leitao <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Evgenii Stepanov <[email protected]>
Cc: Marco Elver <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
  • Loading branch information
xairy authored and akpm00 committed Dec 29, 2023
1 parent 0cc9fdb commit 29d7355
Show file tree
Hide file tree
Showing 2 changed files with 44 additions and 13 deletions.
7 changes: 4 additions & 3 deletions include/linux/kasan.h
Original file line number Diff line number Diff line change
Expand Up @@ -303,9 +303,10 @@ void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip);
* mempool).
*
* This function unpoisons a slab allocation that was previously poisoned via
* kasan_mempool_poison_object() without initializing its memory. For the
* tag-based modes, this function does not assign a new tag to the allocation
* and instead restores the original tags based on the pointer value.
* kasan_mempool_poison_object() and saves an alloc stack trace for it without
* initializing the allocation's memory. For the tag-based modes, this function
* does not assign a new tag to the allocation and instead restores the
* original tags based on the pointer value.
*
* This function operates on all slab allocations including large kmalloc
* allocations (the ones returned by kmalloc_large() or by kmalloc() with the
Expand Down
50 changes: 40 additions & 10 deletions mm/kasan/common.c
Original file line number Diff line number Diff line change
Expand Up @@ -277,6 +277,20 @@ void __kasan_kfree_large(void *ptr, unsigned long ip)
/* The object will be poisoned by kasan_poison_pages(). */
}

/*
 * Unpoison a slab object and, when stack collection is on, record an
 * alloc stack trace for it. Shared by __kasan_slab_alloc() and
 * __kasan_mempool_unpoison_object().
 */
static inline void unpoison_slab_object(struct kmem_cache *cache, void *object,
				gfp_t flags, bool init)
{
	/*
	 * The entire object is unpoisoned here; precise redzone poisoning
	 * for kmalloc() allocations is left to poison_kmalloc_redzone().
	 */
	kasan_unpoison(object, cache->object_size, init);

	if (!kasan_stack_collection_enabled())
		return;

	/* Alloc info is recorded for non-kmalloc() allocations only. */
	if (!is_kmalloc_cache(cache))
		kasan_save_alloc_info(cache, object, flags);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
void *object, gfp_t flags, bool init)
{
Expand All @@ -299,15 +313,8 @@ void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
tag = assign_tag(cache, object, false);
tagged_object = set_tag(object, tag);

/*
* Unpoison the whole object.
* For kmalloc() allocations, kasan_kmalloc() will do precise poisoning.
*/
kasan_unpoison(tagged_object, cache->object_size, init);

/* Save alloc info (if possible) for non-kmalloc() allocations. */
if (kasan_stack_collection_enabled() && !is_kmalloc_cache(cache))
kasan_save_alloc_info(cache, tagged_object, flags);
/* Unpoison the object and save alloc info for non-kmalloc() allocations. */
unpoison_slab_object(cache, tagged_object, flags, init);

return tagged_object;
}
Expand Down Expand Up @@ -482,7 +489,30 @@ bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)

/*
 * Unpoison a slab allocation that was previously poisoned via
 * kasan_mempool_poison_object(), saving an alloc stack trace for it.
 * Full interface documentation lives with the declaration in
 * include/linux/kasan.h.
 */
void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip)
{
	struct slab *slab;
	gfp_t flags = 0; /* Might be executing under a lock. */

	/* KFENCE-managed objects are poisoned/unpoisoned by KFENCE itself. */
	if (is_kfence_address(kasan_reset_tag(ptr)))
		return;

	slab = virt_to_slab(ptr);

	/*
	 * This function can be called for large kmalloc allocations that get
	 * their memory directly from page_alloc and thus have no slab.
	 */
	if (unlikely(!slab)) {
		kasan_unpoison(ptr, size, false);
		poison_kmalloc_large_redzone(ptr, size, flags);
		return;
	}

	/*
	 * Unpoison the object and save alloc info for non-kmalloc()
	 * allocations. Note the argument order: the helper takes
	 * (cache, object, flags, init) — passing 'size' here would be
	 * silently reinterpreted as gfp flags.
	 */
	unpoison_slab_object(slab->slab_cache, ptr, flags, false);

	/* Poison the redzone and save alloc info for kmalloc() allocations. */
	if (is_kmalloc_cache(slab->slab_cache))
		poison_kmalloc_redzone(slab->slab_cache, ptr, size, flags);
}

bool __kasan_check_byte(const void *address, unsigned long ip)
Expand Down

0 comments on commit 29d7355

Please sign in to comment.