kasan: introduce poison_kmalloc_large_redzone
Split out a poison_kmalloc_large_redzone helper from __kasan_kmalloc_large
and use it in the caller's code.

This is a preparatory change for the following patches in this series.

Link: https://lkml.kernel.org/r/93317097b668519d76097fb065201b2027436e22.1703024586.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <[email protected]>
Cc: Alexander Lobakin <[email protected]>
Cc: Alexander Potapenko <[email protected]>
Cc: Andrey Ryabinin <[email protected]>
Cc: Breno Leitao <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Evgenii Stepanov <[email protected]>
Cc: Marco Elver <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
xairy authored and akpm00 committed Dec 29, 2023
1 parent ce37eec commit 0cc9fdb
Showing 1 changed file with 23 additions and 18 deletions.
41 changes: 23 additions & 18 deletions mm/kasan/common.c
@@ -363,23 +363,12 @@ void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object
 }
 EXPORT_SYMBOL(__kasan_kmalloc);
 
-void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
+static inline void poison_kmalloc_large_redzone(const void *ptr, size_t size,
 						gfp_t flags)
 {
 	unsigned long redzone_start;
 	unsigned long redzone_end;
 
-	if (gfpflags_allow_blocking(flags))
-		kasan_quarantine_reduce();
-
-	if (unlikely(ptr == NULL))
-		return NULL;
-
-	/*
-	 * The object has already been unpoisoned by kasan_unpoison_pages() for
-	 * alloc_pages() or by kasan_krealloc() for krealloc().
-	 */
-
 	/*
 	 * The redzone has byte-level precision for the generic mode.
 	 * Partially poison the last object granule to cover the unaligned
@@ -389,19 +378,35 @@ void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
 		kasan_poison_last_granule(ptr, size);
 
 	/* Poison the aligned part of the redzone. */
-	redzone_start = round_up((unsigned long)(ptr + size),
-				KASAN_GRANULE_SIZE);
+	redzone_start = round_up((unsigned long)(ptr + size), KASAN_GRANULE_SIZE);
 	redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
 	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
 		     KASAN_PAGE_REDZONE, false);
+}
+
+void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
+						gfp_t flags)
+{
+	if (gfpflags_allow_blocking(flags))
+		kasan_quarantine_reduce();
+
+	if (unlikely(ptr == NULL))
+		return NULL;
+
+	/* The object has already been unpoisoned by kasan_unpoison_pages(). */
+	poison_kmalloc_large_redzone(ptr, size, flags);
+
+	/* Keep the tag that was set by alloc_pages(). */
 	return (void *)ptr;
 }
 
 void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
 {
 	struct slab *slab;
 
 	if (gfpflags_allow_blocking(flags))
 		kasan_quarantine_reduce();
 
 	if (unlikely(object == ZERO_SIZE_PTR))
 		return (void *)object;
 
@@ -419,11 +424,11 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
 
 	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
 	if (unlikely(!slab))
-		return __kasan_kmalloc_large(object, size, flags);
-	else {
+		poison_kmalloc_large_redzone(object, size, flags);
+	else
 		poison_kmalloc_redzone(slab->slab_cache, object, size, flags);
-		return (void *)object;
-	}
+
+	return (void *)object;
 }
 
 bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,

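For readers less familiar with this file, the pattern in this commit -- extracting a shared redzone-poisoning helper so that __kasan_kmalloc_large and __kasan_krealloc no longer duplicate the logic -- can be sketched as a small standalone C program. Everything below (poison_large_redzone, mock_kmalloc_large, the single static page buffer) is a hypothetical simplification for illustration only, not the kernel implementation.

/*
 * Standalone mock of the refactoring pattern: one helper poisons the
 * redzone past a "large" allocation, and two callers reuse it.
 * Hypothetical demo code, not mm/kasan/common.c.
 */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE	4096
#define POISON_BYTE	0xFE

static unsigned char page[PAGE_SIZE];

/* Helper split out so every caller poisons the tail of the page identically. */
static void poison_large_redzone(size_t size)
{
	/* Everything past the object, up to the end of the page, is redzone. */
	memset(page + size, POISON_BYTE, PAGE_SIZE - size);
}

static void *mock_kmalloc_large(size_t size)
{
	if (size == 0 || size > PAGE_SIZE)
		return NULL;
	poison_large_redzone(size);
	return page;
}

static void *mock_krealloc_large(void *ptr, size_t new_size)
{
	if (ptr != page || new_size == 0 || new_size > PAGE_SIZE)
		return NULL;
	poison_large_redzone(new_size);	/* same helper, second caller */
	return page;
}

int main(void)
{
	void *p = mock_kmalloc_large(100);

	p = mock_krealloc_large(p, 200);
	printf("byte right after the object: 0x%02X\n",
	       ((unsigned char *)p)[200]);
	return 0;
}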