mm: slub: add kernel address sanitizer support for slub allocator
With this patch KASAN is able to catch bugs in memory allocated by
SLUB.  Initially, all objects in a newly allocated slab page are marked
as redzone.  Later, when a SLUB object is allocated, the number of
bytes requested by the caller is marked as accessible, and the rest of
the object (including SLUB's metadata) is marked as redzone
(inaccessible).

We also mark an object as accessible when ksize() is called for it.
There are places in the kernel where ksize() is called to determine the
size of the actually allocated area.  Such callers may validly access
the whole allocated memory, so it has to be marked as accessible.

Code in slub.c and slab_common.c may validly access an object's
metadata, so instrumentation for these files is disabled.
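
As a concrete illustration of the class of bug this makes detectable
(a hypothetical caller, not part of this patch):

	char *p = kmalloc(60, GFP_KERNEL);	/* served from the 64-byte cache */

	if (p) {
		p[60] = 'x';	/* out of bounds: past the 60 requested
				 * bytes, even though the slub object
				 * itself has room */
		kfree(p);
		p[0] = 'y';	/* use after free: shadow holds
				 * KASAN_KMALLOC_FREE */
	}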

Signed-off-by: Andrey Ryabinin <[email protected]>
Signed-off-by: Dmitry Chernenkov <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Konstantin Serebryany <[email protected]>
Signed-off-by: Andrey Konovalov <[email protected]>
Cc: Yuri Gribov <[email protected]>
Cc: Konstantin Khlebnikov <[email protected]>
Cc: Sasha Levin <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Andi Kleen <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: Pekka Enberg <[email protected]>
Cc: David Rientjes <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
aryabinin authored and torvalds committed Feb 14, 2015
1 parent a79316c commit 0316bec
Showing 9 changed files with 197 additions and 5 deletions.
27 changes: 27 additions & 0 deletions include/linux/kasan.h
@@ -37,6 +37,18 @@ void kasan_unpoison_shadow(const void *address, size_t size);
void kasan_alloc_pages(struct page *page, unsigned int order);
void kasan_free_pages(struct page *page, unsigned int order);

void kasan_poison_slab(struct page *page);
void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
void kasan_poison_object_data(struct kmem_cache *cache, void *object);

void kasan_kmalloc_large(const void *ptr, size_t size);
void kasan_kfree_large(const void *ptr);
void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size);
void kasan_krealloc(const void *object, size_t new_size);

void kasan_slab_alloc(struct kmem_cache *s, void *object);
void kasan_slab_free(struct kmem_cache *s, void *object);

#else /* CONFIG_KASAN */

static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
@@ -47,6 +59,21 @@ static inline void kasan_disable_current(void) {}
static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
static inline void kasan_free_pages(struct page *page, unsigned int order) {}

static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
void *object) {}

static inline void kasan_kmalloc_large(void *ptr, size_t size) {}
static inline void kasan_kfree_large(const void *ptr) {}
static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
size_t size) {}
static inline void kasan_krealloc(const void *object, size_t new_size) {}

static inline void kasan_slab_alloc(struct kmem_cache *s, void *object) {}
static inline void kasan_slab_free(struct kmem_cache *s, void *object) {}

#endif /* CONFIG_KASAN */

#endif /* LINUX_KASAN_H */
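
Taken together, the hooks declared above pair up over a slab page's
lifetime roughly as follows (a condensed sketch of the SLUB call order,
not literal allocator code):

	kasan_poison_slab(page);		/* new slab: whole page -> redzone */

	kasan_unpoison_object_data(s, object);	/* only around the constructor */
	s->ctor(object);
	kasan_poison_object_data(s, object);	/* ...then back to redzone */

	kasan_slab_alloc(s, object);		/* alloc: object_size accessible */
	kasan_slab_free(s, object);		/* free: marked KASAN_KMALLOC_FREE */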
11 changes: 9 additions & 2 deletions include/linux/slab.h
@@ -104,6 +104,7 @@
(unsigned long)ZERO_SIZE_PTR)

#include <linux/kmemleak.h>
#include <linux/kasan.h>

struct mem_cgroup;
/*
@@ -325,15 +326,21 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
gfp_t flags, size_t size)
{
return kmem_cache_alloc(s, flags);
void *ret = kmem_cache_alloc(s, flags);

kasan_kmalloc(s, ret, size);
return ret;
}

static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
gfp_t gfpflags,
int node, size_t size)
{
return kmem_cache_alloc_node(s, gfpflags, node);
void *ret = kmem_cache_alloc_node(s, gfpflags, node);

kasan_kmalloc(s, ret, size);
return ret;
}
#endif /* CONFIG_TRACING */

1 change: 1 addition & 0 deletions lib/Kconfig.kasan
@@ -5,6 +5,7 @@ if HAVE_ARCH_KASAN

config KASAN
bool "KASan: runtime memory debugger"
depends on SLUB_DEBUG
help
Enables kernel address sanitizer - runtime memory debugger,
designed to find out-of-bounds accesses and use-after-free bugs.
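
With the new dependency, a minimal configuration enabling KASan might
look like this (assuming an architecture that provides HAVE_ARCH_KASAN):

	CONFIG_SLUB=y
	CONFIG_SLUB_DEBUG=y
	CONFIG_KASAN=y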
3 changes: 3 additions & 0 deletions mm/Makefile
@@ -2,6 +2,9 @@
# Makefile for the linux memory manager.
#

KASAN_SANITIZE_slab_common.o := n
KASAN_SANITIZE_slub.o := n

mmu-y := nommu.o
mmu-$(CONFIG_MMU) := gup.o highmem.o memory.o mincore.o \
mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
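
KASAN_SANITIZE_<file>.o := n is the per-object kbuild switch that
compiles the named file without address-sanitizer instrumentation; the
same convention also works for everything built by one Makefile
(mm/kasan/ itself is built that way):

	# per object file, as in this patch:
	KASAN_SANITIZE_slub.o := n
	# for a whole directory:
	KASAN_SANITIZE := n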
98 changes: 98 additions & 0 deletions mm/kasan/kasan.c
@@ -31,6 +31,7 @@
#include <linux/kasan.h>

#include "kasan.h"
#include "../slab.h"

/*
* Poisons the shadow memory for 'size' bytes starting from 'addr'.
@@ -268,6 +269,103 @@ void kasan_free_pages(struct page *page, unsigned int order)
KASAN_FREE_PAGE);
}

void kasan_poison_slab(struct page *page)
{
kasan_poison_shadow(page_address(page),
PAGE_SIZE << compound_order(page),
KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
kasan_poison_shadow(object,
round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
KASAN_KMALLOC_REDZONE);
}

void kasan_slab_alloc(struct kmem_cache *cache, void *object)
{
kasan_kmalloc(cache, object, cache->object_size);
}

void kasan_slab_free(struct kmem_cache *cache, void *object)
{
unsigned long size = cache->object_size;
unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);

/* RCU slabs could be legally used after free within the RCU period */
if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
return;

kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
}

void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size)
{
unsigned long redzone_start;
unsigned long redzone_end;

if (unlikely(object == NULL))
return;

redzone_start = round_up((unsigned long)(object + size),
KASAN_SHADOW_SCALE_SIZE);
redzone_end = round_up((unsigned long)object + cache->object_size,
KASAN_SHADOW_SCALE_SIZE);

kasan_unpoison_shadow(object, size);
kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
KASAN_KMALLOC_REDZONE);
}
EXPORT_SYMBOL(kasan_kmalloc);

void kasan_kmalloc_large(const void *ptr, size_t size)
{
struct page *page;
unsigned long redzone_start;
unsigned long redzone_end;

if (unlikely(ptr == NULL))
return;

page = virt_to_page(ptr);
redzone_start = round_up((unsigned long)(ptr + size),
KASAN_SHADOW_SCALE_SIZE);
redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

kasan_unpoison_shadow(ptr, size);
kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
KASAN_PAGE_REDZONE);
}

void kasan_krealloc(const void *object, size_t size)
{
struct page *page;

if (unlikely(object == ZERO_SIZE_PTR))
return;

page = virt_to_head_page(object);

if (unlikely(!PageSlab(page)))
kasan_kmalloc_large(object, size);
else
kasan_kmalloc(page->slab_cache, object, size);
}

void kasan_kfree_large(const void *ptr)
{
struct page *page = virt_to_page(ptr);

kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
KASAN_FREE_PAGE);
}

#define DEFINE_ASAN_LOAD_STORE(size) \
void __asan_load##size(unsigned long addr) \
{ \
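
To make the rounding in kasan_kmalloc() concrete, a worked example
assuming KASAN_SHADOW_SCALE_SIZE == 8 and a 100-byte request served
from a cache with object_size == 128 (illustrative values only):

	redzone_start = round_up(object + 100, 8);	/* object + 104 */
	redzone_end   = round_up(object + 128, 8);	/* object + 128 */

	kasan_unpoison_shadow(object, 100);
	/* offsets 0..95 fully accessible (shadow bytes == 0); the shadow
	 * byte for the granule 96..103 holds 4, so offsets 100..103
	 * already fault */
	kasan_poison_shadow(object + 104, 24, KASAN_KMALLOC_REDZONE);
	/* offsets 104..127 are explicit redzone */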
5 changes: 5 additions & 0 deletions mm/kasan/kasan.h
@@ -7,6 +7,11 @@
#define KASAN_SHADOW_MASK (KASAN_SHADOW_SCALE_SIZE - 1)

#define KASAN_FREE_PAGE 0xFF /* page was freed */
#define KASAN_PAGE_REDZONE 0xFE /* redzone for kmalloc_large allocations */
#define KASAN_KMALLOC_REDZONE 0xFC /* redzone inside slub object */
#define KASAN_KMALLOC_FREE 0xFB /* object was freed (kmem_cache_free/kfree) */


struct kasan_access_info {
const void *access_addr;
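
For reference, the encoding these markers plug into: a shadow byte of 0
means the whole 8-byte granule is accessible, 1..7 mean only that many
leading bytes are, and the negative values above name the poison type.
A sketch of a one-byte check (assuming the kasan_mem_to_shadow() helper
from the base KASAN patch; not the exact in-tree code):

	static bool addr_is_poisoned(const void *addr)
	{
		s8 shadow = *(s8 *)kasan_mem_to_shadow(addr);

		if (shadow == 0)
			return false;	/* whole granule accessible */
		/* 1..7: first 'shadow' bytes valid; < 0: KASAN_* poison */
		return (s8)((unsigned long)addr & KASAN_SHADOW_MASK) >= shadow;
	}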
21 changes: 21 additions & 0 deletions mm/kasan/report.c
@@ -24,6 +24,7 @@
#include <linux/kasan.h>

#include "kasan.h"
#include "../slab.h"

/* Shadow layout customization. */
#define SHADOW_BYTES_PER_BLOCK 1
@@ -55,8 +56,11 @@ static void print_error_description(struct kasan_access_info *info)

switch (shadow_val) {
case KASAN_FREE_PAGE:
case KASAN_KMALLOC_FREE:
bug_type = "use after free";
break;
case KASAN_PAGE_REDZONE:
case KASAN_KMALLOC_REDZONE:
case 0 ... KASAN_SHADOW_SCALE_SIZE - 1:
bug_type = "out of bounds access";
break;
@@ -77,6 +81,23 @@ static void print_address_description(struct kasan_access_info *info)
if ((addr >= (void *)PAGE_OFFSET) &&
(addr < high_memory)) {
struct page *page = virt_to_head_page(addr);

if (PageSlab(page)) {
void *object;
struct kmem_cache *cache = page->slab_cache;
void *last_object;

object = virt_to_obj(cache, page_address(page), addr);
last_object = page_address(page) +
page->objects * cache->size;

if (unlikely(object > last_object))
object = last_object; /* the access hit the slab padding */

object_err(cache, page, object,
"kasan: bad access detected");
return;
}
dump_page(page, "kasan: bad access detected");
}

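
virt_to_obj() rounds the faulting address down to the start of its
containing object; since objects in a SLUB page are laid out
back-to-back with stride cache->size, the helper (introduced alongside
this series) is essentially:

	static inline void *virt_to_obj(struct kmem_cache *s,
					const void *slab_page, const void *x)
	{
		return (void *)x - ((x - slab_page) % s->size);
	}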
5 changes: 4 additions & 1 deletion mm/slab_common.c
@@ -898,6 +898,7 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
page = alloc_kmem_pages(flags, order);
ret = page ? page_address(page) : NULL;
kmemleak_alloc(ret, size, 1, flags);
kasan_kmalloc_large(ret, size);
return ret;
}
EXPORT_SYMBOL(kmalloc_order);
@@ -1077,8 +1078,10 @@ static __always_inline void *__do_krealloc(const void *p, size_t new_size,
if (p)
ks = ksize(p);

if (ks >= new_size)
if (ks >= new_size) {
kasan_krealloc((void *)p, new_size);
return (void *)p;
}

ret = kmalloc_track_caller(new_size, flags);
if (ret && p)
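
One observable effect of the __do_krealloc() hunk (illustrative caller,
not from this patch): when krealloc() shrinks an allocation in place,
the tail is poisoned again:

	char *p = kmalloc(128, GFP_KERNEL);	/* offsets 0..127 accessible */

	p = krealloc(p, 64, GFP_KERNEL);	/* fits: same pointer returned */
	p[100] = 0;				/* now out of bounds: offsets
						 * 64..127 were re-poisoned */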
31 changes: 29 additions & 2 deletions mm/slub.c
@@ -1251,11 +1251,13 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
{
kmemleak_alloc(ptr, size, 1, flags);
kasan_kmalloc_large(ptr, size);
}

static inline void kfree_hook(const void *x)
{
kmemleak_free(x);
kasan_kfree_large(x);
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
@@ -1278,6 +1280,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags);
memcg_kmem_put_cache(s);
kasan_slab_alloc(s, object);
}

static inline void slab_free_hook(struct kmem_cache *s, void *x)
@@ -1301,6 +1304,8 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
#endif
if (!(s->flags & SLAB_DEBUG_OBJECTS))
debug_check_no_obj_freed(x, s->object_size);

kasan_slab_free(s, x);
}

/*
@@ -1395,8 +1400,11 @@ static void setup_object(struct kmem_cache *s, struct page *page,
void *object)
{
setup_object_debug(s, page, object);
if (unlikely(s->ctor))
if (unlikely(s->ctor)) {
kasan_unpoison_object_data(s, object);
s->ctor(object);
kasan_poison_object_data(s, object);
}
}

static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
@@ -1429,6 +1437,8 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
if (unlikely(s->flags & SLAB_POISON))
memset(start, POISON_INUSE, PAGE_SIZE << order);

kasan_poison_slab(page);

for_each_object_idx(p, idx, s, start, page->objects) {
setup_object(s, page, p);
if (likely(idx < page->objects))
@@ -2522,6 +2532,7 @@ void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
{
void *ret = slab_alloc(s, gfpflags, _RET_IP_);
trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
kasan_kmalloc(s, ret, size);
return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_trace);
@@ -2548,6 +2559,8 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,

trace_kmalloc_node(_RET_IP_, ret,
size, s->size, gfpflags, node);

kasan_kmalloc(s, ret, size);
return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
@@ -2933,6 +2946,7 @@ static void early_kmem_cache_node_alloc(int node)
init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
init_tracking(kmem_cache_node, n);
#endif
kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node));
init_kmem_cache_node(n);
inc_slabs_node(kmem_cache_node, node, page->objects);

@@ -3305,6 +3319,8 @@ void *__kmalloc(size_t size, gfp_t flags)

trace_kmalloc(_RET_IP_, ret, size, s->size, flags);

kasan_kmalloc(s, ret, size);

return ret;
}
EXPORT_SYMBOL(__kmalloc);
@@ -3348,12 +3364,14 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)

trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);

kasan_kmalloc(s, ret, size);

return ret;
}
EXPORT_SYMBOL(__kmalloc_node);
#endif

size_t ksize(const void *object)
static size_t __ksize(const void *object)
{
struct page *page;

@@ -3369,6 +3387,15 @@ size_t ksize(const void *object)

return slab_ksize(page->slab_cache);
}

size_t ksize(const void *object)
{
size_t size = __ksize(object);
/* We assume that ksize callers may use the whole allocated area,
so we need to unpoison it. */
kasan_krealloc(object, size);
return size;
}
EXPORT_SYMBOL(ksize);

void kfree(const void *x)
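
The ksize() split makes the function's documented contract visible to
KASAN (illustrative usage, not from this patch): once a caller has
asked for ksize(), the whole underlying object may be used:

	char *p = kmalloc(100, GFP_KERNEL);	/* offsets 100..127 are redzone */
	size_t ks = ksize(p);			/* ks == 128; kasan_krealloc()
						 * unpoisons the full object */
	p[ks - 1] = 0;				/* valid after the ksize() call */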
