kasan, mm: reset tags when accessing metadata
Kernel allocator code accesses metadata for slab objects that may lie
out of bounds of the object itself or be accessed after the object has
been freed.  Such accesses trigger tag faults and lead to false-positive
reports with hardware tag-based KASAN.

Software KASAN modes disable instrumentation for allocator code via the
KASAN_SANITIZE Makefile macro and rely on kasan_enable_current()/
kasan_disable_current() annotations to suppress KASAN reports.

With hardware tag-based KASAN neither of those options is available: it
doesn't use compiler instrumentation, tag faults are never ignored, and
MTE is disabled after the first one.

Instead, reset tags when accessing metadata (currently only for SLUB).
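
For background on the helper used throughout the diff below:
kasan_reset_tag() untags a pointer so that accesses through it match any
memory tag.  A minimal sketch of the idea for arm64 kernel pointers,
where the tag lives in the top byte and 0xff is the match-all value (the
constants and helper name here are illustrative, not the kernel's exact
implementation):

        #include <stdint.h>

        #define TAG_SHIFT   56      /* top byte of the pointer holds the tag */
        #define TAG_KERNEL  0xffULL /* match-all tag: accesses never fault */

        /* Illustrative stand-in for the kernel's kasan_reset_tag(). */
        static inline void *reset_tag_sketch(const void *addr)
        {
                uint64_t ptr = (uint64_t)addr;

                ptr &= ~(0xffULL << TAG_SHIFT); /* drop the current tag */
                ptr |= TAG_KERNEL << TAG_SHIFT; /* install the match-all tag */
                return (void *)ptr;
        }

With the tag reset, metadata accesses such as the memchr_inv() calls in
check_bytes_and_report() and slab_pad_check() below no longer fault,
regardless of which tag the object's memory carries.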

Link: https://lkml.kernel.org/r/a0f3cefbc49f34c843b664110842de4db28179d0.1606161801.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <[email protected]>
Signed-off-by: Vincenzo Frascino <[email protected]>
Acked-by: Marco Elver <[email protected]>
Reviewed-by: Alexander Potapenko <[email protected]>
Tested-by: Vincenzo Frascino <[email protected]>
Cc: Andrey Ryabinin <[email protected]>
Cc: Branislav Rankov <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Evgenii Stepanov <[email protected]>
Cc: Kevin Brodsky <[email protected]>
Cc: Vasily Gorbik <[email protected]>
Cc: Will Deacon <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
xairy authored and torvalds committed Dec 22, 2020
1 parent 4291e9e commit aa1ef4d
Showing 3 changed files with 20 additions and 15 deletions.

4 changes: 3 additions & 1 deletion mm/page_alloc.c

@@ -1204,8 +1204,10 @@ static void kernel_init_free_pages(struct page *page, int numpages)
 
         /* s390's use of memset() could override KASAN redzones. */
         kasan_disable_current();
-        for (i = 0; i < numpages; i++)
+        for (i = 0; i < numpages; i++) {
+                page_kasan_tag_reset(page + i);
                 clear_highpage(page + i);
+        }
         kasan_enable_current();
 }
 
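Note the different helper in this hunk: page_kasan_tag_reset() resets
the tag recorded for the page itself (kept in page->flags on arm64)
rather than untagging a pointer.  A rough sketch of the operation, with
a made-up bit position since the real one depends on the kernel's
page-flags layout:

        #define PG_TAG_SHIFT 48 /* hypothetical position in page->flags */
        #define PG_TAG_MASK  (0xffUL << PG_TAG_SHIFT)
        #define TAG_KERNEL   0xffUL /* match-all tag */

        /* Illustrative stand-in for page_kasan_tag_reset(). */
        static inline void page_tag_reset_sketch(unsigned long *flags)
        {
                *flags = (*flags & ~PG_TAG_MASK) |
                         (TAG_KERNEL << PG_TAG_SHIFT);
        }
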
2 changes: 1 addition & 1 deletion mm/page_poison.c

@@ -25,7 +25,7 @@ static void poison_page(struct page *page)
 
         /* KASAN still think the page is in-use, so skip it. */
         kasan_disable_current();
-        memset(addr, PAGE_POISON, PAGE_SIZE);
+        memset(kasan_reset_tag(addr), PAGE_POISON, PAGE_SIZE);
         kasan_enable_current();
         kunmap_atomic(addr);
 }

29 changes: 16 additions & 13 deletions mm/slub.c

@@ -249,7 +249,7 @@ static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
 {
 #ifdef CONFIG_SLAB_FREELIST_HARDENED
         /*
-         * When CONFIG_KASAN_SW_TAGS is enabled, ptr_addr might be tagged.
+         * When CONFIG_KASAN_SW/HW_TAGS is enabled, ptr_addr might be tagged.
          * Normally, this doesn't cause any issues, as both set_freepointer()
          * and get_freepointer() are called with a pointer with the same tag.
          * However, there are some issues with CONFIG_SLUB_DEBUG code. For
@@ -275,6 +275,7 @@ static inline void *freelist_dereference(const struct kmem_cache *s,
 
 static inline void *get_freepointer(struct kmem_cache *s, void *object)
 {
+        object = kasan_reset_tag(object);
         return freelist_dereference(s, object + s->offset);
 }
 
@@ -304,6 +305,7 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
         BUG_ON(object == fp); /* naive detection of double free or corruption */
 #endif
 
+        freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr);
         *(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
 }
 
@@ -538,8 +540,8 @@ static void print_section(char *level, char *text, u8 *addr,
                           unsigned int length)
 {
         metadata_access_enable();
-        print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
-                        length, 1);
+        print_hex_dump(level, kasan_reset_tag(text), DUMP_PREFIX_ADDRESS,
+                        16, 1, addr, length, 1);
         metadata_access_disable();
 }
 
@@ -570,7 +572,7 @@ static struct track *get_track(struct kmem_cache *s, void *object,
 
         p = object + get_info_end(s);
 
-        return p + alloc;
+        return kasan_reset_tag(p + alloc);
 }
 
 static void set_track(struct kmem_cache *s, void *object,
@@ -583,7 +585,8 @@
                 unsigned int nr_entries;
 
                 metadata_access_enable();
-                nr_entries = stack_trace_save(p->addrs, TRACK_ADDRS_COUNT, 3);
+                nr_entries = stack_trace_save(kasan_reset_tag(p->addrs),
+                                              TRACK_ADDRS_COUNT, 3);
                 metadata_access_disable();
 
                 if (nr_entries < TRACK_ADDRS_COUNT)
@@ -747,7 +750,7 @@ static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
 
 static void init_object(struct kmem_cache *s, void *object, u8 val)
 {
-        u8 *p = object;
+        u8 *p = kasan_reset_tag(object);
 
         if (s->flags & SLAB_RED_ZONE)
                 memset(p - s->red_left_pad, val, s->red_left_pad);
@@ -777,7 +780,7 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
         u8 *addr = page_address(page);
 
         metadata_access_enable();
-        fault = memchr_inv(start, value, bytes);
+        fault = memchr_inv(kasan_reset_tag(start), value, bytes);
         metadata_access_disable();
         if (!fault)
                 return 1;
@@ -873,7 +876,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
 
         pad = end - remainder;
         metadata_access_enable();
-        fault = memchr_inv(pad, POISON_INUSE, remainder);
+        fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder);
         metadata_access_disable();
         if (!fault)
                 return 1;
@@ -1118,7 +1121,7 @@ void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr)
                 return;
 
         metadata_access_enable();
-        memset(addr, POISON_INUSE, page_size(page));
+        memset(kasan_reset_tag(addr), POISON_INUSE, page_size(page));
         metadata_access_disable();
 }
 
@@ -1566,10 +1569,10 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
                          * Clear the object and the metadata, but don't touch
                          * the redzone.
                          */
-                        memset(object, 0, s->object_size);
+                        memset(kasan_reset_tag(object), 0, s->object_size);
                         rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad
                                                            : 0;
-                        memset((char *)object + s->inuse, 0,
+                        memset((char *)kasan_reset_tag(object) + s->inuse, 0,
                                s->size - s->inuse - rsize);
 
                 }
@@ -2881,10 +2884,10 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
                 stat(s, ALLOC_FASTPATH);
         }
 
-        maybe_wipe_obj_freeptr(s, object);
+        maybe_wipe_obj_freeptr(s, kasan_reset_tag(object));
 
         if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
-                memset(object, 0, s->object_size);
+                memset(kasan_reset_tag(object), 0, s->object_size);
 
         slab_post_alloc_hook(s, objcg, gfpflags, 1, &object);
 
