
Commit 55834c5

ramosian-glider authored and torvalds committed
mm: kasan: initial memory quarantine implementation
Quarantine isolates freed objects in a separate queue. The objects are returned to the allocator later, which helps to detect use-after-free errors.

When the object is freed, its state changes from KASAN_STATE_ALLOC to KASAN_STATE_QUARANTINE. The object is poisoned and put into quarantine instead of being returned to the allocator, therefore every subsequent access to that object triggers a KASAN error, and the error handler is able to say where the object has been allocated and deallocated.

When it's time for the object to leave quarantine, its state becomes KASAN_STATE_FREE and it's returned to the allocator. From then on the allocator may reuse it for another allocation. Before that happens, it's still possible to detect a use-after-free on that object (it retains the allocation/deallocation stacks).

When the allocator reuses this object, the shadow is unpoisoned and the old allocation/deallocation stacks are wiped. Therefore a use of this object, even an incorrect one, won't trigger an ASan warning.

Without the quarantine, it's not guaranteed that objects aren't reused immediately, which is why the probability of catching a use-after-free is lower than with the quarantine in place.

Freed objects are first added to per-cpu quarantine queues. When a cache is destroyed or memory shrinking is requested, the objects are moved into the global quarantine queue. Whenever a kmalloc call allows memory reclaiming, the oldest objects are popped out of the global queue until the total size of objects in quarantine is less than 3/4 of the maximum quarantine size (which is a fraction of installed physical memory).

As long as an object remains in the quarantine, KASAN is able to report accesses to it, so the chance of reporting a use-after-free is increased. Once the object leaves quarantine, the allocator may reuse it, in which case the object is unpoisoned and KASAN can't detect incorrect accesses to it.

Right now quarantine support is only enabled in the SLAB allocator. Unification of KASAN features in SLAB and SLUB will be done later.

This patch is based on the "mm: kasan: quarantine" patch originally prepared by Dmitry Chernenkov. A number of improvements have been suggested by Andrey Ryabinin.

[[email protected]: v9]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Alexander Potapenko <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: Pekka Enberg <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: Andrey Konovalov <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Andrey Ryabinin <[email protected]>
Cc: Steven Rostedt <[email protected]>
Cc: Konstantin Serebryany <[email protected]>
Cc: Dmitry Chernenkov <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
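To make the sizing policy above concrete, here is a minimal sketch of the reduction step in plain C. All names here (my_qobject, my_quarantine, my_quarantine_reduce) are hypothetical and only illustrate the "pop the oldest objects until the quarantine holds less than 3/4 of its maximum size" behaviour described in the changelog; they are not the code added by this commit.

/* Hypothetical illustration of the quarantine sizing policy:
 * keep popping the oldest objects until the total size drops
 * below 3/4 of the configured maximum.
 */
#include <stddef.h>

struct my_qobject {
	struct my_qobject *next;	/* singly linked FIFO, oldest first */
	size_t size;			/* size of the quarantined object */
};

struct my_quarantine {
	struct my_qobject *head;	/* oldest object */
	struct my_qobject **tail;	/* where the next object is appended */
	size_t bytes;			/* total size currently quarantined */
	size_t max_bytes;		/* e.g. a fraction of physical memory */
};

/* Called from allocation paths that are allowed to reclaim memory. */
static void my_quarantine_reduce(struct my_quarantine *q,
				 void (*really_free)(struct my_qobject *))
{
	while (q->head && q->bytes > q->max_bytes / 4 * 3) {
		struct my_qobject *oldest = q->head;

		q->head = oldest->next;
		if (!q->head)
			q->tail = &q->head;
		q->bytes -= oldest->size;
		really_free(oldest);	/* object finally returns to the allocator */
	}
}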
1 parent b8f1a75 commit 55834c5

File tree

10 files changed: +387 -15 lines


include/linux/kasan.h

+11 -2

@@ -50,6 +50,8 @@ void kasan_free_pages(struct page *page, unsigned int order);
 
 void kasan_cache_create(struct kmem_cache *cache, size_t *size,
 			unsigned long *flags);
+void kasan_cache_shrink(struct kmem_cache *cache);
+void kasan_cache_destroy(struct kmem_cache *cache);
 
 void kasan_poison_slab(struct page *page);
 void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
@@ -63,7 +65,8 @@ void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size,
 void kasan_krealloc(const void *object, size_t new_size, gfp_t flags);
 
 void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
-void kasan_slab_free(struct kmem_cache *s, void *object);
+bool kasan_slab_free(struct kmem_cache *s, void *object);
+void kasan_poison_slab_free(struct kmem_cache *s, void *object);
 
 struct kasan_cache {
 	int alloc_meta_offset;
@@ -88,6 +91,8 @@ static inline void kasan_free_pages(struct page *page, unsigned int order) {}
 static inline void kasan_cache_create(struct kmem_cache *cache,
 				      size_t *size,
 				      unsigned long *flags) {}
+static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
+static inline void kasan_cache_destroy(struct kmem_cache *cache) {}
 
 static inline void kasan_poison_slab(struct page *page) {}
 static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
@@ -105,7 +110,11 @@ static inline void kasan_krealloc(const void *object, size_t new_size,
 
 static inline void kasan_slab_alloc(struct kmem_cache *s, void *object,
 				    gfp_t flags) {}
-static inline void kasan_slab_free(struct kmem_cache *s, void *object) {}
+static inline bool kasan_slab_free(struct kmem_cache *s, void *object)
+{
+	return false;
+}
+static inline void kasan_poison_slab_free(struct kmem_cache *s, void *object) {}
 
 static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
 static inline void kasan_free_shadow(const struct vm_struct *vm) {}
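The signature change from void to bool is the point of this header hunk: kasan_slab_free() now reports whether KASAN has taken the object into quarantine, so the allocator's free path can skip returning the memory to its freelists. The SLAB-side caller lives in mm/slab.c, which is among the ten changed files but is not reproduced in this excerpt; the intended call pattern looks roughly like the sketch below (my_cache_free is a placeholder name, not the real SLAB hook).

/* Sketch of how an allocator free path consumes the new bool return
 * value of kasan_slab_free(); the function name is a placeholder.
 */
static void my_cache_free(struct kmem_cache *cachep, void *objp)
{
	/* true means KASAN put the object into quarantine: do not
	 * touch it now, it will be freed for real once it leaves
	 * the quarantine.
	 */
	if (kasan_slab_free(cachep, objp))
		return;

	/* ... normal freelist handling would continue here ... */
}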

mm/kasan/Makefile

+1 -0

@@ -8,3 +8,4 @@ CFLAGS_REMOVE_kasan.o = -pg
 CFLAGS_kasan.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
 
 obj-y := kasan.o report.o kasan_init.o
+obj-$(CONFIG_SLAB) += quarantine.o

mm/kasan/kasan.c

+49 -8

@@ -388,6 +388,16 @@ void kasan_cache_create(struct kmem_cache *cache, size_t *size,
 }
 #endif
 
+void kasan_cache_shrink(struct kmem_cache *cache)
+{
+	quarantine_remove_cache(cache);
+}
+
+void kasan_cache_destroy(struct kmem_cache *cache)
+{
+	quarantine_remove_cache(cache);
+}
+
 void kasan_poison_slab(struct page *page)
 {
 	kasan_poison_shadow(page_address(page),
@@ -482,7 +492,7 @@ void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
 	kasan_kmalloc(cache, object, cache->object_size, flags);
 }
 
-void kasan_slab_free(struct kmem_cache *cache, void *object)
+void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
 {
 	unsigned long size = cache->object_size;
 	unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
@@ -491,18 +501,43 @@ void kasan_slab_free(struct kmem_cache *cache, void *object)
 	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
 		return;
 
+	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
+}
+
+bool kasan_slab_free(struct kmem_cache *cache, void *object)
+{
 #ifdef CONFIG_SLAB
-	if (cache->flags & SLAB_KASAN) {
-		struct kasan_free_meta *free_info =
-			get_free_info(cache, object);
+	/* RCU slabs could be legally used after free within the RCU period */
+	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
+		return false;
+
+	if (likely(cache->flags & SLAB_KASAN)) {
 		struct kasan_alloc_meta *alloc_info =
 			get_alloc_info(cache, object);
-		alloc_info->state = KASAN_STATE_FREE;
-		set_track(&free_info->track, GFP_NOWAIT);
+		struct kasan_free_meta *free_info =
+			get_free_info(cache, object);
+
+		switch (alloc_info->state) {
+		case KASAN_STATE_ALLOC:
+			alloc_info->state = KASAN_STATE_QUARANTINE;
+			quarantine_put(free_info, cache);
+			set_track(&free_info->track, GFP_NOWAIT);
+			kasan_poison_slab_free(cache, object);
+			return true;
+		case KASAN_STATE_QUARANTINE:
+		case KASAN_STATE_FREE:
+			pr_err("Double free");
+			dump_stack();
+			break;
+		default:
+			break;
+		}
 	}
+	return false;
+#else
+	kasan_poison_slab_free(cache, object);
+	return false;
 #endif
-
-	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
 }
 
 void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
@@ -511,6 +546,9 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
 	unsigned long redzone_start;
 	unsigned long redzone_end;
 
+	if (flags & __GFP_RECLAIM)
+		quarantine_reduce();
+
 	if (unlikely(object == NULL))
 		return;
 
@@ -541,6 +579,9 @@ void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
 	unsigned long redzone_start;
 	unsigned long redzone_end;
 
+	if (flags & __GFP_RECLAIM)
+		quarantine_reduce();
+
 	if (unlikely(ptr == NULL))
 		return;
 
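quarantine_put(), quarantine_reduce() and quarantine_remove_cache(), called above, are implemented in the new mm/kasan/quarantine.c, which is one of the ten changed files but is not shown in this excerpt. As a rough orientation only: the queues are singly linked lists of the qlist_node added to struct kasan_free_meta (see the mm/kasan/kasan.h hunk below). A simplified sketch of the queue type and its append helper follows; per-cpu batching, locking and the exact accounting are omitted, so treat this as an illustration rather than the committed code.

/* Simplified sketch; the real implementation batches objects into
 * per-cpu queues and flushes them to a global queue under a lock.
 */
struct qlist_head {
	struct qlist_node *head;
	struct qlist_node *tail;
	size_t bytes;		/* total size of the queued objects */
};

/* Append one freed object's metadata to the tail of a queue. */
static void qlist_put(struct qlist_head *q, struct qlist_node *qlink,
		      size_t size)
{
	if (!q->head)
		q->head = qlink;
	else
		q->tail->next = qlink;
	q->tail = qlink;
	qlink->next = NULL;
	q->bytes += size;
}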

mm/kasan/kasan.h

+19 -2

@@ -62,6 +62,7 @@ struct kasan_global {
 enum kasan_state {
 	KASAN_STATE_INIT,
 	KASAN_STATE_ALLOC,
+	KASAN_STATE_QUARANTINE,
 	KASAN_STATE_FREE
 };
 
@@ -79,9 +80,14 @@ struct kasan_alloc_meta {
 	u32 reserved;
 };
 
+struct qlist_node {
+	struct qlist_node *next;
+};
 struct kasan_free_meta {
-	/* Allocator freelist pointer, unused by KASAN. */
-	void **freelist;
+	/* This field is used while the object is in the quarantine.
+	 * Otherwise it might be used for the allocator freelist.
+	 */
+	struct qlist_node quarantine_link;
 	struct kasan_track track;
 };
 
@@ -105,4 +111,15 @@ static inline bool kasan_report_enabled(void)
 void kasan_report(unsigned long addr, size_t size,
 		bool is_write, unsigned long ip);
 
+#ifdef CONFIG_SLAB
+void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
+void quarantine_reduce(void);
+void quarantine_remove_cache(struct kmem_cache *cache);
+#else
+static inline void quarantine_put(struct kasan_free_meta *info,
+				struct kmem_cache *cache) { }
+static inline void quarantine_reduce(void) { }
+static inline void quarantine_remove_cache(struct kmem_cache *cache) { }
+#endif
+
 #endif
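Because only the qlist_node is threaded through the quarantine, the drain path has to map a queued node back to the object it belongs to. A rough sketch of that mapping, assuming the kasan_free_meta is stored inside the object at a cache-specific offset (as tracked by struct kasan_cache); the real helper lives in mm/kasan/quarantine.c and is not shown in this excerpt.

/* Sketch: recover the object address from a queued quarantine_link.
 * free_meta_offset stands in for the cache-specific offset of
 * struct kasan_free_meta inside the object; container_of() comes
 * from linux/kernel.h.
 */
static void *qlink_to_object(struct qlist_node *qlink, size_t free_meta_offset)
{
	struct kasan_free_meta *free_info =
		container_of(qlink, struct kasan_free_meta, quarantine_link);

	return (char *)free_info - free_meta_offset;
}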
