kmemcheck: add hooks for the page allocator
This adds support for tracking the initialization state of memory that
was allocated with the page allocator. Highmem requests are not
tracked, since kmemcheck reaches pages and their shadow through
page_address(), which is only valid for lowmem.
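
For illustration only (not part of this commit): a minimal kernel-context
sketch of what the new hooks do for a tracked lowmem allocation, assuming a
CONFIG_KMEMCHECK=y kernel with kmemcheck enabled at boot. The demo function
name is made up; everything it calls is from this patch or existing kernel API.

#include <linux/gfp.h>
#include <linux/mm.h>

static void kmemcheck_pagealloc_demo(void)
{
	struct page *page;
	unsigned char *p, c;

	/*
	 * Tracked: __alloc_pages_internal() now calls
	 * kmemcheck_pagealloc_alloc(), which allocates shadow pages and
	 * marks the data uninitialized.
	 */
	page = alloc_pages(GFP_KERNEL, 0);
	if (!page)
		return;

	p = page_address(page);
	p[0] = 42;	/* a write marks the byte initialized */
	c = p[1];	/* a read of an uninitialized byte gets reported */
	(void)c;

	__free_pages(page, 0);	/* the free path calls kmemcheck_free_shadow() */

	/* Opting out: __GFP_NOTRACK (and any highmem request) skips tracking. */
	page = alloc_pages(GFP_KERNEL | __GFP_NOTRACK, 0);
	if (page)
		__free_pages(page, 0);
}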

Cc: Dave Hansen <[email protected]>
Acked-by: Pekka Enberg <[email protected]>

[build fix for !CONFIG_KMEMCHECK]
Signed-off-by: Ingo Molnar <[email protected]>

[rebased for mainline inclusion]
Signed-off-by: Vegard Nossum <[email protected]>
vegard committed Jun 15, 2009
1 parent 9b5cab3 commit b1eeab6
Showing 8 changed files with 122 additions and 31 deletions.
4 changes: 2 additions & 2 deletions arch/x86/include/asm/thread_info.h
@@ -154,9 +154,9 @@ struct thread_info {

/* thread information allocation */
#ifdef CONFIG_DEBUG_STACK_USAGE
-#define THREAD_FLAGS (GFP_KERNEL | __GFP_ZERO)
+#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
#else
-#define THREAD_FLAGS GFP_KERNEL
+#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK)
#endif

#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
8 changes: 8 additions & 0 deletions arch/x86/mm/kmemcheck/shadow.c
@@ -116,6 +116,14 @@ void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n)
kmemcheck_mark_uninitialized(page_address(&p[i]), PAGE_SIZE);
}

+void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n)
+{
+unsigned int i;
+
+for (i = 0; i < n; ++i)
+kmemcheck_mark_initialized(page_address(&p[i]), PAGE_SIZE);
+}

enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size)
{
uint8_t *x;
5 changes: 5 additions & 0 deletions include/linux/gfp.h
@@ -51,7 +51,12 @@ struct vm_area_struct;
#define __GFP_THISNODE ((__force gfp_t)0x40000u)/* No fallback, no policies */
#define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */
#define __GFP_MOVABLE ((__force gfp_t)0x100000u) /* Page is movable */

+#ifdef CONFIG_KMEMCHECK
+#define __GFP_NOTRACK ((__force gfp_t)0x200000u) /* Don't track with kmemcheck */
+#else
+#define __GFP_NOTRACK ((__force gfp_t)0)
+#endif

/*
* This may seem redundant, but it's a way of annotating false positives vs.
35 changes: 29 additions & 6 deletions include/linux/kmemcheck.h
@@ -8,13 +8,15 @@
extern int kmemcheck_enabled;

/* The slab-related functions. */
-void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node,
-struct page *page, int order);
-void kmemcheck_free_shadow(struct kmem_cache *s, struct page *page, int order);
+void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
+void kmemcheck_free_shadow(struct page *page, int order);
void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
size_t size);
void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);

+void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
+gfp_t gfpflags);

void kmemcheck_show_pages(struct page *p, unsigned int n);
void kmemcheck_hide_pages(struct page *p, unsigned int n);

@@ -27,20 +29,20 @@ void kmemcheck_mark_freed(void *address, unsigned int n);

void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
+void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);

int kmemcheck_show_addr(unsigned long address);
int kmemcheck_hide_addr(unsigned long address);
#else
#define kmemcheck_enabled 0

static inline void
-kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node,
-struct page *page, int order)
+kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
{
}

static inline void
-kmemcheck_free_shadow(struct kmem_cache *s, struct page *page, int order)
+kmemcheck_free_shadow(struct page *page, int order)
{
}

@@ -55,6 +57,11 @@ static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
{
}

+static inline void kmemcheck_pagealloc_alloc(struct page *p,
+unsigned int order, gfp_t gfpflags)
+{
+}

static inline bool kmemcheck_page_is_tracked(struct page *p)
{
return false;
@@ -75,6 +82,22 @@ static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
static inline void kmemcheck_mark_freed(void *address, unsigned int n)
{
}

+static inline void kmemcheck_mark_unallocated_pages(struct page *p,
+unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
+unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_initialized_pages(struct page *p,
+unsigned int n)
+{
+}

#endif /* CONFIG_KMEMCHECK */

#endif /* LINUX_KMEMCHECK_H */
45 changes: 32 additions & 13 deletions mm/kmemcheck.c
@@ -1,10 +1,10 @@
+#include <linux/gfp.h>
#include <linux/mm_types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kmemcheck.h>

-void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node,
-struct page *page, int order)
+void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
{
struct page *shadow;
int pages;
@@ -16,7 +16,7 @@ void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node,
* With kmemcheck enabled, we need to allocate a memory area for the
* shadow bits as well.
*/
-shadow = alloc_pages_node(node, flags, order);
+shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
if (!shadow) {
if (printk_ratelimit())
printk(KERN_ERR "kmemcheck: failed to allocate "
@@ -33,23 +33,17 @@ void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node,
* the memory accesses.
*/
kmemcheck_hide_pages(page, pages);

-/*
- * Objects from caches that have a constructor don't get
- * cleared when they're allocated, so we need to do it here.
- */
-if (s->ctor)
-kmemcheck_mark_uninitialized_pages(page, pages);
-else
-kmemcheck_mark_unallocated_pages(page, pages);
}

-void kmemcheck_free_shadow(struct kmem_cache *s, struct page *page, int order)
+void kmemcheck_free_shadow(struct page *page, int order)
{
struct page *shadow;
int pages;
int i;

+if (!kmemcheck_page_is_tracked(page))
+return;

pages = 1 << order;

kmemcheck_show_pages(page, pages);
@@ -101,3 +95,28 @@ void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size)
if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU))
kmemcheck_mark_freed(object, size);
}

+void kmemcheck_pagealloc_alloc(struct page *page, unsigned int order,
+gfp_t gfpflags)
+{
+int pages;
+
+if (gfpflags & (__GFP_HIGHMEM | __GFP_NOTRACK))
+return;
+
+pages = 1 << order;
+
+/*
+ * NOTE: We choose to track GFP_ZERO pages too; in fact, they
+ * can become uninitialized by copying uninitialized memory
+ * into them.
+ */
+
+/* XXX: Can use zone->node for node? */
+kmemcheck_alloc_shadow(page, order, gfpflags, -1);
+
+if (gfpflags & __GFP_ZERO)
+kmemcheck_mark_initialized_pages(page, pages);
+else
+kmemcheck_mark_uninitialized_pages(page, pages);
+}
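
An illustrative kernel-context sketch (not part of the commit) of the NOTE in
kmemcheck_pagealloc_alloc() above: a __GFP_ZERO page starts out initialized,
but copying from uninitialized memory re-poisons the destination, which is why
such pages still get shadow tracking. The demo function name is made up;
kmalloc() merely stands in for any source of uninitialized bytes.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>

static void gfp_zero_repoison_demo(void)
{
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
	void *src = kmalloc(64, GFP_KERNEL);	/* contents uninitialized */
	char *dst;

	if (!page || !src)
		goto out;

	dst = page_address(page);	/* shadow state: initialized (zeroed) */

	/*
	 * The copy carries the uninitialized state of src into dst, so a
	 * later read of dst[0] is reported even though the page was zeroed.
	 */
	memcpy(dst, src, 64);

out:
	kfree(src);			/* kfree(NULL) is a no-op */
	if (page)
		__free_pages(page, 0);
}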
18 changes: 18 additions & 0 deletions mm/page_alloc.c
@@ -23,6 +23,7 @@
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
+#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
@@ -546,6 +547,8 @@ static void __free_pages_ok(struct page *page, unsigned int order)
int i;
int bad = 0;

+kmemcheck_free_shadow(page, order);

for (i = 0 ; i < (1 << order) ; ++i)
bad += free_pages_check(page + i);
if (bad)
@@ -994,6 +997,8 @@ static void free_hot_cold_page(struct page *page, int cold)
struct per_cpu_pages *pcp;
unsigned long flags;

+kmemcheck_free_shadow(page, 0);

if (PageAnon(page))
page->mapping = NULL;
if (free_pages_check(page))
@@ -1047,6 +1052,16 @@ void split_page(struct page *page, unsigned int order)

VM_BUG_ON(PageCompound(page));
VM_BUG_ON(!page_count(page));

+#ifdef CONFIG_KMEMCHECK
+/*
+ * Split shadow pages too, because free(page[0]) would
+ * otherwise free the whole shadow.
+ */
+if (kmemcheck_page_is_tracked(page))
+split_page(virt_to_page(page[0].shadow), order);
+#endif

for (i = 1; i < (1 << order); i++)
set_page_refcounted(page + i);
}
@@ -1667,7 +1682,10 @@ __alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
dump_stack();
show_mem();
}
+return page;
got_pg:
+if (kmemcheck_enabled)
+kmemcheck_pagealloc_alloc(page, order, gfp_mask);
return page;
}
EXPORT_SYMBOL(__alloc_pages_internal);
15 changes: 10 additions & 5 deletions mm/slab.c
@@ -1612,7 +1612,7 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
flags |= __GFP_RECLAIMABLE;

-page = alloc_pages_node(nodeid, flags, cachep->gfporder);
+page = alloc_pages_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
if (!page)
return NULL;

@@ -1626,8 +1626,14 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
for (i = 0; i < nr_pages; i++)
__SetPageSlab(page + i);

-if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK))
-kmemcheck_alloc_shadow(cachep, flags, nodeid, page, cachep->gfporder);
+if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
+kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
+
+if (cachep->ctor)
+kmemcheck_mark_uninitialized_pages(page, nr_pages);
+else
+kmemcheck_mark_unallocated_pages(page, nr_pages);
+}

return page_address(page);
}
@@ -1641,8 +1647,7 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
struct page *page = virt_to_page(addr);
const unsigned long nr_freed = i;

-if (kmemcheck_page_is_tracked(page))
-kmemcheck_free_shadow(cachep, page, cachep->gfporder);
+kmemcheck_free_shadow(page, cachep->gfporder);

if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
sub_zone_page_state(page_zone(page),
23 changes: 18 additions & 5 deletions mm/slub.c
@@ -1066,6 +1066,8 @@ static inline struct page *alloc_slab_page(gfp_t flags, int node,
{
int order = oo_order(oo);

+flags |= __GFP_NOTRACK;

if (node == -1)
return alloc_pages(flags, order);
else
@@ -1097,7 +1099,18 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
if (kmemcheck_enabled
&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS)))
{
-kmemcheck_alloc_shadow(s, flags, node, page, compound_order(page));
+int pages = 1 << oo_order(oo);
+
+kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
+
+/*
+ * Objects from caches that have a constructor don't get
+ * cleared when they're allocated, so we need to do it here.
+ */
+if (s->ctor)
+kmemcheck_mark_uninitialized_pages(page, pages);
+else
+kmemcheck_mark_unallocated_pages(page, pages);
}

page->objects = oo_objects(oo);
@@ -1173,8 +1186,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
__ClearPageSlubDebug(page);
}

-if (kmemcheck_page_is_tracked(page))
-kmemcheck_free_shadow(s, page, compound_order(page));
+kmemcheck_free_shadow(page, compound_order(page));

mod_zone_page_state(page_zone(page),
(s->flags & SLAB_RECLAIM_ACCOUNT) ?
@@ -2734,9 +2746,10 @@ EXPORT_SYMBOL(__kmalloc);

static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
{
-struct page *page = alloc_pages_node(node, flags | __GFP_COMP,
-get_order(size));
+struct page *page;
+
+flags |= __GFP_COMP | __GFP_NOTRACK;
+page = alloc_pages_node(node, flags, get_order(size));
if (page)
return page_address(page);
else
