Skip to content

Commit

Permalink
generic debug pagealloc
Browse files Browse the repository at this point in the history
CONFIG_DEBUG_PAGEALLOC is now supported by x86, powerpc, sparc64, and
s390.  This patch implements it for the rest of the architectures by
filling the pages with poison byte patterns after free_pages() and
verifying the poison patterns before alloc_pages().

This generic implementation cannot detect invalid page accesses immediately: an
invalid read returns poisoned data and may only surface later as a bad
dereference, and an invalid write is detected only after a delay, when the
poison pattern is next verified.

Signed-off-by: Akinobu Mita <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
  • Loading branch information
mita authored and torvalds committed Apr 1, 2009
1 parent 610a77e commit 6a11f75
Show file tree
Hide file tree
Showing 16 changed files with 202 additions and 19 deletions.
18 changes: 0 additions & 18 deletions arch/avr32/mm/fault.c
Original file line number Diff line number Diff line change
Expand Up @@ -250,21 +250,3 @@ asmlinkage void do_bus_error(unsigned long addr, int write_access,
dump_dtlb();
die("Bus Error", regs, SIGKILL);
}

/*
* This functionality is currently not possible to implement because
* we're using segmentation to ensure a fixed mapping of the kernel
* virtual address space.
*
* It would be possible to implement this, but it would require us to
* disable segmentation at startup and load the kernel mappings into
* the TLB like any other pages. There will be lots of trickery to
* avoid recursive invocation of the TLB miss handler, though...
*/
#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{

}
EXPORT_SYMBOL(kernel_map_pages);
#endif
3 changes: 3 additions & 0 deletions arch/powerpc/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -228,6 +228,9 @@ config PPC_OF_PLATFORM_PCI
depends on PPC64 # not supported on 32 bits yet
default n

config ARCH_SUPPORTS_DEBUG_PAGEALLOC
def_bool y

source "init/Kconfig"

source "kernel/Kconfig.freezer"
Expand Down
1 change: 1 addition & 0 deletions arch/powerpc/Kconfig.debug
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@ config DEBUG_STACK_USAGE
config DEBUG_PAGEALLOC
bool "Debug page memory allocations"
depends on DEBUG_KERNEL && !HIBERNATION
depends on ARCH_SUPPORTS_DEBUG_PAGEALLOC
help
Unmap pages from the kernel linear mapping after free_pages().
This results in a large slowdown, but helps to find certain types
Expand Down
3 changes: 3 additions & 0 deletions arch/s390/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,9 @@ config PGSTE
config VIRT_CPU_ACCOUNTING
def_bool y

config ARCH_SUPPORTS_DEBUG_PAGEALLOC
def_bool y

mainmenu "Linux Kernel Configuration"

config S390
Expand Down
1 change: 1 addition & 0 deletions arch/s390/Kconfig.debug
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ source "lib/Kconfig.debug"
config DEBUG_PAGEALLOC
bool "Debug page memory allocations"
depends on DEBUG_KERNEL
depends on ARCH_SUPPORTS_DEBUG_PAGEALLOC
help
Unmap pages from the kernel linear mapping after free_pages().
This results in a slowdown, but helps to find certain types of
Expand Down
3 changes: 3 additions & 0 deletions arch/sparc/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -124,6 +124,9 @@ config ARCH_NO_VIRT_TO_BUS
config OF
def_bool y

config ARCH_SUPPORTS_DEBUG_PAGEALLOC
def_bool y if SPARC64

source "init/Kconfig"

source "kernel/Kconfig.freezer"
Expand Down
3 changes: 2 additions & 1 deletion arch/sparc/Kconfig.debug
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,8 @@ config STACK_DEBUG

config DEBUG_PAGEALLOC
bool "Debug page memory allocations"
depends on SPARC64 && DEBUG_KERNEL && !HIBERNATION
depends on DEBUG_KERNEL && !HIBERNATION
depends on ARCH_SUPPORTS_DEBUG_PAGEALLOC
help
Unmap pages from the kernel linear mapping after free_pages().
This results in a large slowdown, but helps to find certain types
Expand Down
3 changes: 3 additions & 0 deletions arch/x86/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -165,6 +165,9 @@ config AUDIT_ARCH
config ARCH_SUPPORTS_OPTIMIZED_INLINING
def_bool y

config ARCH_SUPPORTS_DEBUG_PAGEALLOC
def_bool y

# Use the generic interrupt handling code in kernel/irq/:
config GENERIC_HARDIRQS
bool
Expand Down
1 change: 1 addition & 0 deletions arch/x86/Kconfig.debug
Original file line number Diff line number Diff line change
Expand Up @@ -75,6 +75,7 @@ config DEBUG_STACK_USAGE
config DEBUG_PAGEALLOC
bool "Debug page memory allocations"
depends on DEBUG_KERNEL
depends on ARCH_SUPPORTS_DEBUG_PAGEALLOC
---help---
Unmap pages from the kernel linear mapping after free_pages().
This results in a large slowdown, but helps to find certain types
Expand Down
4 changes: 4 additions & 0 deletions include/linux/mm_types.h
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/page-debug-flags.h>
#include <asm/page.h>
#include <asm/mmu.h>

Expand Down Expand Up @@ -174,6 +175,9 @@ struct vm_area_struct {
#ifdef CONFIG_NUMA
struct mempolicy *vm_policy; /* NUMA policy for the VMA */
#endif
#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
unsigned long debug_flags; /* Use atomic bitops on this */
#endif
};

struct core_thread {
Expand Down
30 changes: 30 additions & 0 deletions include/linux/page-debug-flags.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
#ifndef LINUX_PAGE_DEBUG_FLAGS_H
#define LINUX_PAGE_DEBUG_FLAGS_H

/*
 * page->debug_flags bits:
 *
 * PAGE_DEBUG_FLAG_POISON marks a page that the generic debug-pagealloc
 * code has filled with the poison byte pattern.  The flag is set after
 * free_pages() poisons the page, and cleared again on the alloc_pages()
 * path once the pattern has been verified to be intact.
 */

enum page_debug_flags {
	PAGE_DEBUG_FLAG_POISON,		/* Page is poisoned */
};

/*
 * Ensure that CONFIG_WANT_PAGE_DEBUG_FLAGS reliably gets turned off when
 * no debug feature selects it — otherwise every struct page would carry
 * an unused debug_flags word.
 */

#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
#if !defined(CONFIG_PAGE_POISONING) \
/* && !defined(CONFIG_PAGE_DEBUG_SOMETHING_ELSE) && ... */
#error WANT_PAGE_DEBUG_FLAGS is turned on with no debug features!
#endif
#endif /* CONFIG_WANT_PAGE_DEBUG_FLAGS */

#endif /* LINUX_PAGE_DEBUG_FLAGS_H */
3 changes: 3 additions & 0 deletions include/linux/poison.h
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,9 @@
*/
#define TIMER_ENTRY_STATIC ((void *) 0x74737461)

/********** mm/debug-pagealloc.c **********/
#define PAGE_POISON 0xaa

/********** mm/slab.c **********/
/*
* Magic nums for obj red zoning.
Expand Down
1 change: 1 addition & 0 deletions lib/Kconfig.debug
Original file line number Diff line number Diff line change
Expand Up @@ -796,6 +796,7 @@ config SYSCTL_SYSCALL_CHECK
to properly maintain and use. This enables checks that help
you to keep things correct.

source mm/Kconfig.debug
source kernel/trace/Kconfig

config PROVIDE_OHCI1394_DMA_INIT
Expand Down
17 changes: 17 additions & 0 deletions mm/Kconfig.debug
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
config WANT_PAGE_DEBUG_FLAGS
bool

config PAGE_POISONING
bool "Debug page memory allocations"
depends on DEBUG_KERNEL && !ARCH_SUPPORTS_DEBUG_PAGEALLOC
depends on !HIBERNATION
select DEBUG_PAGEALLOC
select WANT_PAGE_DEBUG_FLAGS
help
Fill the pages with poison patterns after free_pages() and verify
the patterns before alloc_pages(). This results in a large slowdown,
but helps to find certain types of memory corruptions.

This option cannot be enabled together with hibernation. Otherwise it
would report spurious memory corruption, because free pages are not
saved to the suspend image.
1 change: 1 addition & 0 deletions mm/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o
obj-$(CONFIG_TMPFS_POSIX_ACL) += shmem_acl.o
obj-$(CONFIG_SLOB) += slob.o
obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
obj-$(CONFIG_PAGE_POISONING) += debug-pagealloc.o
obj-$(CONFIG_SLAB) += slab.o
obj-$(CONFIG_SLUB) += slub.o
obj-$(CONFIG_FAILSLAB) += failslab.o
Expand Down
129 changes: 129 additions & 0 deletions mm/debug-pagealloc.c
Original file line number Diff line number Diff line change
@@ -0,0 +1,129 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/page-debug-flags.h>
#include <linux/poison.h>

/*
 * Mark @page as poisoned.
 *
 * NOTE(review): uses the non-atomic __set_bit although the debug_flags
 * field comment in mm_types.h asks for atomic bitops — safe only while
 * the page is free and not visible to other users; confirm.
 */
static inline void set_page_poison(struct page *page)
{
	__set_bit(PAGE_DEBUG_FLAG_POISON, &page->debug_flags);
}

/* Clear the poison mark on @page (non-atomic; see set_page_poison()). */
static inline void clear_page_poison(struct page *page)
{
	__clear_bit(PAGE_DEBUG_FLAG_POISON, &page->debug_flags);
}

/* Return true if @page is currently marked as poisoned. */
static inline bool page_poison(struct page *page)
{
	return test_bit(PAGE_DEBUG_FLAG_POISON, &page->debug_flags);
}

/* Intentionally a no-op: highmem pages are left unpoisoned (see below). */
static void poison_highpage(struct page *page)
{
	/*
	 * Page poisoning for highmem pages is not implemented.
	 *
	 * This can be called from interrupt contexts.
	 * So we need to create a new kmap_atomic slot for this
	 * application and it will need interrupt protection.
	 */
}

/*
 * Fill one lowmem page with the poison pattern and mark it poisoned.
 * Highmem pages are deliberately skipped (no permanent kernel mapping).
 */
static void poison_page(struct page *page)
{
	if (PageHighMem(page)) {
		poison_highpage(page);
	} else {
		set_page_poison(page);
		memset(page_address(page), PAGE_POISON, PAGE_SIZE);
	}
}

static void poison_pages(struct page *page, int n)
{
int i;

for (i = 0; i < n; i++)
poison_page(page + i);
}

/*
 * Return true iff @a and @b differ in exactly one bit — i.e. the
 * corruption looks like a single-bit flip (possible hardware error)
 * rather than a stray store.
 */
static bool single_bit_flip(unsigned char a, unsigned char b)
{
	unsigned char diff = a ^ b;

	if (diff == 0)
		return false;

	/* A value with exactly one bit set is a power of two. */
	return (diff & (diff - 1)) == 0;
}

/*
 * Verify that @bytes bytes at @mem still carry the PAGE_POISON pattern.
 * If not, report the corrupted span (rate-limited), distinguishing a
 * single-bit flip from general corruption, and dump the call chain.
 */
static void check_poison_mem(unsigned char *mem, size_t bytes)
{
	unsigned char *start;
	unsigned char *end;

	/* Find the first byte that no longer matches the poison pattern. */
	for (start = mem; start < mem + bytes; start++) {
		if (*start != PAGE_POISON)
			break;
	}
	/* Entire range intact: nothing to report. */
	if (start == mem + bytes)
		return;

	/* Scan backwards for the last corrupted byte. */
	for (end = mem + bytes - 1; end > start; end--) {
		if (*end != PAGE_POISON)
			break;
	}

	/* Rate-limit to avoid flooding the log on widespread corruption. */
	if (!printk_ratelimit())
		return;
	else if (start == end && single_bit_flip(*start, PAGE_POISON))
		printk(KERN_ERR "pagealloc: single bit error\n");
	else
		printk(KERN_ERR "pagealloc: memory corruption\n");

	/* Dump the corrupted span [start, end] and who detected it. */
	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start,
			end - start + 1, 1);
	dump_stack();
}

/* Highmem pages are never poisoned (see poison_highpage()), so finding
 * the poison flag set on one indicates a bug in this file. */
static void unpoison_highpage(struct page *page)
{
	/*
	 * See comment in poison_highpage().
	 * Highmem pages should not be poisoned for now
	 */
	BUG_ON(page_poison(page));
}

/*
 * Verify and clear the poison on one page before it is handed back out.
 * Pages that were never poisoned (highmem, or freed before poisoning was
 * enabled) are left untouched.
 */
static void unpoison_page(struct page *page)
{
	void *addr;

	if (PageHighMem(page)) {
		unpoison_highpage(page);
		return;
	}

	if (!page_poison(page))
		return;

	addr = page_address(page);
	check_poison_mem(addr, PAGE_SIZE);
	clear_page_poison(page);
}

static void unpoison_pages(struct page *page, int n)
{
int i;

for (i = 0; i < n; i++)
unpoison_page(page + i);
}

/*
 * Generic CONFIG_DEBUG_PAGEALLOC hook: instead of actually (un)mapping
 * the pages, poison them on "unmap" (@enable == 0, free path) and verify
 * the poison on "map" (@enable != 0, alloc path).  No-op unless
 * debug_pagealloc was enabled on the command line.
 */
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (debug_pagealloc_enabled) {
		if (enable)
			unpoison_pages(page, numpages);
		else
			poison_pages(page, numpages);
	}
}

0 comments on commit 6a11f75

Please sign in to comment.