Merge branch 'for-linus2' of git://git.kernel.org/pub/scm/linux/kernel/git/vegard/kmemcheck

* 'for-linus2' of git://git.kernel.org/pub/scm/linux/kernel/git/vegard/kmemcheck: (39 commits)
  signal: fix __send_signal() false positive kmemcheck warning
  fs: fix do_mount_root() false positive kmemcheck warning
  fs: introduce __getname_gfp()
  trace: annotate bitfields in struct ring_buffer_event
  net: annotate struct sock bitfield
  c2port: annotate bitfield for kmemcheck
  net: annotate inet_timewait_sock bitfields
  ieee1394/csr1212: fix false positive kmemcheck report
  ieee1394: annotate bitfield
  net: annotate bitfields in struct inet_sock
  net: use kmemcheck bitfields API for skbuff
  kmemcheck: introduce bitfield API
  kmemcheck: add opcode self-testing at boot
  x86: unify pte_hidden
  x86: make _PAGE_HIDDEN conditional
  kmemcheck: make kconfig accessible for other architectures
  kmemcheck: enable in the x86 Kconfig
  kmemcheck: add hooks for the page allocator
  kmemcheck: add hooks for page- and sg-dma-mappings
  kmemcheck: don't track page tables
  ...
torvalds committed Jun 16, 2009
2 parents e1f5b94 + 722f2a6 commit b3fec0f
Showing 71 changed files with 2,899 additions and 128 deletions.
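Several of the commits above annotate bitfields with the new kmemcheck bitfield API (kmemcheck_bitfield_begin/end plus kmemcheck_annotate_bitfield), for example in struct sk_buff and struct inet_sock. A minimal sketch of how such an annotation is used is shown below; the struct and allocation helper are invented for illustration and are not code from this merge.

#include <linux/kmemcheck.h>
#include <linux/slab.h>

/*
 * Illustrative only -- not a struct from this merge. Adjacent bitfields
 * share bytes, so setting them one at a time still leaves kmemcheck's
 * shadow for those bytes partially undefined. The begin/end markers
 * delimit the region so it can be marked initialized in one go.
 */
struct example_flags {
	kmemcheck_bitfield_begin(flags);
	unsigned int	cloned:1,
			nohdr:1;
	kmemcheck_bitfield_end(flags);
};

static struct example_flags *example_alloc(gfp_t gfp)
{
	struct example_flags *e = kmalloc(sizeof(*e), gfp);

	if (!e)
		return NULL;

	e->cloned = 0;
	e->nohdr = 0;
	/* Tell kmemcheck the whole bitfield region is now defined. */
	kmemcheck_annotate_bitfield(e, flags);
	return e;
}

With CONFIG_KMEMCHECK disabled, these annotations are effectively no-ops, so annotated structures should pay no runtime cost.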
773 changes: 773 additions & 0 deletions Documentation/kmemcheck.txt

Large diffs are not rendered by default.

8 changes: 8 additions & 0 deletions MAINTAINERS
@@ -3406,6 +3406,14 @@ F: drivers/serial/kgdboc.c
F: include/linux/kgdb.h
F: kernel/kgdb.c

KMEMCHECK
P: Vegard Nossum
M: [email protected]
P: Pekka Enberg
M: [email protected]
L: [email protected]
S: Maintained

KMEMLEAK
P: Catalin Marinas
M: [email protected]
1 change: 1 addition & 0 deletions arch/x86/Kconfig
@@ -46,6 +46,7 @@ config X86
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_LZMA
select HAVE_ARCH_KMEMCHECK

config OUTPUT_FORMAT
string
5 changes: 5 additions & 0 deletions arch/x86/Makefile
@@ -81,6 +81,11 @@ ifdef CONFIG_CC_STACKPROTECTOR
endif
endif

# Don't unroll struct assignments with kmemcheck enabled
ifeq ($(CONFIG_KMEMCHECK),y)
KBUILD_CFLAGS += $(call cc-option,-fno-builtin-memcpy)
endif

# Stackpointer is addressed different for 32 bit and 64 bit x86
sp-$(CONFIG_X86_32) := esp
sp-$(CONFIG_X86_64) := rsp
7 changes: 7 additions & 0 deletions arch/x86/include/asm/dma-mapping.h
@@ -6,6 +6,7 @@
* Documentation/DMA-API.txt for documentation.
*/

#include <linux/kmemcheck.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
@@ -60,6 +61,7 @@ dma_map_single(struct device *hwdev, void *ptr, size_t size,
dma_addr_t addr;

BUG_ON(!valid_dma_direction(dir));
kmemcheck_mark_initialized(ptr, size);
addr = ops->map_page(hwdev, virt_to_page(ptr),
(unsigned long)ptr & ~PAGE_MASK, size,
dir, NULL);
@@ -87,8 +89,12 @@ dma_map_sg(struct device *hwdev, struct scatterlist *sg,
{
struct dma_map_ops *ops = get_dma_ops(hwdev);
int ents;
struct scatterlist *s;
int i;

BUG_ON(!valid_dma_direction(dir));
for_each_sg(sg, s, nents, i)
kmemcheck_mark_initialized(sg_virt(s), s->length);
ents = ops->map_sg(hwdev, sg, nents, dir, NULL);
debug_dma_map_sg(hwdev, sg, nents, ents, dir);

@@ -200,6 +206,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
dma_addr_t addr;

BUG_ON(!valid_dma_direction(dir));
kmemcheck_mark_initialized(page_address(page) + offset, size);
addr = ops->map_page(dev, page, offset, size, dir, NULL);
debug_dma_map_page(dev, page, offset, size, dir, addr, false);

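The kmemcheck_mark_initialized() calls added above cover data written by devices rather than by the CPU: without them, the first CPU read of a DMA'd buffer would look like a use of uninitialized memory. A rough, purely illustrative receive-path sketch (the driver function, buffer size, and logging are invented, and error handling is elided) might look like:

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#define EXAMPLE_RX_BUF_SIZE 2048	/* illustrative size */

/* Hypothetical receive path: the device DMAs a frame into buf. */
static void example_rx(struct device *dev)
{
	void *buf = kmalloc(EXAMPLE_RX_BUF_SIZE, GFP_KERNEL);
	dma_addr_t dma;

	if (!buf)
		return;

	/*
	 * dma_map_single() now marks buf initialized for kmemcheck,
	 * because the device (invisible to kmemcheck) will write it.
	 */
	dma = dma_map_single(dev, buf, EXAMPLE_RX_BUF_SIZE, DMA_FROM_DEVICE);

	/* ... hardware DMAs received data into buf ... */

	dma_unmap_single(dev, dma, EXAMPLE_RX_BUF_SIZE, DMA_FROM_DEVICE);

	/* CPU reads device-written data without a false positive. */
	pr_info("first byte: %02x\n", *(unsigned char *)buf);
	kfree(buf);
}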
42 changes: 42 additions & 0 deletions arch/x86/include/asm/kmemcheck.h
@@ -0,0 +1,42 @@
#ifndef ASM_X86_KMEMCHECK_H
#define ASM_X86_KMEMCHECK_H

#include <linux/types.h>
#include <asm/ptrace.h>

#ifdef CONFIG_KMEMCHECK
bool kmemcheck_active(struct pt_regs *regs);

void kmemcheck_show(struct pt_regs *regs);
void kmemcheck_hide(struct pt_regs *regs);

bool kmemcheck_fault(struct pt_regs *regs,
unsigned long address, unsigned long error_code);
bool kmemcheck_trap(struct pt_regs *regs);
#else
static inline bool kmemcheck_active(struct pt_regs *regs)
{
return false;
}

static inline void kmemcheck_show(struct pt_regs *regs)
{
}

static inline void kmemcheck_hide(struct pt_regs *regs)
{
}

static inline bool kmemcheck_fault(struct pt_regs *regs,
unsigned long address, unsigned long error_code)
{
return false;
}

static inline bool kmemcheck_trap(struct pt_regs *regs)
{
return false;
}
#endif /* CONFIG_KMEMCHECK */

#endif
5 changes: 5 additions & 0 deletions arch/x86/include/asm/pgtable.h
@@ -317,6 +317,11 @@ static inline int pte_present(pte_t a)
return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

static inline int pte_hidden(pte_t pte)
{
return pte_flags(pte) & _PAGE_HIDDEN;
}

static inline int pmd_present(pmd_t pmd)
{
return pmd_flags(pmd) & _PAGE_PRESENT;
9 changes: 7 additions & 2 deletions arch/x86/include/asm/pgtable_types.h
@@ -18,7 +18,7 @@
#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
#define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
#define _PAGE_BIT_UNUSED3 11
#define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
@@ -41,13 +41,18 @@
#define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
#define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
#define _PAGE_UNUSED3 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
#define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
#define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
#define __HAVE_ARCH_PTE_SPECIAL

#ifdef CONFIG_KMEMCHECK
#define _PAGE_HIDDEN (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
#else
#define _PAGE_HIDDEN (_AT(pteval_t, 0))
#endif

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
#else
8 changes: 8 additions & 0 deletions arch/x86/include/asm/string_32.h
@@ -177,10 +177,18 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len)
* No 3D Now!
*/

#ifndef CONFIG_KMEMCHECK
#define memcpy(t, f, n) \
(__builtin_constant_p((n)) \
? __constant_memcpy((t), (f), (n)) \
: __memcpy((t), (f), (n)))
#else
/*
* kmemcheck becomes very happy if we use the REP instructions unconditionally,
* because it means that we know both memory operands in advance.
*/
#define memcpy(t, f, n) __memcpy((t), (f), (n))
#endif

#endif

8 changes: 8 additions & 0 deletions arch/x86/include/asm/string_64.h
@@ -27,6 +27,7 @@ static __always_inline void *__inline_memcpy(void *to, const void *from, size_t
function. */

#define __HAVE_ARCH_MEMCPY 1
#ifndef CONFIG_KMEMCHECK
#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4
extern void *memcpy(void *to, const void *from, size_t len);
#else
@@ -42,6 +43,13 @@ extern void *__memcpy(void *to, const void *from, size_t len);
__ret; \
})
#endif
#else
/*
* kmemcheck becomes very happy if we use the REP instructions unconditionally,
* because it means that we know both memory operands in advance.
*/
#define memcpy(dst, src, len) __inline_memcpy((dst), (src), (len))
#endif

#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
4 changes: 2 additions & 2 deletions arch/x86/include/asm/thread_info.h
@@ -154,9 +154,9 @@ struct thread_info {

/* thread information allocation */
#ifdef CONFIG_DEBUG_STACK_USAGE
#define THREAD_FLAGS (GFP_KERNEL | __GFP_ZERO)
#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
#else
#define THREAD_FLAGS GFP_KERNEL
#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK)
#endif

#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
5 changes: 5 additions & 0 deletions arch/x86/include/asm/xor.h
@@ -1,5 +1,10 @@
#ifdef CONFIG_KMEMCHECK
/* kmemcheck doesn't handle MMX/SSE/SSE2 instructions */
# include <asm-generic/xor.h>
#else
#ifdef CONFIG_X86_32
# include "xor_32.h"
#else
# include "xor_64.h"
#endif
#endif
23 changes: 23 additions & 0 deletions arch/x86/kernel/cpu/intel.c
@@ -86,6 +86,29 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
*/
if (c->x86 == 6 && c->x86_model < 15)
clear_cpu_cap(c, X86_FEATURE_PAT);

#ifdef CONFIG_KMEMCHECK
/*
* P4s have a "fast strings" feature which causes single-
* stepping REP instructions to only generate a #DB on
* cache-line boundaries.
*
* Ingo Molnar reported a Pentium D (model 6) and a Xeon
* (model 2) with the same problem.
*/
if (c->x86 == 15) {
u64 misc_enable;

rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
printk(KERN_INFO "kmemcheck: Disabling fast string operations\n");

misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
}
}
#endif
}

#ifdef CONFIG_X86_32
2 changes: 1 addition & 1 deletion arch/x86/kernel/process.c
@@ -63,7 +63,7 @@ void arch_task_cache_init(void)
task_xstate_cachep =
kmem_cache_create("task_xstate", xstate_size,
__alignof__(union thread_xstate),
SLAB_PANIC, NULL);
SLAB_PANIC | SLAB_NOTRACK, NULL);
}

/*
7 changes: 7 additions & 0 deletions arch/x86/kernel/stacktrace.c
@@ -77,6 +77,13 @@ void save_stack_trace(struct stack_trace *trace)
}
EXPORT_SYMBOL_GPL(save_stack_trace);

void save_stack_trace_bp(struct stack_trace *trace, unsigned long bp)
{
dump_trace(current, NULL, NULL, bp, &save_stack_ops, trace);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}

void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
dump_trace(tsk, NULL, NULL, 0, &save_stack_ops_nosched, trace);
5 changes: 5 additions & 0 deletions arch/x86/kernel/traps.c
@@ -45,6 +45,7 @@
#include <linux/edac.h>
#endif

#include <asm/kmemcheck.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
@@ -534,6 +535,10 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)

get_debugreg(condition, 6);

/* Catch kmemcheck conditions first of all! */
if (condition & DR_STEP && kmemcheck_trap(regs))
return;

/*
* The processor cleared BTF, so don't mark that we need it set.
*/
2 changes: 2 additions & 0 deletions arch/x86/mm/Makefile
@@ -10,6 +10,8 @@ obj-$(CONFIG_X86_PTDUMP) += dump_pagetables.o

obj-$(CONFIG_HIGHMEM) += highmem_32.o

obj-$(CONFIG_KMEMCHECK) += kmemcheck/

obj-$(CONFIG_MMIOTRACE) += mmiotrace.o
mmiotrace-y := kmmio.o pf_in.o mmio-mod.o
obj-$(CONFIG_MMIOTRACE_TEST) += testmmiotrace.o
18 changes: 15 additions & 3 deletions arch/x86/mm/fault.c
@@ -14,6 +14,7 @@

#include <asm/traps.h> /* dotraplinkage, ... */
#include <asm/pgalloc.h> /* pgd_*(), ... */
#include <asm/kmemcheck.h> /* kmemcheck_*(), ... */

/*
* Page fault error code bits:
@@ -956,6 +957,13 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
/* Get the faulting address: */
address = read_cr2();

/*
* Detect and handle instructions that would cause a page fault for
* both a tracked kernel page and a userspace page.
*/
if (kmemcheck_active(regs))
kmemcheck_hide(regs);

if (unlikely(kmmio_fault(regs, address)))
return;

@@ -973,9 +981,13 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
* protection error (error_code & 9) == 0.
*/
if (unlikely(fault_in_kernel_space(address))) {
if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
vmalloc_fault(address) >= 0)
return;
if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
if (vmalloc_fault(address) >= 0)
return;

if (kmemcheck_fault(regs, address, error_code))
return;
}

/* Can handle a stale RO->RW TLB: */
if (spurious_fault(error_code, address))
2 changes: 1 addition & 1 deletion arch/x86/mm/init.c
@@ -213,7 +213,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
if (!after_bootmem)
init_gbpages();

#ifdef CONFIG_DEBUG_PAGEALLOC
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
/*
* For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
* This will simplify cpa(), which otherwise needs to support splitting
2 changes: 1 addition & 1 deletion arch/x86/mm/init_32.c
@@ -111,7 +111,7 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
pte_t *page_table = NULL;

if (after_bootmem) {
#ifdef CONFIG_DEBUG_PAGEALLOC
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
if (!page_table)
4 changes: 2 additions & 2 deletions arch/x86/mm/init_64.c
@@ -104,7 +104,7 @@ static __ref void *spp_getpage(void)
void *ptr;

if (after_bootmem)
ptr = (void *) get_zeroed_page(GFP_ATOMIC);
ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
else
ptr = alloc_bootmem_pages(PAGE_SIZE);

@@ -281,7 +281,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
void *adr;

if (after_bootmem) {
adr = (void *)get_zeroed_page(GFP_ATOMIC);
adr = (void *)get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
*phys = __pa(adr);

return adr;
1 change: 1 addition & 0 deletions arch/x86/mm/kmemcheck/Makefile
@@ -0,0 +1 @@
obj-y := error.o kmemcheck.o opcode.o pte.o selftest.o shadow.o