x86: add hooks for kmemcheck
The hooks that we modify are:
- Page fault handler (to handle kmemcheck faults)
- Debug exception handler (to hide pages after single-stepping
  the instruction that caused the page fault)

Also redefine memcpy() to use the plain REP-based __memcpy() unconditionally
if kmemcheck is enabled, so that both memory operands are known in advance.

(Thanks to Pekka Enberg for minimizing the impact on the page fault
handler.)

As kmemcheck doesn't handle MMX/SSE instructions (yet), we also disable
the optimized xor code, and rely instead on the generic C implementation
in order to avoid false-positive warnings.
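
For readers following the hunks below, the two hooks cooperate roughly as
follows (a conceptual sketch only, not actual kernel code; the kmemcheck_*
functions are what these hooks call into):

	/*
	 * do_page_fault():
	 *   a fault on a tracked, non-present page
	 *     -> kmemcheck_fault() records the access, maps the page back
	 *        in ("shows" it) and sets EFLAGS.TF so the faulting
	 *        instruction is single-stepped
	 *
	 * the instruction re-executes and now succeeds
	 *
	 * do_debug():
	 *   the resulting #DB trap
	 *     -> kmemcheck_trap() hides the page again and stops
	 *        single-stepping, so subsequent accesses fault as well
	 */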

Signed-off-by: Vegard Nossum <[email protected]>

[whitespace fixlet]
Signed-off-by: Pekka Enberg <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>

[rebased for mainline inclusion]
Signed-off-by: Vegard Nossum <[email protected]>
vegard committed Jun 15, 2009
1 parent f8b4ece commit f856129
Showing 8 changed files with 66 additions and 5 deletions.
8 changes: 8 additions & 0 deletions arch/x86/include/asm/string_32.h
@@ -177,10 +177,18 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len)
  * No 3D Now!
  */
 
+#ifndef CONFIG_KMEMCHECK
 #define memcpy(t, f, n) \
 	(__builtin_constant_p((n)) \
 	? __constant_memcpy((t), (f), (n)) \
 	: __memcpy((t), (f), (n)))
+#else
+/*
+ * kmemcheck becomes very happy if we use the REP instructions unconditionally,
+ * because it means that we know both memory operands in advance.
+ */
+#define memcpy(t, f, n) __memcpy((t), (f), (n))
+#endif
 
 #endif

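For context, the __memcpy() that kmemcheck forces here is a REP-based copy
along roughly these lines (a sketch of the routine from the same header,
lightly trimmed). The point is that the source, destination and count sit
in fixed registers (ESI/EDI/ECX), so kmemcheck knows both memory operands
in advance instead of having to decode arbitrary instruction forms:

	static __always_inline void *__memcpy(void *to, const void *from, size_t n)
	{
		int d0, d1, d2;
		asm volatile("rep ; movsl\n\t"	/* copy n/4 dwords */
			     "movl %4,%%ecx\n\t"
			     "andl $3,%%ecx\n\t"
			     "jz 1f\n\t"	/* no remainder? done */
			     "rep ; movsb\n\t"	/* copy remaining bytes */
			     "1:"
			     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
			     : "0" (n / 4), "g" (n), "1" ((long)to), "2" ((long)from)
			     : "memory");
		return to;
	}
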
8 changes: 8 additions & 0 deletions arch/x86/include/asm/string_64.h
@@ -27,6 +27,7 @@ static __always_inline void *__inline_memcpy(void *to, const void *from, size_t
    function. */
 
 #define __HAVE_ARCH_MEMCPY 1
+#ifndef CONFIG_KMEMCHECK
 #if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4
 extern void *memcpy(void *to, const void *from, size_t len);
 #else
@@ -42,6 +43,13 @@ extern void *__memcpy(void *to, const void *from, size_t len);
 	__ret; \
 })
 #endif
+#else
+/*
+ * kmemcheck becomes very happy if we use the REP instructions unconditionally,
+ * because it means that we know both memory operands in advance.
+ */
+#define memcpy(dst, src, len) __inline_memcpy((dst), (src), (len))
+#endif
 
 #define __HAVE_ARCH_MEMSET
 void *memset(void *s, int c, size_t n);
5 changes: 5 additions & 0 deletions arch/x86/include/asm/xor.h
@@ -1,5 +1,10 @@
+#ifdef CONFIG_KMEMCHECK
+/* kmemcheck doesn't handle MMX/SSE/SSE2 instructions */
+# include <asm-generic/xor.h>
+#else
 #ifdef CONFIG_X86_32
 # include "xor_32.h"
 #else
 # include "xor_64.h"
 #endif
+#endif
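
The generic fallback trades the MMX/SSE fast paths for plain C that
kmemcheck can follow. Simplified from the xor_8regs_* routines in
include/asm-generic/xor.h (the real ones unroll eight words per
iteration):

	static void xor_generic_2(unsigned long bytes,
				  unsigned long *p1, unsigned long *p2)
	{
		long lines = bytes / sizeof(unsigned long);

		do {
			*p1++ ^= *p2++;	/* word-at-a-time xor, no vector insns */
		} while (--lines > 0);
	}
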
23 changes: 23 additions & 0 deletions arch/x86/kernel/cpu/intel.c
@@ -86,6 +86,29 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 	 */
 	if (c->x86 == 6 && c->x86_model < 15)
 		clear_cpu_cap(c, X86_FEATURE_PAT);
+
+#ifdef CONFIG_KMEMCHECK
+	/*
+	 * P4s have a "fast strings" feature which causes single-
+	 * stepping REP instructions to only generate a #DB on
+	 * cache-line boundaries.
+	 *
+	 * Ingo Molnar reported a Pentium D (model 6) and a Xeon
+	 * (model 2) with the same problem.
+	 */
+	if (c->x86 == 15) {
+		u64 misc_enable;
+
+		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+
+		if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
+			printk(KERN_INFO "kmemcheck: Disabling fast string operations\n");
+
+			misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
+			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+		}
+	}
+#endif
 }
 
 #ifdef CONFIG_X86_32
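For reference, the MSR touched above is IA32_MISC_ENABLE (0x1a0), whose
bit 0 is the fast-strings enable; the asm/msr-index.h constants expand to
roughly:

	#define MSR_IA32_MISC_ENABLE			0x000001a0
	#define MSR_IA32_MISC_ENABLE_FAST_STRING	(1ULL << 0)

Clearing the bit slows down REP string operations, but makes a
single-stepped REP instruction trap after every iteration rather than
only at cache-line boundaries, which is what kmemcheck needs.
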
5 changes: 5 additions & 0 deletions arch/x86/kernel/traps.c
@@ -45,6 +45,7 @@
 #include <linux/edac.h>
 #endif
 
+#include <asm/kmemcheck.h>
 #include <asm/stacktrace.h>
 #include <asm/processor.h>
 #include <asm/debugreg.h>
@@ -534,6 +535,10 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
 
 	get_debugreg(condition, 6);
 
+	/* Catch kmemcheck conditions first of all! */
+	if (condition & DR_STEP && kmemcheck_trap(regs))
+		return;
+
 	/*
 	 * The processor cleared BTF, so don't mark that we need it set.
 	 */
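Conceptually, kmemcheck_trap() is the inverse of the page-fault hook; a
sketch of the contract it fulfills (not the actual mm/kmemcheck code):

	bool kmemcheck_trap(struct pt_regs *regs)
	{
		/* Were we single-stepping on kmemcheck's behalf? */
		if (!kmemcheck_active(regs))
			return false;

		/* Re-hide the page(s) and stop single-stepping. */
		kmemcheck_hide(regs);
		return true;
	}
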
18 changes: 15 additions & 3 deletions arch/x86/mm/fault.c
@@ -14,6 +14,7 @@
 
 #include <asm/traps.h>		/* dotraplinkage, ... */
 #include <asm/pgalloc.h>	/* pgd_*(), ... */
+#include <asm/kmemcheck.h>	/* kmemcheck_*(), ... */
 
 /*
  * Page fault error code bits:
@@ -956,6 +957,13 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	/* Get the faulting address: */
 	address = read_cr2();
 
+	/*
+	 * Detect and handle instructions that would cause a page fault for
+	 * both a tracked kernel page and a userspace page.
+	 */
+	if (kmemcheck_active(regs))
+		kmemcheck_hide(regs);
+
 	if (unlikely(kmmio_fault(regs, address)))
 		return;
 
@@ -973,9 +981,13 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	 * protection error (error_code & 9) == 0.
 	 */
 	if (unlikely(fault_in_kernel_space(address))) {
-		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
-		    vmalloc_fault(address) >= 0)
-			return;
+		if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
+			if (vmalloc_fault(address) >= 0)
+				return;
+
+			if (kmemcheck_fault(regs, address, error_code))
+				return;
+		}
 
 		/* Can handle a stale RO->RW TLB: */
 		if (spurious_fault(error_code, address))
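The ordering matters: kmemcheck_hide() runs before anything that might
itself touch a tracked page, and kmemcheck_fault() is only consulted for
kernel-space addresses once vmalloc_fault() has passed. A sketch of the
contract kmemcheck_fault() fulfills (a hypothetical simplification;
kmemcheck_pte_lookup() stands in for the real shadow-page lookup):

	bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
			     unsigned long error_code)
	{
		/* Only kernel-mode accesses are tracked. */
		if (regs->cs != __KERNEL_CS)
			return false;

		/* Is the faulting address a kmemcheck-tracked page? */
		if (!kmemcheck_pte_lookup(address))
			return false;

		/* Record the access, map the page back in and set TF. */
		kmemcheck_access(regs, address, error_code);
		kmemcheck_show(regs);
		return true;
	}
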
2 changes: 1 addition & 1 deletion arch/x86/mm/init.c
@@ -213,7 +213,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	if (!after_bootmem)
 		init_gbpages();
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
 	/*
 	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
 	 * This will simplify cpa(), which otherwise needs to support splitting
2 changes: 1 addition & 1 deletion arch/x86/mm/init_32.c
@@ -111,7 +111,7 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
 	pte_t *page_table = NULL;
 
 	if (after_bootmem) {
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
 		page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
 #endif
 		if (!page_table)
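(In both files the reasoning mirrors CONFIG_DEBUG_PAGEALLOC: kmemcheck
marks individual 4k pages non-present, which only works when the kernel
identity mapping is built from small pages rather than 2M/4M large pages.)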
