Skip to content

Commit

Permalink
kmemcheck: rip it out
Browse files Browse the repository at this point in the history
Fix up makefiles, remove references, and git rm kmemcheck.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Sasha Levin <[email protected]>
Cc: Steven Rostedt <[email protected]>
Cc: Vegard Nossum <[email protected]>
Cc: Pekka Enberg <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Eric W. Biederman <[email protected]>
Cc: Alexander Potapenko <[email protected]>
Cc: Tim Hansen <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
  • Loading branch information
Levin, Alexander (Sasha Levin) authored and torvalds committed Nov 16, 2017
1 parent d8be756 commit 4675ff0
Show file tree
Hide file tree
Showing 35 changed files with 7 additions and 2,592 deletions.
7 changes: 0 additions & 7 deletions Documentation/admin-guide/kernel-parameters.txt
Original file line number Diff line number Diff line change
Expand Up @@ -1864,13 +1864,6 @@
Built with CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y,
the default is off.

kmemcheck= [X86] Boot-time kmemcheck enable/disable/one-shot mode
Valid arguments: 0, 1, 2
kmemcheck=0 (disabled)
kmemcheck=1 (enabled)
kmemcheck=2 (one-shot mode)
Default: 2 (one-shot mode)

kvm.ignore_msrs=[KVM] Ignore guest accesses to unhandled MSRs.
Default is 0 (don't ignore, but inject #GP)

Expand Down
1 change: 0 additions & 1 deletion Documentation/dev-tools/index.rst
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@ whole; patches welcome!
kasan
ubsan
kmemleak
kmemcheck
gdb-kernel-debugging
kgdb
kselftest
Expand Down
733 changes: 0 additions & 733 deletions Documentation/dev-tools/kmemcheck.rst

This file was deleted.

10 changes: 0 additions & 10 deletions MAINTAINERS
Original file line number Diff line number Diff line change
Expand Up @@ -7688,16 +7688,6 @@ F: include/linux/kdb.h
F: include/linux/kgdb.h
F: kernel/debug/

KMEMCHECK
M: Vegard Nossum <[email protected]>
M: Pekka Enberg <[email protected]>
S: Maintained
F: Documentation/dev-tools/kmemcheck.rst
F: arch/x86/include/asm/kmemcheck.h
F: arch/x86/mm/kmemcheck/
F: include/linux/kmemcheck.h
F: mm/kmemcheck.c

KMEMLEAK
M: Catalin Marinas <[email protected]>
S: Maintained
Expand Down
3 changes: 1 addition & 2 deletions arch/x86/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -112,7 +112,6 @@ config X86
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KASAN if X86_64 && SPARSEMEM_VMEMMAP
select HAVE_ARCH_KGDB
select HAVE_ARCH_KMEMCHECK
select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if MMU && COMPAT
select HAVE_ARCH_COMPAT_MMAP_BASES if MMU && COMPAT
Expand Down Expand Up @@ -1430,7 +1429,7 @@ config ARCH_DMA_ADDR_T_64BIT

config X86_DIRECT_GBPAGES
def_bool y
depends on X86_64 && !DEBUG_PAGEALLOC && !KMEMCHECK
depends on X86_64 && !DEBUG_PAGEALLOC
---help---
Certain kernel features effectively disable kernel
linear 1 GB mappings (even if the CPU otherwise
Expand Down
42 changes: 0 additions & 42 deletions arch/x86/include/asm/kmemcheck.h
Original file line number Diff line number Diff line change
@@ -1,43 +1 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * x86 hooks for kmemcheck, the dynamic checker for use of uninitialized
 * kernel memory.
 *
 * With CONFIG_KMEMCHECK=y the real implementations are built from
 * arch/x86/mm/kmemcheck/ (see the corresponding obj-$(CONFIG_KMEMCHECK)
 * Makefile entry); with it disabled, the static inline stubs below turn
 * every hook into a no-op so that callers never need their own #ifdefs.
 */
#ifndef ASM_X86_KMEMCHECK_H
#define ASM_X86_KMEMCHECK_H

#include <linux/types.h>
#include <asm/ptrace.h>

#ifdef CONFIG_KMEMCHECK
/*
 * NOTE(review): presumably reports whether kmemcheck is mid-operation for
 * the context described by @regs — confirm against the implementation in
 * arch/x86/mm/kmemcheck/.
 */
bool kmemcheck_active(struct pt_regs *regs);

/* Make tracked pages visible / hidden again around a checked access —
 * assumed pairing based on the names; verify against the implementation. */
void kmemcheck_show(struct pt_regs *regs);
void kmemcheck_hide(struct pt_regs *regs);

/*
 * Page-fault and trap callouts. The bool return (false in the !KMEMCHECK
 * stubs below) appears to mean "kmemcheck handled this event"; when false,
 * the caller proceeds with normal fault/trap handling.
 */
bool kmemcheck_fault(struct pt_regs *regs,
unsigned long address, unsigned long error_code);
bool kmemcheck_trap(struct pt_regs *regs);
#else
/* Stubs for CONFIG_KMEMCHECK=n: never active, never claims an event. */
static inline bool kmemcheck_active(struct pt_regs *regs)
{
return false;
}

static inline void kmemcheck_show(struct pt_regs *regs)
{
}

static inline void kmemcheck_hide(struct pt_regs *regs)
{
}

static inline bool kmemcheck_fault(struct pt_regs *regs,
unsigned long address, unsigned long error_code)
{
return false;
}

static inline bool kmemcheck_trap(struct pt_regs *regs)
{
return false;
}
#endif /* CONFIG_KMEMCHECK */

#endif
9 changes: 0 additions & 9 deletions arch/x86/include/asm/string_32.h
Original file line number Diff line number Diff line change
Expand Up @@ -179,8 +179,6 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len)
* No 3D Now!
*/

#ifndef CONFIG_KMEMCHECK

#if (__GNUC__ >= 4)
#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
#else
Expand All @@ -189,13 +187,6 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len)
? __constant_memcpy((t), (f), (n)) \
: __memcpy((t), (f), (n)))
#endif
#else
/*
* kmemcheck becomes very happy if we use the REP instructions unconditionally,
* because it means that we know both memory operands in advance.
*/
#define memcpy(t, f, n) __memcpy((t), (f), (n))
#endif

#endif
#endif /* !CONFIG_FORTIFY_SOURCE */
Expand Down
8 changes: 0 additions & 8 deletions arch/x86/include/asm/string_64.h
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,6 @@ extern void *memcpy(void *to, const void *from, size_t len);
extern void *__memcpy(void *to, const void *from, size_t len);

#ifndef CONFIG_FORTIFY_SOURCE
#ifndef CONFIG_KMEMCHECK
#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
#define memcpy(dst, src, len) \
({ \
Expand All @@ -46,13 +45,6 @@ extern void *__memcpy(void *to, const void *from, size_t len);
__ret; \
})
#endif
#else
/*
* kmemcheck becomes very happy if we use the REP instructions unconditionally,
* because it means that we know both memory operands in advance.
*/
#define memcpy(dst, src, len) __inline_memcpy((dst), (src), (len))
#endif
#endif /* !CONFIG_FORTIFY_SOURCE */

#define __HAVE_ARCH_MEMSET
Expand Down
15 changes: 0 additions & 15 deletions arch/x86/kernel/cpu/intel.c
Original file line number Diff line number Diff line change
Expand Up @@ -187,21 +187,6 @@ static void early_init_intel(struct cpuinfo_x86 *c)
if (c->x86 == 6 && c->x86_model < 15)
clear_cpu_cap(c, X86_FEATURE_PAT);

#ifdef CONFIG_KMEMCHECK
/*
* P4s have a "fast strings" feature which causes single-
* stepping REP instructions to only generate a #DB on
* cache-line boundaries.
*
* Ingo Molnar reported a Pentium D (model 6) and a Xeon
* (model 2) with the same problem.
*/
if (c->x86 == 15)
if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
MSR_IA32_MISC_ENABLE_FAST_STRING_BIT) > 0)
pr_info("kmemcheck: Disabling fast string operations\n");
#endif

/*
* If fast string is not enabled in IA32_MISC_ENABLE for any reason,
* clear the fast string and enhanced fast string CPU capabilities.
Expand Down
2 changes: 0 additions & 2 deletions arch/x86/mm/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -29,8 +29,6 @@ obj-$(CONFIG_X86_PTDUMP) += debug_pagetables.o

obj-$(CONFIG_HIGHMEM) += highmem_32.o

obj-$(CONFIG_KMEMCHECK) += kmemcheck/

KASAN_SANITIZE_kasan_init_$(BITS).o := n
obj-$(CONFIG_KASAN) += kasan_init_$(BITS).o

Expand Down
5 changes: 2 additions & 3 deletions arch/x86/mm/init.c
Original file line number Diff line number Diff line change
Expand Up @@ -163,12 +163,11 @@ static int page_size_mask;
static void __init probe_page_size_mask(void)
{
/*
* For CONFIG_KMEMCHECK or pagealloc debugging, identity mapping will
* use small pages.
* For pagealloc debugging, identity mapping will use small pages.
* This will simplify cpa(), which otherwise needs to support splitting
* large pages into small in interrupt context, etc.
*/
if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled() && !IS_ENABLED(CONFIG_KMEMCHECK))
if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled())
page_size_mask |= 1 << PG_LEVEL_2M;
else
direct_gbpages = 0;
Expand Down
1 change: 0 additions & 1 deletion arch/x86/mm/kmemcheck/Makefile

This file was deleted.

Loading

0 comments on commit 4675ff0

Please sign in to comment.