Skip to content

Commit

Permalink
x86/mm: Remove debug/x86/tlb_defer_switch_to_init_mm
Browse files Browse the repository at this point in the history
Borislav thinks that we don't need this knob in a released kernel.
Get rid of it.

Requested-by: Borislav Petkov <[email protected]>
Signed-off-by: Andy Lutomirski <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Fixes: b956575 ("x86/mm: Flush more aggressively in lazy TLB mode")
Link: http://lkml.kernel.org/r/1fa72431924e81e86c164ff7881bf9240d1f1a6c.1508000261.git.luto@kernel.org
Signed-off-by: Ingo Molnar <[email protected]>
  • Loading branch information
amluto authored and Ingo Molnar committed Oct 18, 2017
1 parent 4e57b94 commit 7ac7f2c
Show file tree
Hide file tree
Showing 2 changed files with 12 additions and 66 deletions.
20 changes: 12 additions & 8 deletions arch/x86/include/asm/tlbflush.h
Original file line number Diff line number Diff line change
Expand Up @@ -82,16 +82,20 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

/*
* If tlb_use_lazy_mode is true, then we try to avoid switching CR3 to point
* to init_mm when we switch to a kernel thread (e.g. the idle thread). If
* it's false, then we immediately switch CR3 when entering a kernel thread.
*/
DECLARE_STATIC_KEY_TRUE(__tlb_defer_switch_to_init_mm);

/*
 * Decide whether to defer switching CR3 to init_mm when entering lazy
 * TLB mode (e.g. when a kernel thread such as the idle thread runs).
 *
 * Returns true if the switch should be deferred, false if it should
 * happen immediately.
 */
static inline bool tlb_defer_switch_to_init_mm(void)
{
	/*
	 * If we have PCID, then switching to init_mm is reasonably
	 * fast.  If we don't have PCID, then switching to init_mm is
	 * quite slow, so we try to defer it in the hopes that we can
	 * avoid it entirely.  The latter approach runs the risk of
	 * receiving otherwise unnecessary IPIs.
	 *
	 * This choice is just a heuristic.  The tlb code can handle this
	 * function returning true or false regardless of whether we have
	 * PCID.
	 */
	return !static_cpu_has(X86_FEATURE_PCID);
}

/*
Expand Down
58 changes: 0 additions & 58 deletions arch/x86/mm/tlb.c
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,6 @@

/* Presumably a monotonically increasing id handed out per mm context — TODO confirm against inc_mm_tlb_gen()/ctx_id users. */
atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);

/* Static key (default-true) backing tlb_defer_switch_to_init_mm(); toggled from the debugfs knob below. */
DEFINE_STATIC_KEY_TRUE(__tlb_defer_switch_to_init_mm);

static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
u16 *new_asid, bool *need_flush)
Expand Down Expand Up @@ -629,60 +628,3 @@ static int __init create_tlb_single_page_flush_ceiling(void)
return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);

/*
 * debugfs read handler: report the current state of the lazy-TLB
 * deferral static key to userspace as "1\n" or "0\n".
 */
static ssize_t tlblazy_read_file(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	char out[2];

	if (static_branch_likely(&__tlb_defer_switch_to_init_mm))
		out[0] = '1';
	else
		out[0] = '0';
	out[1] = '\n';

	return simple_read_from_buffer(user_buf, count, ppos, out, 2);
}

/*
 * debugfs write handler: parse a boolean from userspace and enable or
 * disable the lazy-TLB deferral static key accordingly.
 *
 * Returns the number of bytes consumed, or -EINVAL on a malformed value.
 */
static ssize_t tlblazy_write_file(struct file *file,
		const char __user *user_buf, size_t count, loff_t *ppos)
{
	bool enable;

	if (kstrtobool_from_user(user_buf, count, &enable))
		return -EINVAL;

	if (enable)
		static_branch_enable(&__tlb_defer_switch_to_init_mm);
	else
		static_branch_disable(&__tlb_defer_switch_to_init_mm);

	return count;
}

/* File operations for the debugfs knob registered in init_tlblazy() below. */
static const struct file_operations fops_tlblazy = {
	.read = tlblazy_read_file,
	.write = tlblazy_write_file,
	.llseek = default_llseek,
};

/*
 * Pick the boot-time default for the lazy-TLB deferral static key and
 * expose it as a root-writable debugfs knob.
 */
static int __init init_tlblazy(void)
{
	/*
	 * If we have PCID, then switching to init_mm is reasonably
	 * fast.  If we don't have PCID, then switching to init_mm is
	 * quite slow, so we default to trying to defer it in the
	 * hopes that we can avoid it entirely.  The latter approach
	 * runs the risk of receiving otherwise unnecessary IPIs.
	 *
	 * We can't do this in setup_pcid() because static keys
	 * haven't been initialized yet, and it would blow up
	 * badly.
	 */
	if (boot_cpu_has(X86_FEATURE_PCID))
		static_branch_disable(&__tlb_defer_switch_to_init_mm);

	debugfs_create_file("tlb_defer_switch_to_init_mm", S_IRUSR | S_IWUSR,
			    arch_debugfs_dir, NULL, &fops_tlblazy);
	return 0;
}
late_initcall(init_tlblazy);

0 comments on commit 7ac7f2c

Please sign in to comment.