x86/mm/tlb: Privatize cpu_tlbstate
cpu_tlbstate is mostly private and only the variable is_lazy is shared.
This causes some false sharing when TLB flushes are performed.

Break cpu_tlbstate into cpu_tlbstate and cpu_tlbstate_shared, and mark
each one accordingly.

Signed-off-by: Nadav Amit <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Reviewed-by: Dave Hansen <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
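
As a minimal user-space sketch of the same idea (hypothetical names, not the kernel code): keep the fields that only the owning CPU writes in one per-CPU structure, and move the single flag that remote CPUs poll during a TLB shootdown into its own cache-line-aligned structure, so remote reads of is_lazy stop contending with the owner's frequent writes to the private fields.

/* Sketch only: cpu_state_private, cpu_state_shared, cpu_needs_flush_ipi and
 * NR_CPUS are illustrative names, not the kernel's. */
#include <stdalign.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CACHE_LINE	64
#define NR_CPUS		8

/* Private per-CPU state: written frequently, but only by its owning CPU. */
struct cpu_state_private {
	uint64_t loaded_ctx;	/* stand-in for loaded_mm / loaded_mm_asid */
	uint16_t next_asid;
};

/* Shared per-CPU state: the one field remote CPUs read when flushing. */
struct cpu_state_shared {
	alignas(CACHE_LINE) bool is_lazy;	/* one cache line per CPU */
};

static struct cpu_state_private state_private[NR_CPUS];
static struct cpu_state_shared state_shared[NR_CPUS];

/* Remote side of a shootdown: lazy CPUs can be skipped instead of IPI'd. */
static bool cpu_needs_flush_ipi(int cpu)
{
	return !state_shared[cpu].is_lazy;
}

int main(void)
{
	state_private[1].next_asid = 2;	/* owner-only traffic, private line */
	state_shared[1].is_lazy = true;	/* the only remotely-read field */

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_needs_flush_ipi(cpu))
			printf("would send flush IPI to cpu %d\n", cpu);
	return 0;
}

In the patch itself the same intent is expressed through per-CPU placement: cpu_tlbstate drops the SHARED_ALIGNED attribute while the new, remotely-read cpu_tlbstate_shared keeps it.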
anadav authored and Ingo Molnar committed Mar 6, 2021
1 parent 4ce94ea commit 2f4305b
Showing 4 changed files with 33 additions and 27 deletions.
39 changes: 21 additions & 18 deletions arch/x86/include/asm/tlbflush.h
@@ -89,23 +89,6 @@ struct tlb_state {
u16 loaded_mm_asid;
u16 next_asid;

-/*
-* We can be in one of several states:
-*
-* - Actively using an mm. Our CPU's bit will be set in
-* mm_cpumask(loaded_mm) and is_lazy == false;
-*
-* - Not using a real mm. loaded_mm == &init_mm. Our CPU's bit
-* will not be set in mm_cpumask(&init_mm) and is_lazy == false.
-*
-* - Lazily using a real mm. loaded_mm != &init_mm, our bit
-* is set in mm_cpumask(loaded_mm), but is_lazy == true.
-* We're heuristically guessing that the CR3 load we
-* skipped more than makes up for the overhead added by
-* lazy mode.
-*/
-bool is_lazy;

/*
* If set we changed the page tables in such a way that we
* needed an invalidation of all contexts (aka. PCIDs / ASIDs).
@@ -151,7 +134,27 @@
*/
struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
};
-DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
+DECLARE_PER_CPU_ALIGNED(struct tlb_state, cpu_tlbstate);

+struct tlb_state_shared {
+/*
+* We can be in one of several states:
+*
+* - Actively using an mm. Our CPU's bit will be set in
+* mm_cpumask(loaded_mm) and is_lazy == false;
+*
+* - Not using a real mm. loaded_mm == &init_mm. Our CPU's bit
+* will not be set in mm_cpumask(&init_mm) and is_lazy == false.
+*
+* - Lazily using a real mm. loaded_mm != &init_mm, our bit
+* is set in mm_cpumask(loaded_mm), but is_lazy == true.
+* We're heuristically guessing that the CR3 load we
+* skipped more than makes up for the overhead added by
+* lazy mode.
+*/
+bool is_lazy;
+};
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared);

bool nmi_uaccess_okay(void);
#define nmi_uaccess_okay nmi_uaccess_okay
2 changes: 1 addition & 1 deletion arch/x86/kernel/alternative.c
@@ -813,7 +813,7 @@ static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
* with a stale address space WITHOUT being in lazy mode after
* restoring the previous mm.
*/
-if (this_cpu_read(cpu_tlbstate.is_lazy))
+if (this_cpu_read(cpu_tlbstate_shared.is_lazy))
leave_mm(smp_processor_id());

temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
2 changes: 1 addition & 1 deletion arch/x86/mm/init.c
@@ -1017,7 +1017,7 @@ void __init zone_sizes_init(void)
free_area_init(max_zone_pfns);
}

-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
+__visible DEFINE_PER_CPU_ALIGNED(struct tlb_state, cpu_tlbstate) = {
.loaded_mm = &init_mm,
.next_asid = 1,
.cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */
17 changes: 10 additions & 7 deletions arch/x86/mm/tlb.c
@@ -300,7 +300,7 @@ void leave_mm(int cpu)
return;

/* Warn if we're not lazy. */
-WARN_ON(!this_cpu_read(cpu_tlbstate.is_lazy));
+WARN_ON(!this_cpu_read(cpu_tlbstate_shared.is_lazy));

switch_mm(NULL, &init_mm, NULL);
}
@@ -424,7 +424,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
{
struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);
u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
-bool was_lazy = this_cpu_read(cpu_tlbstate.is_lazy);
+bool was_lazy = this_cpu_read(cpu_tlbstate_shared.is_lazy);
unsigned cpu = smp_processor_id();
u64 next_tlb_gen;
bool need_flush;
@@ -469,7 +469,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
__flush_tlb_all();
}
#endif
-this_cpu_write(cpu_tlbstate.is_lazy, false);
+this_cpu_write(cpu_tlbstate_shared.is_lazy, false);

/*
* The membarrier system call requires a full memory barrier and
@@ -490,7 +490,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
/*
* Even in lazy TLB mode, the CPU should stay set in the
* mm_cpumask. The TLB shootdown code can figure out from
-* cpu_tlbstate.is_lazy whether or not to send an IPI.
+* cpu_tlbstate_shared.is_lazy whether or not to send an IPI.
*/
if (WARN_ON_ONCE(real_prev != &init_mm &&
!cpumask_test_cpu(cpu, mm_cpumask(next))))
@@ -598,7 +598,7 @@ void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
return;

-this_cpu_write(cpu_tlbstate.is_lazy, true);
+this_cpu_write(cpu_tlbstate_shared.is_lazy, true);
}

/*
@@ -690,7 +690,7 @@ static void flush_tlb_func(void *info)
VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
loaded_mm->context.ctx_id);

-if (this_cpu_read(cpu_tlbstate.is_lazy)) {
+if (this_cpu_read(cpu_tlbstate_shared.is_lazy)) {
/*
* We're in lazy mode. We need to at least flush our
* paging-structure cache to avoid speculatively reading
@@ -790,11 +790,14 @@ static void flush_tlb_func(void *info)

static bool tlb_is_not_lazy(int cpu)
{
-return !per_cpu(cpu_tlbstate.is_lazy, cpu);
+return !per_cpu(cpu_tlbstate_shared.is_lazy, cpu);
}

static DEFINE_PER_CPU(cpumask_t, flush_tlb_mask);

+DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared);
+EXPORT_PER_CPU_SYMBOL(cpu_tlbstate_shared);

STATIC_NOPV void native_flush_tlb_multi(const struct cpumask *cpumask,
const struct flush_tlb_info *info)
{
