Skip to content

Commit

Permalink
x86/mm: split vmalloc_sync_all()
Browse files Browse the repository at this point in the history
Commit 3f8fd02 ("mm/vmalloc: Sync unmappings in
__purge_vmap_area_lazy()") introduced a call to vmalloc_sync_all() in
the vunmap() code-path.  While this change was necessary to maintain
correctness on x86-32-pae kernels, it also adds additional cycles for
architectures that don't need it.

Specifically on x86-64 with CONFIG_VMAP_STACK=y some people reported
severe performance regressions in micro-benchmarks because it now also
calls the x86-64 implementation of vmalloc_sync_all() on vunmap().  But
the vmalloc_sync_all() implementation on x86-64 is only needed for newly
created mappings.

To avoid the unnecessary work on x86-64 and to gain the performance
back, split up vmalloc_sync_all() into two functions:

	* vmalloc_sync_mappings(), and
	* vmalloc_sync_unmappings()

Most call-sites to vmalloc_sync_all() only care about new mappings being
synchronized.  The only exception is the new call-site added in the
above mentioned commit.

Shile Zhang directed us to a report of an 80% regression in reaim
throughput.

Fixes: 3f8fd02 ("mm/vmalloc: Sync unmappings in __purge_vmap_area_lazy()")
Reported-by: kernel test robot <[email protected]>
Reported-by: Shile Zhang <[email protected]>
Signed-off-by: Joerg Roedel <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Tested-by: Borislav Petkov <[email protected]>
Acked-by: Rafael J. Wysocki <[email protected]>	[GHES]
Cc: Dave Hansen <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Link: https://lists.01.org/hyperkitty/list/[email protected]/thread/4D3JPPHBNOSPFK2KEPC6KGKS6J25AIDB/
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Linus Torvalds <[email protected]>
  • Loading branch information
joergroedel authored and torvalds committed Mar 22, 2020
1 parent 0715e6c commit 763802b
Show file tree
Hide file tree
Showing 6 changed files with 43 additions and 13 deletions.
26 changes: 24 additions & 2 deletions arch/x86/mm/fault.c
Original file line number Diff line number Diff line change
Expand Up @@ -190,7 +190,7 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
return pmd_k;
}

void vmalloc_sync_all(void)
static void vmalloc_sync(void)
{
unsigned long address;

Expand All @@ -217,6 +217,16 @@ void vmalloc_sync_all(void)
}
}

/*
 * Sync newly created vmalloc-area mappings into the page-tables.
 * On the 32-bit side both the mapping and the unmapping case need
 * the same work, so this wrapper calls the common vmalloc_sync().
 */
void vmalloc_sync_mappings(void)
{
vmalloc_sync();
}

/*
 * Sync removed vmalloc-area mappings out of the page-tables.
 * Shares the common vmalloc_sync() helper with the mapping case,
 * since on 32-bit both operations require the same propagation.
 */
void vmalloc_sync_unmappings(void)
{
vmalloc_sync();
}

/*
* 32-bit:
*
Expand Down Expand Up @@ -319,11 +329,23 @@ static void dump_pagetable(unsigned long address)

#else /* CONFIG_X86_64: */

void vmalloc_sync_all(void)
void vmalloc_sync_mappings(void)
{
/*
 * 64-bit mappings might allocate new p4d/pud pages
 * that need to be propagated to all tasks' PGDs.
 * Only newly created mappings need this sync; unmappings
 * are handled by the (empty) vmalloc_sync_unmappings().
 */
sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
}

void vmalloc_sync_unmappings(void)
{
/*
 * Unmappings never allocate or free p4d/pud pages.
 * No work is required here, which is the whole point of
 * splitting the interface: vunmap() no longer pays for a
 * full sync_global_pgds() on x86-64.
 */
}

/*
* 64-bit:
*
Expand Down
2 changes: 1 addition & 1 deletion drivers/acpi/apei/ghes.c
Original file line number Diff line number Diff line change
Expand Up @@ -171,7 +171,7 @@ int ghes_estatus_pool_init(int num_ghes)
* New allocation must be visible in all pgd before it can be found by
* an NMI allocating from the pool.
*/
vmalloc_sync_all();
vmalloc_sync_mappings();

rc = gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
if (rc)
Expand Down
5 changes: 3 additions & 2 deletions include/linux/vmalloc.h
Original file line number Diff line number Diff line change
Expand Up @@ -141,8 +141,9 @@ extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
unsigned long pgoff);
void vmalloc_sync_all(void);

void vmalloc_sync_mappings(void);
void vmalloc_sync_unmappings(void);

/*
* Lowlevel-APIs (not for driver use!)
*/
Expand Down
2 changes: 1 addition & 1 deletion kernel/notifier.c
Original file line number Diff line number Diff line change
Expand Up @@ -519,7 +519,7 @@ NOKPROBE_SYMBOL(notify_die);

int register_die_notifier(struct notifier_block *nb)
{
vmalloc_sync_all();
vmalloc_sync_mappings();
return atomic_notifier_chain_register(&die_chain, nb);
}
EXPORT_SYMBOL_GPL(register_die_notifier);
Expand Down
10 changes: 7 additions & 3 deletions mm/nommu.c
Original file line number Diff line number Diff line change
Expand Up @@ -370,10 +370,14 @@ void vm_unmap_aliases(void)
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/*
* Implement a stub for vmalloc_sync_all() if the architecture chose not to
* have one.
* Implement a stub for vmalloc_sync_[un]mappings() if the architecture
* chose not to have one.
*/
void __weak vmalloc_sync_all(void)
/* Weak no-op default; architectures needing mapping-sync override it. */
void __weak vmalloc_sync_mappings(void)
{
}

/* Weak no-op default; architectures needing unmapping-sync override it. */
void __weak vmalloc_sync_unmappings(void)
{
}

Expand Down
11 changes: 7 additions & 4 deletions mm/vmalloc.c
Original file line number Diff line number Diff line change
Expand Up @@ -1295,7 +1295,7 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
* First make sure the mappings are removed from all page-tables
* before they are freed.
*/
vmalloc_sync_all();
vmalloc_sync_unmappings();

/*
* TODO: to calculate a flush range without looping.
Expand Down Expand Up @@ -3128,16 +3128,19 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
EXPORT_SYMBOL(remap_vmalloc_range);

/*
* Implement a stub for vmalloc_sync_all() if the architecture chose not to
* have one.
* Implement stubs for vmalloc_sync_[un]mappings() if the architecture chose
* not to have one.
*
* The purpose of this function is to make sure the vmalloc area
* mappings are identical in all page-tables in the system.
*/
void __weak vmalloc_sync_all(void)
/* Weak no-op default; an architecture that must propagate new
 * vmalloc mappings to all page-tables provides its own version. */
void __weak vmalloc_sync_mappings(void)
{
}

/* Weak no-op default; an architecture that must propagate removed
 * vmalloc mappings to all page-tables provides its own version. */
void __weak vmalloc_sync_unmappings(void)
{
}

static int f(pte_t *pte, unsigned long addr, void *data)
{
Expand Down

0 comments on commit 763802b

Please sign in to comment.