mm: remove vmap_page_range_noflush and vunmap_page_range
These have non-static aliases, map_kernel_range_noflush and
unmap_kernel_range_noflush, that differ only in calling convention:
they take addr + size instead of an end address.  Remove the static
variants and fold their implementations into the exported functions
(see the equivalence sketch after the diffstat below).

Signed-off-by: Christoph Hellwig <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Acked-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Christian Borntraeger <[email protected]>
Cc: Christophe Leroy <[email protected]>
Cc: Daniel Vetter <[email protected]>
Cc: David Airlie <[email protected]>
Cc: Gao Xiang <[email protected]>
Cc: Greg Kroah-Hartman <[email protected]>
Cc: Haiyang Zhang <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: "K. Y. Srinivasan" <[email protected]>
Cc: Laura Abbott <[email protected]>
Cc: Mark Rutland <[email protected]>
Cc: Michael Kelley <[email protected]>
Cc: Minchan Kim <[email protected]>
Cc: Nitin Gupta <[email protected]>
Cc: Robin Murphy <[email protected]>
Cc: Sakari Ailus <[email protected]>
Cc: Stephen Hemminger <[email protected]>
Cc: Sumit Semwal <[email protected]>
Cc: Wei Liu <[email protected]>
Cc: Benjamin Herrenschmidt <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Heiko Carstens <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Vasily Gorbik <[email protected]>
Cc: Will Deacon <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Linus Torvalds <[email protected]>
Christoph Hellwig authored and torvalds committed Jun 2, 2020
commit b521c43 (1 parent: 78a0e8c)
mm/vmalloc.c: 1 file changed, 40 additions(+), 58 deletions(-)
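
The change itself is mechanical: call sites that used to pass an end address now pass a length. As a minimal sketch of the equivalence (the adapter name is hypothetical, not part of the patch):

	/* Illustrative only: an (addr, end) caller maps onto the surviving
	 * (addr, size) interface with size = end - addr; both name the
	 * same PFN_UP(end - addr) pages. */
	static void vunmap_page_range_compat(unsigned long addr, unsigned long end)
	{
		unmap_kernel_range_noflush(addr, end - addr);
	}
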
@@ -128,10 +128,24 @@ static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end)
 	} while (p4d++, addr = next, addr != end);
 }
 
-static void vunmap_page_range(unsigned long addr, unsigned long end)
+/**
+ * unmap_kernel_range_noflush - unmap kernel VM area
+ * @addr: start of the VM area to unmap
+ * @size: size of the VM area to unmap
+ *
+ * Unmap PFN_UP(@size) pages at @addr.  The VM area @addr and @size specify
+ * should have been allocated using get_vm_area() and its friends.
+ *
+ * NOTE:
+ * This function does NOT do any cache flushing.  The caller is responsible
+ * for calling flush_cache_vunmap() on to-be-mapped areas before calling this
+ * function and flush_tlb_kernel_range() after.
+ */
+void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
 {
-	pgd_t *pgd;
+	unsigned long end = addr + size;
 	unsigned long next;
+	pgd_t *pgd;
 
 	BUG_ON(addr >= end);
 	pgd = pgd_offset_k(addr);
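
The new kernel-doc moves all flushing to the caller. A hedged sketch of a conforming teardown, assuming addr and size describe an area obtained from get_vm_area(); it mirrors what unmap_kernel_range() does in the final hunk of this patch:

	/* Hypothetical caller: flush caches before unmapping and the TLB
	 * after, as the NOTE above requires. */
	flush_cache_vunmap(addr, addr + size);
	unmap_kernel_range_noflush(addr, size);
	flush_tlb_kernel_range(addr, addr + size);
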
@@ -220,18 +234,30 @@ static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
 	return 0;
 }
 
-/*
- * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
- * will have pfns corresponding to the "pages" array.
+/**
+ * map_kernel_range_noflush - map kernel VM area with the specified pages
+ * @addr: start of the VM area to map
+ * @size: size of the VM area to map
+ * @prot: page protection flags to use
+ * @pages: pages to map
  *
- * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
+ * Map PFN_UP(@size) pages at @addr.  The VM area @addr and @size specify should
+ * have been allocated using get_vm_area() and its friends.
+ *
+ * NOTE:
+ * This function does NOT do any cache flushing.  The caller is responsible for
+ * calling flush_cache_vmap() on to-be-mapped areas before calling this
+ * function.
+ *
+ * RETURNS:
+ * The number of pages mapped on success, -errno on failure.
  */
-static int vmap_page_range_noflush(unsigned long start, unsigned long end,
-				   pgprot_t prot, struct page **pages)
+int map_kernel_range_noflush(unsigned long addr, unsigned long size,
+			     pgprot_t prot, struct page **pages)
 {
+	unsigned long end = addr + size;
 	unsigned long next;
-	unsigned long addr = start;
 	pgd_t *pgd;
 	int err = 0;
 	int nr = 0;

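The mapping side carries the mirror-image contract: the return value is the number of mapped pages or -errno, and cache flushing is again the caller's job, which the in-tree wrapper vmap_page_range() (next hunk) handles via flush_cache_vmap() after mapping. A minimal sketch under assumed surroundings (pages, count, and the error handling are illustrative, not from the patch; assumes <linux/vmalloc.h> and <linux/mm.h>):

	/* Hypothetical caller of the renamed function. */
	struct vm_struct *area = get_vm_area(count * PAGE_SIZE, VM_MAP);
	unsigned long addr;
	int ret;

	if (!area)
		return -ENOMEM;
	addr = (unsigned long)area->addr;

	ret = map_kernel_range_noflush(addr, count * PAGE_SIZE,
				       PAGE_KERNEL, pages);
	if (ret < 0) {
		free_vm_area(area);
		return ret;
	}
	flush_cache_vmap(addr, addr + count * PAGE_SIZE);	/* caller-side flush */
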
@@ -252,7 +278,7 @@ static int vmap_page_range(unsigned long start, unsigned long end,
 {
 	int ret;
 
-	ret = vmap_page_range_noflush(start, end, prot, pages);
+	ret = map_kernel_range_noflush(start, end - start, prot, pages);
 	flush_cache_vmap(start, end);
 	return ret;
 }
@@ -1227,7 +1253,7 @@ EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
  */
 static void unmap_vmap_area(struct vmap_area *va)
 {
-	vunmap_page_range(va->va_start, va->va_end);
+	unmap_kernel_range_noflush(va->va_start, va->va_end - va->va_start);
 }
 
 /*
@@ -1687,7 +1713,7 @@ static void vb_free(unsigned long addr, unsigned long size)
 	rcu_read_unlock();
 	BUG_ON(!vb);
 
-	vunmap_page_range(addr, addr + size);
+	unmap_kernel_range_noflush(addr, size);
 
 	if (debug_pagealloc_enabled_static())
 		flush_tlb_kernel_range(addr, addr + size);
@@ -1985,50 +2011,6 @@ void __init vmalloc_init(void)
 	vmap_initialized = true;
 }
 
-/**
- * map_kernel_range_noflush - map kernel VM area with the specified pages
- * @addr: start of the VM area to map
- * @size: size of the VM area to map
- * @prot: page protection flags to use
- * @pages: pages to map
- *
- * Map PFN_UP(@size) pages at @addr.  The VM area @addr and @size
- * specify should have been allocated using get_vm_area() and its
- * friends.
- *
- * NOTE:
- * This function does NOT do any cache flushing.  The caller is
- * responsible for calling flush_cache_vmap() on to-be-mapped areas
- * before calling this function.
- *
- * RETURNS:
- * The number of pages mapped on success, -errno on failure.
- */
-int map_kernel_range_noflush(unsigned long addr, unsigned long size,
-			     pgprot_t prot, struct page **pages)
-{
-	return vmap_page_range_noflush(addr, addr + size, prot, pages);
-}
-
-/**
- * unmap_kernel_range_noflush - unmap kernel VM area
- * @addr: start of the VM area to unmap
- * @size: size of the VM area to unmap
- *
- * Unmap PFN_UP(@size) pages at @addr.  The VM area @addr and @size
- * specify should have been allocated using get_vm_area() and its
- * friends.
- *
- * NOTE:
- * This function does NOT do any cache flushing.  The caller is
- * responsible for calling flush_cache_vunmap() on to-be-mapped areas
- * before calling this function and flush_tlb_kernel_range() after.
- */
-void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
-{
-	vunmap_page_range(addr, addr + size);
-}
-
 /**
  * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
  * @addr: start of the VM area to unmap
@@ -2042,7 +2024,7 @@ void unmap_kernel_range(unsigned long addr, unsigned long size)
 	unsigned long end = addr + size;
 
 	flush_cache_vunmap(addr, end);
-	vunmap_page_range(addr, end);
+	unmap_kernel_range_noflush(addr, size);
 	flush_tlb_kernel_range(addr, end);
 }

