mm/page_alloc: combine __alloc_pages and __alloc_pages_nodemask
There are only two callers of __alloc_pages() so prune the thicket of
alloc_page variants by combining the two functions together.  Current
callers of __alloc_pages() simply add an extra 'NULL' parameter and
current callers of __alloc_pages_nodemask() call __alloc_pages() instead.
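
For illustration, the call-site conversion looks like this (a sketch distilled
from the diff below, not additional code from the commit):

	/* Before: two entry points. */
	page = __alloc_pages(gfp, order, nid);			/* no nodemask */
	page = __alloc_pages_nodemask(gfp, order, nid, nmask);

	/* After: one entry point; former __alloc_pages() callers pass NULL. */
	page = __alloc_pages(gfp, order, nid, NULL);
	page = __alloc_pages(gfp, order, nid, nmask);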

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Cc: Mike Rapoport <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Matthew Wilcox (Oracle) authored and torvalds committed Apr 30, 2021
1 parent 6e5e0f2 commit 84172f4
Showing 7 changed files with 13 additions and 21 deletions.
2 changes: 1 addition & 1 deletion Documentation/admin-guide/mm/transhuge.rst
@@ -402,7 +402,7 @@ compact_fail
 	but failed.
 
 It is possible to establish how long the stalls were using the function
-tracer to record how long was spent in __alloc_pages_nodemask and
+tracer to record how long was spent in __alloc_pages() and
 using the mm_page_alloc tracepoint to identify which allocations were
 for huge pages.
13 changes: 3 additions & 10 deletions include/linux/gfp.h
@@ -515,15 +515,8 @@ static inline int arch_make_page_accessible(struct page *page)
 }
 #endif
 
-struct page *
-__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
-							nodemask_t *nodemask);
-
-static inline struct page *
-__alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid)
-{
-	return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, NULL);
-}
+struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
+		nodemask_t *nodemask);
 
 /*
  * Allocate pages, preferring the node given as nid. The node must be valid and
@@ -535,7 +528,7 @@ __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
 	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
 	VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid));
 
-	return __alloc_pages(gfp_mask, order, nid);
+	return __alloc_pages(gfp_mask, order, nid, NULL);
 }
 
 /*
2 changes: 1 addition & 1 deletion mm/hugetlb.c
@@ -1616,7 +1616,7 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
 		gfp_mask |= __GFP_RETRY_MAYFAIL;
 	if (nid == NUMA_NO_NODE)
 		nid = numa_mem_id();
-	page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
+	page = __alloc_pages(gfp_mask, order, nid, nmask);
 	if (page)
 		__count_vm_event(HTLB_BUDDY_PGALLOC);
 	else
4 changes: 2 additions & 2 deletions mm/internal.h
@@ -145,10 +145,10 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
  * family of functions.
  *
  * nodemask, migratetype and highest_zoneidx are initialized only once in
- * __alloc_pages_nodemask() and then never change.
+ * __alloc_pages() and then never change.
  *
  * zonelist, preferred_zone and highest_zoneidx are set first in
- * __alloc_pages_nodemask() for the fast path, and might be later changed
+ * __alloc_pages() for the fast path, and might be later changed
  * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
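
For reference, the structure this comment block documents is defined nearby in
mm/internal.h; at the time of this commit it looks roughly like this (a sketch
of the definition as of this kernel era; exact field order may differ):

	/*
	 * Mostly immutable allocation parameters passed between the
	 * functions involved in an allocation.
	 */
	struct alloc_context {
		struct zonelist *zonelist;
		nodemask_t *nodemask;
		struct zoneref *preferred_zoneref;
		int migratetype;
		enum zone_type highest_zoneidx;
		bool spread_dirty_pages;
	};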
6 changes: 3 additions & 3 deletions mm/mempolicy.c
@@ -2140,7 +2140,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
 {
 	struct page *page;
 
-	page = __alloc_pages(gfp, order, nid);
+	page = __alloc_pages(gfp, order, nid, NULL);
 	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
 	if (!static_branch_likely(&vm_numa_stat_key))
 		return page;
@@ -2237,7 +2237,7 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 
 	nmask = policy_nodemask(gfp, pol);
 	preferred_nid = policy_node(gfp, pol, node);
-	page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
+	page = __alloc_pages(gfp, order, preferred_nid, nmask);
 	mpol_cond_put(pol);
 out:
 	return page;
@@ -2274,7 +2274,7 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 	if (pol->mode == MPOL_INTERLEAVE)
 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
 	else
-		page = __alloc_pages_nodemask(gfp, order,
+		page = __alloc_pages(gfp, order,
 				policy_node(gfp, pol, numa_node_id()),
 				policy_nodemask(gfp, pol));
 
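
As a usage sketch of the combined entry point (a hypothetical caller, not part
of this commit): confining an order-0 allocation to an explicit set of nodes
now means building a nodemask and passing it directly:

	#include <linux/gfp.h>
	#include <linux/nodemask.h>
	#include <linux/topology.h>

	static struct page *alloc_page_on_nodes_0_1(gfp_t gfp)
	{
		nodemask_t allowed;

		nodes_clear(allowed);
		node_set(0, allowed);
		node_set(1, allowed);

		/* Prefer the local node; fall back only within 'allowed'. */
		return __alloc_pages(gfp, 0, numa_node_id(), &allowed);
	}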
2 changes: 1 addition & 1 deletion mm/migrate.c
@@ -1617,7 +1617,7 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
 	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
 		gfp_mask |= __GFP_HIGHMEM;
 
-	new_page = __alloc_pages_nodemask(gfp_mask, order, nid, mtc->nmask);
+	new_page = __alloc_pages(gfp_mask, order, nid, mtc->nmask);
 
 	if (new_page && PageTransHuge(new_page))
 		prep_transhuge_page(new_page);
5 changes: 2 additions & 3 deletions mm/page_alloc.c
@@ -5013,8 +5013,7 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
 /*
  * This is the 'heart' of the zoned buddy allocator.
  */
-struct page *
-__alloc_pages_nodemask(gfp_t gfp, unsigned int order, int preferred_nid,
+struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
 							nodemask_t *nodemask)
 {
 	struct page *page;
@@ -5076,7 +5075,7 @@ __alloc_pages_nodemask(gfp_t gfp, unsigned int order, int preferred_nid,
 
 	return page;
 }
-EXPORT_SYMBOL(__alloc_pages_nodemask);
+EXPORT_SYMBOL(__alloc_pages);
 
 /*
  * Common helper functions. Never use with __GFP_HIGHMEM because the returned
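
For orientation, the renamed function's body (unchanged by this patch apart
from the name) follows roughly this shape. This is a heavily condensed sketch;
statistics, memcg accounting, and the gfp_allowed_mask/order sanity checks are
elided:

	struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
							nodemask_t *nodemask)
	{
		struct page *page;
		unsigned int alloc_flags = ALLOC_WMARK_LOW;
		gfp_t alloc_gfp;
		struct alloc_context ac = { };

		/* Resolve zonelist, nodemask, migratetype, highest_zoneidx once. */
		if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask,
					 &ac, &alloc_gfp, &alloc_flags))
			return NULL;

		/* Fast path: scan the zonelist at the low watermark. */
		page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
		if (likely(page))
			return page;

		/* Slow path: wake kswapd, reclaim, compact, possibly OOM-kill. */
		return __alloc_pages_slowpath(alloc_gfp, order, &ac);
	}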
