Commit

mm/vmalloc: introduce alloc_pages_bulk_array_mempolicy to accelerate memory allocation

Commit ffb29b1 ("mm/vmalloc: fix numa spreading for large hash
tables") can cause significant performance regressions in some
situations, as Andrew mentioned in [1].  The main case is vmalloc:
vmalloc allocates pages with NUMA_NO_NODE by default, which results
in pages being allocated one by one.

To solve this, __alloc_pages_bulk and mempolicy should be considered
at the same time:

1) If a node is specified in the memory allocation request, allocate all
   pages with __alloc_pages_bulk.

2) If allocating memory with an interleave policy, calculate how many
   pages should be allocated on each node, and use __alloc_pages_bulk to
   allocate the pages on each node (see the worked example below).
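
As a worked illustration of the interleave split (example numbers only,
not taken from the patch): for nr_pages = 10 spread over 3 interleave
nodes, nr_pages_per_node = 10 / 3 = 3 and delta = 10 - 3 * 3 = 1, so the
first node visited receives one bulk request for 4 pages and the other
two receive requests for 3 pages each.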

[1]: https://lore.kernel.org/lkml/CALvZod4G3SzP3kWxQYn0fj+VgG-G3yWXz=gz17+3N57ru1iajw@mail.gmail.com/t/#m750c8e3231206134293b089feaa090590afa0f60

[[email protected]: coding style fixes]
[[email protected]: make two functions static]
[[email protected]: fix CONFIG_NUMA=n build]

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Chen Wandun <[email protected]>
Reviewed-by: Uladzislau Rezki (Sony) <[email protected]>
Cc: Eric Dumazet <[email protected]>
Cc: Shakeel Butt <[email protected]>
Cc: Nicholas Piggin <[email protected]>
Cc: Kefeng Wang <[email protected]>
Cc: Hanjun Guo <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Chen Wandun authored and torvalds committed Nov 6, 2021
1 parent b7d90e7 commit c00b6b9
Showing 3 changed files with 102 additions and 4 deletions.
4 changes: 4 additions & 0 deletions include/linux/gfp.h
@@ -535,6 +535,10 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
struct list_head *page_list,
struct page **page_array);

unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
unsigned long nr_pages,
struct page **page_array);

/* Bulk allocate order-0 pages */
static inline unsigned long
alloc_pages_bulk_list(gfp_t gfp, unsigned long nr_pages, struct list_head *list)
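For orientation only, here is a minimal, hypothetical caller sketch of the
declaration above (assuming CONFIG_NUMA=y; the demo_bulk_alloc() helper is
illustrative and not part of this patch — the real call site is the
vm_area_alloc_pages() change in mm/vmalloc.c further down): bulk-fill an
initially empty page array while honouring the current task's mempolicy,
then fall back to the single-page allocator for any shortfall.

#include <linux/errno.h>
#include <linux/gfp.h>

/*
 * Hypothetical usage sketch, not part of this patch: fill an initially
 * empty (zeroed) array with nr_pages order-0 pages, honouring the
 * calling task's mempolicy, and fall back to the single-page allocator
 * for whatever the bulk call did not cover.
 */
static int demo_bulk_alloc(gfp_t gfp, unsigned long nr_pages,
			   struct page **pages)
{
	unsigned long got;

	/* May return fewer than nr_pages; it never over-allocates. */
	got = alloc_pages_bulk_array_mempolicy(gfp, nr_pages, pages);

	/* Single-page fallback for the remainder of the array. */
	while (got < nr_pages) {
		struct page *page = alloc_page(gfp);

		if (!page)
			return -ENOMEM;
		pages[got++] = page;
	}

	return 0;
}

The helper may legitimately return fewer pages than requested, so a caller
is expected to handle a partial result, as the vmalloc change below does.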
82 changes: 82 additions & 0 deletions mm/mempolicy.c
@@ -2196,6 +2196,88 @@ struct page *alloc_pages(gfp_t gfp, unsigned order)
}
EXPORT_SYMBOL(alloc_pages);

static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
struct mempolicy *pol, unsigned long nr_pages,
struct page **page_array)
{
int nodes;
unsigned long nr_pages_per_node;
int delta;
int i;
unsigned long nr_allocated;
unsigned long total_allocated = 0;

nodes = nodes_weight(pol->nodes);
nr_pages_per_node = nr_pages / nodes;
delta = nr_pages - nodes * nr_pages_per_node;

for (i = 0; i < nodes; i++) {
if (delta) {
nr_allocated = __alloc_pages_bulk(gfp,
interleave_nodes(pol), NULL,
nr_pages_per_node + 1, NULL,
page_array);
delta--;
} else {
nr_allocated = __alloc_pages_bulk(gfp,
interleave_nodes(pol), NULL,
nr_pages_per_node, NULL, page_array);
}

page_array += nr_allocated;
total_allocated += nr_allocated;
}

return total_allocated;
}

static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
struct mempolicy *pol, unsigned long nr_pages,
struct page **page_array)
{
gfp_t preferred_gfp;
unsigned long nr_allocated = 0;

preferred_gfp = gfp | __GFP_NOWARN;
preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);

nr_allocated = __alloc_pages_bulk(preferred_gfp, nid, &pol->nodes,
nr_pages, NULL, page_array);

if (nr_allocated < nr_pages)
nr_allocated += __alloc_pages_bulk(gfp, numa_node_id(), NULL,
nr_pages - nr_allocated, NULL,
page_array + nr_allocated);
return nr_allocated;
}

/* Bulk page allocation and mempolicy should be considered at the
 * same time in some situations, such as vmalloc.
 *
 * It can accelerate memory allocation, especially for interleaved
 * allocations.
 */
unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
unsigned long nr_pages, struct page **page_array)
{
struct mempolicy *pol = &default_policy;

if (!in_interrupt() && !(gfp & __GFP_THISNODE))
pol = get_task_policy(current);

if (pol->mode == MPOL_INTERLEAVE)
return alloc_pages_bulk_array_interleave(gfp, pol,
nr_pages, page_array);

if (pol->mode == MPOL_PREFERRED_MANY)
return alloc_pages_bulk_array_preferred_many(gfp,
numa_node_id(), pol, nr_pages, page_array);

return __alloc_pages_bulk(gfp, policy_node(gfp, pol, numa_node_id()),
policy_nodemask(gfp, pol), nr_pages, NULL,
page_array);
}

int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
struct mempolicy *pol = mpol_dup(vma_policy(src));
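To make the MPOL_PREFERRED_MANY path above concrete (illustrative numbers
only): for a request of 100 pages with preferred nodes {1,2}, the first
bulk call is restricted to those nodes with __GFP_NOWARN added and
__GFP_DIRECT_RECLAIM/__GFP_NOFAIL cleared; if it returns only 70 pages, a
second bulk call requests the remaining 30 preferring the local node (with
no nodemask restriction) using the caller's original gfp flags.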
20 changes: 16 additions & 4 deletions mm/vmalloc.c
@@ -2843,7 +2843,7 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
* to fails, fallback to a single page allocator that is
* more permissive.
*/
-if (!order && nid != NUMA_NO_NODE) {
+if (!order) {
while (nr_allocated < nr_pages) {
unsigned int nr, nr_pages_request;

@@ -2855,8 +2855,20 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
*/
nr_pages_request = min(100U, nr_pages - nr_allocated);

-nr = alloc_pages_bulk_array_node(gfp, nid,
-nr_pages_request, pages + nr_allocated);
+/* Memory allocation should consider mempolicy; we can't
+ * wrongly use the nearest node when nid == NUMA_NO_NODE,
+ * otherwise memory may be allocated on only one node,
+ * but the mempolicy wants to allocate memory by interleaving.
+ */
+if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE)
+nr = alloc_pages_bulk_array_mempolicy(gfp,
+nr_pages_request,
+pages + nr_allocated);
+
+else
+nr = alloc_pages_bulk_array_node(gfp, nid,
+nr_pages_request,
+pages + nr_allocated);

nr_allocated += nr;
cond_resched();
@@ -2868,7 +2880,7 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
if (nr != nr_pages_request)
break;
}
-} else if (order)
+} else
/*
* Compound pages required for remap_vmalloc_page if
* high-order pages.
