mm/page_alloc: use only one PCP list for THP-sized allocations
The per_cpu_pages is cache-aligned on a standard x86-64 distribution
configuration but a later patch will add a new field which would push the
structure into the next cache line.  Use only one list to store THP-sized
pages on the per-cpu list.  This assumes that the vast majority of
THP-sized allocations are GFP_MOVABLE, but even if some were of another
type, they would not contribute to serious fragmentation that
potentially causes a later THP allocation failure.  Align per_cpu_pages
on the cacheline boundary to ensure there is no false cache sharing.

After this patch, the structure sizing is:

struct per_cpu_pages {
        int                        count;                /*     0     4 */
        int                        high;                 /*     4     4 */
        int                        batch;                /*     8     4 */
        short int                  free_factor;          /*    12     2 */
        short int                  expire;               /*    14     2 */
        struct list_head           lists[13];            /*    16   208 */

        /* size: 256, cachelines: 4, members: 6 */
        /* padding: 32 */
} __attribute__((__aligned__(64)));
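
The __aligned__(64) that pahole reports comes from the
____cacheline_aligned_in_smp annotation added in the mmzone.h hunk
below.  As a simplified sketch (not the kernel's verbatim definitions,
which live in include/linux/cache.h and are arch-dependent), on an SMP
x86-64 build the annotation boils down to:

/*
 * Simplified sketch: SMP_CACHE_BYTES is the L1 cache line size,
 * 64 bytes on x86-64.  Aligning the struct to it also rounds its
 * size up to a multiple of 64, hence the 32 bytes of padding above.
 */
#define SMP_CACHE_BYTES 64
#define ____cacheline_aligned_in_smp \
	__attribute__((__aligned__(SMP_CACHE_BYTES)))

struct aligned_example {
	int counter;
} ____cacheline_aligned_in_smp;	/* sizeof() == 64 */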

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Mel Gorman <[email protected]>
Tested-by: Minchan Kim <[email protected]>
Acked-by: Minchan Kim <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
Tested-by: Yu Zhao <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Marcelo Tosatti <[email protected]>
Cc: Marek Szyprowski <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Nicolas Saenz Julienne <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
gormanm authored and akpm00 committed Jul 18, 2022
1 parent bf75f20 commit 5d0a661
Showing 2 changed files with 9 additions and 6 deletions.
11 changes: 7 additions & 4 deletions include/linux/mmzone.h
@@ -355,15 +355,18 @@ enum zone_watermarks {
 };
 
 /*
- * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER plus one additional
- * for pageblock size for THP if configured.
+ * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER. One additional list
+ * for THP which will usually be GFP_MOVABLE. Even if it is another type,
+ * it should not contribute to serious fragmentation causing THP allocation
+ * failures.
  */
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define NR_PCP_THP 1
 #else
 #define NR_PCP_THP 0
 #endif
-#define NR_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1 + NR_PCP_THP))
+#define NR_LOWORDER_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1))
+#define NR_PCP_LISTS (NR_LOWORDER_PCP_LISTS + NR_PCP_THP)
 
 /*
  * Shift to encode migratetype and order in the same integer, with order
@@ -389,7 +392,7 @@ struct per_cpu_pages {
 
 	/* Lists of pages, one per migrate type stored on the pcp-lists */
 	struct list_head lists[NR_PCP_LISTS];
-};
+} ____cacheline_aligned_in_smp;
 
 struct per_cpu_zonestat {
 #ifdef CONFIG_SMP
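
To see where the lists[13] in the pahole output comes from: with the
values these macros have on a typical x86-64 configuration
(MIGRATE_PCPTYPES == 3 for unmovable, movable and reclaimable;
PAGE_ALLOC_COSTLY_ORDER == 3; THP enabled), the new defines work out as
in this standalone sketch:

/*
 * Standalone sketch, not kernel code: assumed typical x86-64 values.
 */
#define MIGRATE_PCPTYPES	3	/* unmovable, movable, reclaimable */
#define PAGE_ALLOC_COSTLY_ORDER	3
#define NR_PCP_THP		1	/* CONFIG_TRANSPARENT_HUGEPAGE=y */

#define NR_LOWORDER_PCP_LISTS	(MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1))
#define NR_PCP_LISTS		(NR_LOWORDER_PCP_LISTS + NR_PCP_THP)

/* 3 migratetypes * 4 orders (0..3) = 12 low-order lists, plus 1 THP list */
_Static_assert(NR_LOWORDER_PCP_LISTS == 12, "orders 0..3, 3 types each");
_Static_assert(NR_PCP_LISTS == 13, "matches lists[13] above");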
4 changes: 2 additions & 2 deletions mm/page_alloc.c
@@ -653,7 +653,7 @@ static inline unsigned int order_to_pindex(int migratetype, int order)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	if (order > PAGE_ALLOC_COSTLY_ORDER) {
 		VM_BUG_ON(order != pageblock_order);
-		base = PAGE_ALLOC_COSTLY_ORDER + 1;
+		return NR_LOWORDER_PCP_LISTS;
 	}
 #else
 	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
@@ -667,7 +667,7 @@ static inline int pindex_to_order(unsigned int pindex)
 	int order = pindex / MIGRATE_PCPTYPES;
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	if (order > PAGE_ALLOC_COSTLY_ORDER)
+	if (pindex == NR_LOWORDER_PCP_LISTS)
 		order = pageblock_order;
 #else
 	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
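
For illustration, a standalone model of the pindex encoding after this
change (assumed constants as in the earlier sketch; pageblock_order is
taken to be 9, the 2MB THP order with 4K base pages, and the low-order
formula follows the kernel's order/migratetype interleaving): every
THP-sized page, whatever its migratetype, now lands on the single list
at index NR_LOWORDER_PCP_LISTS, and pindex_to_order() recognises that
index directly instead of deriving the order by division.

#include <assert.h>

#define MIGRATE_PCPTYPES	3
#define PAGE_ALLOC_COSTLY_ORDER	3
#define NR_LOWORDER_PCP_LISTS	(MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1))
#define PAGEBLOCK_ORDER		9	/* assumption: 2MB THP, 4K pages */

/* Mirrors order_to_pindex() after the patch: low orders interleave
 * migratetypes; anything above the costly order shares one list. */
static unsigned int order_to_pindex(int migratetype, int order)
{
	if (order > PAGE_ALLOC_COSTLY_ORDER)
		return NR_LOWORDER_PCP_LISTS;
	return (order * MIGRATE_PCPTYPES) + migratetype;
}

/* Mirrors pindex_to_order(): the THP list is identified by its index,
 * not by the order recovered through division. */
static int pindex_to_order(unsigned int pindex)
{
	int order = pindex / MIGRATE_PCPTYPES;

	if (pindex == NR_LOWORDER_PCP_LISTS)
		order = PAGEBLOCK_ORDER;
	return order;
}

int main(void)
{
	/* Low-order allocations round-trip through their order. */
	assert(pindex_to_order(order_to_pindex(1, 2)) == 2);
	/* All THP-sized allocations map to the last list, index 12. */
	assert(order_to_pindex(0, PAGEBLOCK_ORDER) == 12);
	assert(order_to_pindex(2, PAGEBLOCK_ORDER) == 12);
	assert(pindex_to_order(12) == PAGEBLOCK_ORDER);
	return 0;
}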
