mm/page_alloc: make deferred page init free pages in MAX_ORDER blocks
The normal page init path frees pages during boot in MAX_ORDER chunks, but
the deferred page init path does it in pageblock-sized blocks.

Change the deferred page init path to work in MAX_ORDER blocks.

For cases where MAX_ORDER is larger than the pageblock order, set the
migrate type to MIGRATE_MOVABLE for all pageblocks covered by the freed page.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Kirill A. Shutemov <[email protected]>
Reviewed-by: Vlastimil Babka <[email protected]>
Acked-by: David Hildenbrand <[email protected]>
Acked-by: Mel Gorman <[email protected]>
Acked-by: Mike Rapoport (IBM) <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
kiryl authored and akpm00 committed Apr 6, 2023
1 parent 4a06f6f commit 3f6dac0
Showing 2 changed files with 12 additions and 9 deletions.
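
For a sense of scale: with the assumed example values MAX_ORDER = 10 and pageblock_order = 9 (4 KiB base pages; both values are configuration and architecture dependent), a MAX_ORDER block is 1024 pages and spans two 512-page pageblocks, which is why the patch below must set the migrate type once per pageblock rather than once per freed chunk. A minimal sketch of that arithmetic:

#include <stdio.h>

int main(void)
{
	/* Assumed example values; the real ones are config/arch dependent. */
	unsigned int max_order = 10;        /* MAX_ORDER */
	unsigned int pageblock_order = 9;   /* pageblock_order */
	unsigned long page_size = 4096;     /* 4 KiB base page */

	unsigned long max_order_nr_pages = 1UL << max_order;
	unsigned long pageblock_nr_pages = 1UL << pageblock_order;

	printf("MAX_ORDER block: %lu pages (%lu KiB)\n",
	       max_order_nr_pages, max_order_nr_pages * page_size / 1024);
	printf("pageblock:       %lu pages (%lu KiB)\n",
	       pageblock_nr_pages, pageblock_nr_pages * page_size / 1024);
	printf("pageblocks per MAX_ORDER block: %lu\n",
	       max_order_nr_pages / pageblock_nr_pages);
	return 0;
}
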
2 changes: 2 additions & 0 deletions include/linux/mmzone.h

@@ -32,6 +32,8 @@
 #endif
 #define MAX_ORDER_NR_PAGES (1 << MAX_ORDER)
 
+#define IS_MAX_ORDER_ALIGNED(pfn) IS_ALIGNED(pfn, MAX_ORDER_NR_PAGES)
+
 /*
  * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
  * costly to service. That is between allocation orders which should
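
The new IS_MAX_ORDER_ALIGNED() helper is just IS_ALIGNED() applied to MAX_ORDER_NR_PAGES. A minimal user-space sketch of the same check (the macro bodies and the MAX_ORDER value here are illustrative stand-ins, not copies of the kernel headers):

#include <stdio.h>

#define MAX_ORDER          10                   /* assumed example value */
#define MAX_ORDER_NR_PAGES (1UL << MAX_ORDER)

/* Power-of-two alignment test, same idea as the kernel's IS_ALIGNED(). */
#define IS_ALIGNED(x, a)          (((x) & ((a) - 1)) == 0)
#define IS_MAX_ORDER_ALIGNED(pfn) IS_ALIGNED((unsigned long)(pfn), MAX_ORDER_NR_PAGES)

int main(void)
{
	unsigned long pfns[] = { 0, 512, 1024, 1536, 2048 };

	for (int i = 0; i < 5; i++)
		printf("pfn %4lu: %s\n", pfns[i],
		       IS_MAX_ORDER_ALIGNED(pfns[i]) ? "MAX_ORDER aligned" : "not aligned");
	return 0;
}
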
19 changes: 10 additions & 9 deletions mm/mm_init.c
@@ -1951,9 +1951,10 @@ static void __init deferred_free_range(unsigned long pfn,
 	page = pfn_to_page(pfn);
 
 	/* Free a large naturally-aligned chunk if possible */
-	if (nr_pages == pageblock_nr_pages && pageblock_aligned(pfn)) {
-		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
-		__free_pages_core(page, pageblock_order);
+	if (nr_pages == MAX_ORDER_NR_PAGES && IS_MAX_ORDER_ALIGNED(pfn)) {
+		for (i = 0; i < nr_pages; i += pageblock_nr_pages)
+			set_pageblock_migratetype(page + i, MIGRATE_MOVABLE);
+		__free_pages_core(page, MAX_ORDER);
 		return;
 	}
 
@@ -1977,19 +1978,19 @@ static inline void __init pgdat_init_report_one_done(void)
 /*
  * Returns true if page needs to be initialized or freed to buddy allocator.
  *
- * We check if a current large page is valid by only checking the validity
+ * We check if a current MAX_ORDER block is valid by only checking the validity
  * of the head pfn.
  */
 static inline bool __init deferred_pfn_valid(unsigned long pfn)
 {
-	if (pageblock_aligned(pfn) && !pfn_valid(pfn))
+	if (IS_MAX_ORDER_ALIGNED(pfn) && !pfn_valid(pfn))
 		return false;
 	return true;
 }
 
 /*
  * Free pages to buddy allocator. Try to free aligned pages in
- * pageblock_nr_pages sizes.
+ * MAX_ORDER_NR_PAGES sizes.
  */
 static void __init deferred_free_pages(unsigned long pfn,
 				       unsigned long end_pfn)
@@ -2000,7 +2001,7 @@ static void __init deferred_free_pages(unsigned long pfn,
 		if (!deferred_pfn_valid(pfn)) {
 			deferred_free_range(pfn - nr_free, nr_free);
 			nr_free = 0;
-		} else if (pageblock_aligned(pfn)) {
+		} else if (IS_MAX_ORDER_ALIGNED(pfn)) {
 			deferred_free_range(pfn - nr_free, nr_free);
 			nr_free = 1;
 		} else {
@@ -2013,7 +2014,7 @@ static void __init deferred_free_pages(unsigned long pfn,
 
 /*
  * Initialize struct pages. We minimize pfn page lookups and scheduler checks
- * by performing it only once every pageblock_nr_pages.
+ * by performing it only once every MAX_ORDER_NR_PAGES.
  * Return number of pages initialized.
  */
 static unsigned long __init deferred_init_pages(struct zone *zone,
@@ -2029,7 +2030,7 @@ static unsigned long __init deferred_init_pages(struct zone *zone,
 		if (!deferred_pfn_valid(pfn)) {
 			page = NULL;
 			continue;
-		} else if (!page || pageblock_aligned(pfn)) {
+		} else if (!page || IS_MAX_ORDER_ALIGNED(pfn)) {
 			page = pfn_to_page(pfn);
 		} else {
 			page++;
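
The key behavioural change is in deferred_free_range(): a full, naturally aligned MAX_ORDER chunk is now freed with a single __free_pages_core(page, MAX_ORDER) call, after every pageblock inside it has been tagged MIGRATE_MOVABLE. A rough user-space sketch of that branch, with hypothetical stand-in functions in place of the kernel helpers and the same assumed example values as above:

#include <stdio.h>

/* Assumed example values; config/arch dependent in the kernel. */
#define MAX_ORDER          10
#define MAX_ORDER_NR_PAGES (1UL << MAX_ORDER)
#define pageblock_order     9
#define pageblock_nr_pages (1UL << pageblock_order)

/* Hypothetical stand-in for set_pageblock_migratetype(page, MIGRATE_MOVABLE). */
static void mark_pageblock_movable(unsigned long pfn)
{
	printf("pageblock at pfn %lu -> MIGRATE_MOVABLE\n", pfn);
}

/* Hypothetical stand-in for __free_pages_core(page, order). */
static void free_block(unsigned long pfn, unsigned int order)
{
	printf("free %lu pages at pfn %lu (order %u)\n", 1UL << order, pfn, order);
}

int main(void)
{
	unsigned long pfn = 2048;	/* a MAX_ORDER-aligned start pfn */

	/*
	 * Mirrors the patched fast path: tag each pageblock inside the
	 * MAX_ORDER chunk, then free the whole chunk in one call.
	 */
	for (unsigned long i = 0; i < MAX_ORDER_NR_PAGES; i += pageblock_nr_pages)
		mark_pageblock_movable(pfn + i);
	free_block(pfn, MAX_ORDER);
	return 0;
}
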
