mm: meminit: move page initialization into a separate function
Currently, memmap_init_zone() has all the smarts for initializing a single
page.  A subset of this is required for parallel page initialisation and
so this patch breaks up the monolithic function in preparation.

Signed-off-by: Robin Holt <[email protected]>
Signed-off-by: Nathan Zimmer <[email protected]>
Signed-off-by: Mel Gorman <[email protected]>
Tested-by: Nate Zimmer <[email protected]>
Tested-by: Waiman Long <[email protected]>
Tested-by: Daniel J Blueman <[email protected]>
Acked-by: Pekka Enberg <[email protected]>
Cc: Robin Holt <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Waiman Long <[email protected]>
Cc: Scott Norton <[email protected]>
Cc: "Luck, Tony" <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Robin Holt authored and torvalds committed Jul 1, 2015
1 parent 8e7a7f8 commit 1e8ce83
Showing 1 changed file with 46 additions and 33 deletions.
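
As the changelog notes, the point of the extraction is that a parallel initialiser can later drive the per-page work over an arbitrary pfn span without carrying along the rest of memmap_init_zone(). A minimal sketch of what such a caller could look like (hypothetical here, since this commit only performs the split; init_pfn_range() is an illustrative name, not something this patch adds):

	/* Hypothetical caller, not part of this commit. */
	static void __meminit init_pfn_range(unsigned long start_pfn,
					     unsigned long end_pfn,
					     unsigned long zone, int nid)
	{
		unsigned long pfn;

		/*
		 * Each worker can be handed a disjoint [start_pfn, end_pfn)
		 * span, so separate ranges of struct pages can be
		 * initialized concurrently.
		 */
		for (pfn = start_pfn; pfn < end_pfn; pfn++)
			__init_single_pfn(pfn, zone, nid);
	}

The split is what makes a caller of this shape possible without duplicating the per-page logic.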
mm/page_alloc.c: 46 additions & 33 deletions

--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -764,6 +764,51 @@ static int free_tail_pages_check(struct page *head_page, struct page *page)
 	return 0;
 }
 
+static void __meminit __init_single_page(struct page *page, unsigned long pfn,
+				unsigned long zone, int nid)
+{
+	struct zone *z = &NODE_DATA(nid)->node_zones[zone];
+
+	set_page_links(page, zone, nid, pfn);
+	mminit_verify_page_links(page, zone, nid, pfn);
+	init_page_count(page);
+	page_mapcount_reset(page);
+	page_cpupid_reset_last(page);
+	SetPageReserved(page);
+
+	/*
+	 * Mark the block movable so that blocks are reserved for
+	 * movable at startup. This will force kernel allocations
+	 * to reserve their blocks rather than leaking throughout
+	 * the address space during boot when many long-lived
+	 * kernel allocations are made. Later some blocks near
+	 * the start are marked MIGRATE_RESERVE by
+	 * setup_zone_migrate_reserve()
+	 *
+	 * bitmap is created for zone's valid pfn range. but memmap
+	 * can be created for invalid pages (for alignment)
+	 * check here not to call set_pageblock_migratetype() against
+	 * pfn out of zone.
+	 */
+	if ((z->zone_start_pfn <= pfn)
+	    && (pfn < zone_end_pfn(z))
+	    && !(pfn & (pageblock_nr_pages - 1)))
+		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+
+	INIT_LIST_HEAD(&page->lru);
+#ifdef WANT_PAGE_VIRTUAL
+	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
+	if (!is_highmem_idx(zone))
+		set_page_address(page, __va(pfn << PAGE_SHIFT));
+#endif
+}
+
+static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
+					int nid)
+{
+	return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
+}
+
 static bool free_pages_prepare(struct page *page, unsigned int order)
 {
 	bool compound = PageCompound(page);
@@ -4212,7 +4257,6 @@ static void setup_zone_migrate_reserve(struct zone *zone)
 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		unsigned long start_pfn, enum memmap_context context)
 {
-	struct page *page;
 	unsigned long end_pfn = start_pfn + size;
 	unsigned long pfn;
 	struct zone *z;
@@ -4233,38 +4277,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 			if (!early_pfn_in_nid(pfn, nid))
 				continue;
 		}
-		page = pfn_to_page(pfn);
-		set_page_links(page, zone, nid, pfn);
-		mminit_verify_page_links(page, zone, nid, pfn);
-		init_page_count(page);
-		page_mapcount_reset(page);
-		page_cpupid_reset_last(page);
-		SetPageReserved(page);
-		/*
-		 * Mark the block movable so that blocks are reserved for
-		 * movable at startup. This will force kernel allocations
-		 * to reserve their blocks rather than leaking throughout
-		 * the address space during boot when many long-lived
-		 * kernel allocations are made. Later some blocks near
-		 * the start are marked MIGRATE_RESERVE by
-		 * setup_zone_migrate_reserve()
-		 *
-		 * bitmap is created for zone's valid pfn range. but memmap
-		 * can be created for invalid pages (for alignment)
-		 * check here not to call set_pageblock_migratetype() against
-		 * pfn out of zone.
-		 */
-		if ((z->zone_start_pfn <= pfn)
-		    && (pfn < zone_end_pfn(z))
-		    && !(pfn & (pageblock_nr_pages - 1)))
-			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
-
-		INIT_LIST_HEAD(&page->lru);
-#ifdef WANT_PAGE_VIRTUAL
-		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
-		if (!is_highmem_idx(zone))
-			set_page_address(page, __va(pfn << PAGE_SHIFT));
-#endif
+		__init_single_pfn(pfn, zone, nid);
 	}
 }
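
A note on the pageblock check carried into __init_single_page(): set_pageblock_migratetype() fires at most once per pageblock, because pfn & (pageblock_nr_pages - 1) is zero only for pageblock-aligned frames, and the zone-bounds tests keep it away from memmap entries that exist purely for alignment. A standalone illustration of the mask idiom (the value 512 for pageblock_nr_pages is an assumed typical x86-64 configuration, not taken from this patch):

	#include <stdio.h>

	#define PAGEBLOCK_NR_PAGES 512UL	/* assumed; really arch/config dependent */

	int main(void)
	{
		unsigned long pfn;

		/* Only pageblock-aligned pfns (0, 512, 1024, ...) pass the test. */
		for (pfn = 0; pfn < 1536; pfn++)
			if (!(pfn & (PAGEBLOCK_NR_PAGES - 1)))
				printf("pfn %lu starts a pageblock\n", pfn);
		return 0;
	}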
