mm, hugetlb: do not allocate non-migrateable gigantic pages from movable zones

alloc_gigantic_page doesn't consider the movability of the gigantic hugetlb
page when scanning eligible ranges for the allocation.  As 1GB hugetlb pages
are currently not movable, this can break the movable zone's assumption
that all of its allocations are migratable, and as such break memory hotplug.

Reorganize the code and use the standard zonelist allocation scheme
that we use for standard hugetlb pages.  htlb_alloc_mask will ensure
that only migratable hugetlb pages will ever see a movable zone.
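
For context, htlb_alloc_mask is the helper doing the work here. A minimal
sketch of the 4.13-era definitions (reconstructed from memory, not quoted
verbatim from include/linux/hugetlb.h):

static inline bool hugepage_migration_supported(struct hstate *h)
{
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
        /* Only PMD-sized (e.g. 2MB) hugetlb pages are migratable. */
        return huge_page_shift(h) == PMD_SHIFT;
#else
        return false;
#endif
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
        /*
         * Gigantic (PUD-sized) pages fail the check above, so their
         * mask never carries __GFP_MOVABLE and gfp_zone() never
         * selects ZONE_MOVABLE for them.
         */
        if (hugepage_migration_supported(h))
                return GFP_HIGHUSER_MOVABLE;
        else
                return GFP_HIGHUSER;
}

Because the resulting gfp_mask lacks __GFP_MOVABLE for gigantic pages,
gfp_zone(gfp_mask) caps the zonelist walk below ZONE_MOVABLE in the loop
added by the diff below.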

Link: http://lkml.kernel.org/r/[email protected]
Fixes: 944d9fe ("hugetlb: add support for gigantic page allocation at runtime")
Signed-off-by: Michal Hocko <[email protected]>
Reviewed-by: Mike Kravetz <[email protected]>
Cc: Luiz Capitulino <[email protected]>
Cc: Naoya Horiguchi <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Michal Hocko authored and torvalds committed Sep 7, 2017
1 parent a36985d commit 79b63f1
Showing 1 changed file with 20 additions and 15 deletions.
mm/hugetlb.c

@@ -1066,11 +1066,11 @@ static void free_gigantic_page(struct page *page, unsigned int order)
 }
 
 static int __alloc_gigantic_page(unsigned long start_pfn,
-                                unsigned long nr_pages)
+                                unsigned long nr_pages, gfp_t gfp_mask)
 {
         unsigned long end_pfn = start_pfn + nr_pages;
         return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
-                                 GFP_KERNEL);
+                                 gfp_mask);
 }
 
 static bool pfn_range_valid_gigantic(struct zone *z,
@@ -1108,36 +1108,41 @@ static bool zone_spans_last_pfn(const struct zone *zone,
         return zone_spans_pfn(zone, last_pfn);
 }
 
-static struct page *alloc_gigantic_page(int nid, unsigned int order)
+static struct page *alloc_gigantic_page(int nid, struct hstate *h)
 {
+        unsigned int order = huge_page_order(h);
         unsigned long nr_pages = 1 << order;
         unsigned long ret, pfn, flags;
-        struct zone *z;
+        struct zonelist *zonelist;
+        struct zone *zone;
+        struct zoneref *z;
+        gfp_t gfp_mask;
 
-        z = NODE_DATA(nid)->node_zones;
-        for (; z - NODE_DATA(nid)->node_zones < MAX_NR_ZONES; z++) {
-                spin_lock_irqsave(&z->lock, flags);
+        gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
+        zonelist = node_zonelist(nid, gfp_mask);
+        for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), NULL) {
+                spin_lock_irqsave(&zone->lock, flags);
 
-                pfn = ALIGN(z->zone_start_pfn, nr_pages);
-                while (zone_spans_last_pfn(z, pfn, nr_pages)) {
-                        if (pfn_range_valid_gigantic(z, pfn, nr_pages)) {
+                pfn = ALIGN(zone->zone_start_pfn, nr_pages);
+                while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
+                        if (pfn_range_valid_gigantic(zone, pfn, nr_pages)) {
                                 /*
                                  * We release the zone lock here because
                                  * alloc_contig_range() will also lock the zone
                                  * at some point. If there's an allocation
                                  * spinning on this lock, it may win the race
                                  * and cause alloc_contig_range() to fail...
                                  */
-                                spin_unlock_irqrestore(&z->lock, flags);
-                                ret = __alloc_gigantic_page(pfn, nr_pages);
+                                spin_unlock_irqrestore(&zone->lock, flags);
+                                ret = __alloc_gigantic_page(pfn, nr_pages, gfp_mask);
                                 if (!ret)
                                         return pfn_to_page(pfn);
-                                spin_lock_irqsave(&z->lock, flags);
+                                spin_lock_irqsave(&zone->lock, flags);
                         }
                         pfn += nr_pages;
                 }
 
-                spin_unlock_irqrestore(&z->lock, flags);
+                spin_unlock_irqrestore(&zone->lock, flags);
         }
 
         return NULL;
@@ -1150,7 +1155,7 @@ static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
 {
         struct page *page;
 
-        page = alloc_gigantic_page(nid, huge_page_order(h));
+        page = alloc_gigantic_page(nid, h);
         if (page) {
                 prep_compound_gigantic_page(page, huge_page_order(h));
                 prep_new_huge_page(h, page, nid);
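
To see the zone-filtering effect concretely, here is a small userspace C
model (illustrative only: the zone list, the MODEL_GFP_MOVABLE flag, and
highest_zone() are stand-ins for the kernel's zonelists, __GFP_MOVABLE and
gfp_zone()):

#include <stdio.h>

/* A made-up three-zone node; real kernels have more zones. */
enum zone_type { ZONE_DMA, ZONE_NORMAL, ZONE_MOVABLE, MAX_NR_ZONES };

#define MODEL_GFP_MOVABLE 0x1u  /* stand-in for __GFP_MOVABLE */

/* Mimics gfp_zone(): the highest zone a request may be placed in. */
static enum zone_type highest_zone(unsigned int gfp_mask)
{
        return (gfp_mask & MODEL_GFP_MOVABLE) ? ZONE_MOVABLE : ZONE_NORMAL;
}

int main(void)
{
        const char *name[MAX_NR_ZONES] = { "DMA", "Normal", "Movable" };
        unsigned int masks[2] = { 0, MODEL_GFP_MOVABLE };

        for (int i = 0; i < 2; i++) {
                printf("%s request scans:",
                       masks[i] & MODEL_GFP_MOVABLE ? "movable" : "unmovable");
                /* Mimics the zonelist walk: visit zones up to highest_zone(). */
                for (int z = 0; z <= (int)highest_zone(masks[i]); z++)
                        printf(" %s", name[z]);
                printf("\n");
        }
        return 0;
}

An unmovable request (as gigantic page allocations are after this patch)
scans only the DMA and Normal zones, while a movable one also reaches
Movable, which is exactly the invariant memory hotplug depends on.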
