mm/hugetlb: convert dequeue_hugetlb_page functions to folios
dequeue_huge_page_node_exact() is changed to dequeue_hugetlb_folio_node_exact()
and dequeue_huge_page_nodemask() is changed to dequeue_hugetlb_folio_nodemask().
Update their callers to pass in a folio.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Sidhartha Kumar <[email protected]>
Cc: John Hubbard <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Mike Kravetz <[email protected]>
Cc: Muchun Song <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
sidkumar99 authored and akpm00 committed Feb 13, 2023
1 parent 6f6956c commit a36f1e9
Showing 1 changed file with 30 additions and 26 deletions.
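For quick reference, the prototype changes made by this commit (as they appear in the diff below; shown here on single lines for readability):

	/* before */
	static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid);
	static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask,
						int nid, nodemask_t *nmask);

	/* after */
	static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h, int nid);
	static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask,
						int nid, nodemask_t *nmask);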
mm/hugetlb.c: 30 additions & 26 deletions
@@ -1282,32 +1282,33 @@ static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio)
 	folio_set_hugetlb_freed(folio);
 }
 
-static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
+static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h,
+		int nid)
 {
-	struct page *page;
+	struct folio *folio;
 	bool pin = !!(current->flags & PF_MEMALLOC_PIN);
 
 	lockdep_assert_held(&hugetlb_lock);
-	list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
-		if (pin && !is_longterm_pinnable_page(page))
+	list_for_each_entry(folio, &h->hugepage_freelists[nid], lru) {
+		if (pin && !folio_is_longterm_pinnable(folio))
 			continue;
 
-		if (PageHWPoison(page))
+		if (folio_test_hwpoison(folio))
 			continue;
 
-		list_move(&page->lru, &h->hugepage_activelist);
-		set_page_refcounted(page);
-		ClearHPageFreed(page);
+		list_move(&folio->lru, &h->hugepage_activelist);
+		folio_ref_unfreeze(folio, 1);
+		folio_clear_hugetlb_freed(folio);
 		h->free_huge_pages--;
 		h->free_huge_pages_node[nid]--;
-		return page;
+		return folio;
 	}
 
 	return NULL;
 }
 
-static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
-					nodemask_t *nmask)
+static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask,
+				int nid, nodemask_t *nmask)
 {
 	unsigned int cpuset_mems_cookie;
 	struct zonelist *zonelist;
@@ -1320,7 +1321,7 @@ static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask,
 retry_cpuset:
 	cpuset_mems_cookie = read_mems_allowed_begin();
 	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
-		struct page *page;
+		struct folio *folio;
 
 		if (!cpuset_zone_allowed(zone, gfp_mask))
 			continue;
@@ -1332,9 +1333,9 @@ static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask,
 			continue;
 		node = zone_to_nid(zone);
 
-		page = dequeue_huge_page_node_exact(h, node);
-		if (page)
-			return page;
+		folio = dequeue_hugetlb_folio_node_exact(h, node);
+		if (folio)
+			return folio;
 	}
 	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
 		goto retry_cpuset;
@@ -1352,7 +1353,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
 		unsigned long address, int avoid_reserve,
 		long chg)
 {
-	struct page *page = NULL;
+	struct folio *folio = NULL;
 	struct mempolicy *mpol;
 	gfp_t gfp_mask;
 	nodemask_t *nodemask;
@@ -1374,22 +1375,24 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
 	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
 
 	if (mpol_is_preferred_many(mpol)) {
-		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
+		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
+							nid, nodemask);
 
 		/* Fallback to all nodes if page==NULL */
 		nodemask = NULL;
 	}
 
-	if (!page)
-		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
+	if (!folio)
+		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
+							nid, nodemask);
 
-	if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
-		SetHPageRestoreReserve(page);
+	if (folio && !avoid_reserve && vma_has_reserves(vma, chg)) {
+		folio_set_hugetlb_restore_reserve(folio);
 		h->resv_huge_pages--;
 	}
 
 	mpol_cond_put(mpol);
-	return page;
+	return &folio->page;
 
 err:
 	return NULL;
@@ -2475,12 +2478,13 @@ struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
 {
 	spin_lock_irq(&hugetlb_lock);
 	if (available_huge_pages(h)) {
-		struct page *page;
+		struct folio *folio;
 
-		page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
-		if (page) {
+		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
+						preferred_nid, nmask);
+		if (folio) {
 			spin_unlock_irq(&hugetlb_lock);
-			return page;
+			return &folio->page;
 		}
 	}
 	spin_unlock_irq(&hugetlb_lock);
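A note on the callers: dequeue_huge_page_vma() and alloc_huge_page_nodemask() can keep returning a struct page * because a folio's head page is embedded as its first member, so &folio->page refers to the same object. A minimal user-space sketch of that pattern follows (illustration only, not kernel code; the names dequeue_folio and pool are made up):

	#include <stddef.h>
	#include <stdio.h>

	struct page { unsigned long flags; };

	/* Toy folio: as in the kernel, the head page is the first member. */
	struct folio { struct page page; };

	/* Hypothetical dequeue helper that returns a folio instead of a page. */
	static struct folio *dequeue_folio(struct folio *pool, size_t n)
	{
		return n ? &pool[0] : NULL;
	}

	int main(void)
	{
		struct folio pool[1] = { { { 0 } } };
		struct folio *folio = dequeue_folio(pool, 1);
		/* A caller that still works in pages converts with &folio->page. */
		struct page *page = folio ? &folio->page : NULL;

		printf("folio %p -> page %p\n", (void *)folio, (void *)page);
		return 0;
	}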
