Skip to content

Commit

Permalink
thp: introduce deferred_split_huge_page()
Browse files Browse the repository at this point in the history
Currently we don't split huge page on partial unmap.  It's not an ideal
situation.  It can lead to memory overhead.

Fortunately, we can detect a partial unmap in page_remove_rmap().  But we
cannot call split_huge_page() from there due to locking context.

It's also counterproductive to do this directly from the munmap() codepath: in
many cases we will hit this from exit(2) and splitting the huge page
just to free it up in small pages is not what we really want.

The patch introduces deferred_split_huge_page(), which puts the huge page
into a queue for splitting.  The splitting itself will happen when we get
memory pressure via shrinker interface.  The page will be dropped from
the list on freeing, through the compound page destructor.

Signed-off-by: Kirill A. Shutemov <[email protected]>
Tested-by: Sasha Levin <[email protected]>
Tested-by: Aneesh Kumar K.V <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
Acked-by: Jerome Marchand <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Naoya Horiguchi <[email protected]>
Cc: Steve Capper <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: David Rientjes <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
  • Loading branch information
kiryl authored and torvalds committed Jan 16, 2016
1 parent 248db92 commit 9a98225
Show file tree
Hide file tree
Showing 7 changed files with 174 additions and 12 deletions.
5 changes: 5 additions & 0 deletions include/linux/huge_mm.h
Original file line number Diff line number Diff line change
Expand Up @@ -90,11 +90,15 @@ extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

extern unsigned long transparent_hugepage_flags;

extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);

int split_huge_page_to_list(struct page *page, struct list_head *list);
/*
 * Split @page into base pages without collecting the resulting tail
 * pages onto any caller-supplied list.
 */
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long address);
Expand Down Expand Up @@ -170,6 +174,7 @@ static inline int split_huge_page(struct page *page)
{
return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address) \
do { } while (0)
static inline int hugepage_madvise(struct vm_area_struct *vma,
Expand Down
5 changes: 5 additions & 0 deletions include/linux/mm.h
Original file line number Diff line number Diff line change
Expand Up @@ -507,6 +507,9 @@ enum compound_dtor_id {
COMPOUND_PAGE_DTOR,
#ifdef CONFIG_HUGETLB_PAGE
HUGETLB_PAGE_DTOR,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
TRANSHUGE_PAGE_DTOR,
#endif
NR_COMPOUND_DTORS,
};
Expand Down Expand Up @@ -537,6 +540,8 @@ static inline void set_compound_order(struct page *page, unsigned int order)
page[1].compound_order = order;
}

void free_compound_page(struct page *page);

#ifdef CONFIG_MMU
/*
* Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when
Expand Down
2 changes: 2 additions & 0 deletions include/linux/mm_types.h
Original file line number Diff line number Diff line change
Expand Up @@ -55,13 +55,15 @@ struct page {
*/
void *s_mem; /* slab first object */
atomic_t compound_mapcount; /* first tail page */
/* page_deferred_list().next -- second tail page */
};

/* Second double word */
struct {
union {
pgoff_t index; /* Our offset within mapping. */
void *freelist; /* sl[aou]b first free object */
/* page_deferred_list().prev -- second tail page */
};

union {
Expand Down
139 changes: 135 additions & 4 deletions mm/huge_memory.c
Original file line number Diff line number Diff line change
Expand Up @@ -135,6 +135,10 @@ static struct khugepaged_scan khugepaged_scan = {
.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

/*
 * Queue of huge pages awaiting deferred splitting.  split_queue and
 * split_queue_len are protected by split_queue_lock; the length is kept
 * separately so the shrinker's count callback can read it locklessly.
 */
static DEFINE_SPINLOCK(split_queue_lock);
static LIST_HEAD(split_queue);
static unsigned long split_queue_len;
static struct shrinker deferred_split_shrinker;

static void set_recommended_min_free_kbytes(void)
{
Expand Down Expand Up @@ -667,6 +671,9 @@ static int __init hugepage_init(void)
err = register_shrinker(&huge_zero_page_shrinker);
if (err)
goto err_hzp_shrinker;
err = register_shrinker(&deferred_split_shrinker);
if (err)
goto err_split_shrinker;

/*
* By default disable transparent hugepages on smaller systems,
Expand All @@ -684,6 +691,8 @@ static int __init hugepage_init(void)

return 0;
err_khugepaged:
unregister_shrinker(&deferred_split_shrinker);
err_split_shrinker:
unregister_shrinker(&huge_zero_page_shrinker);
err_hzp_shrinker:
khugepaged_slab_exit();
Expand Down Expand Up @@ -740,6 +749,27 @@ static inline pmd_t mk_huge_pmd(struct page *page, pgprot_t prot)
return entry;
}

/* Return the list_head used to link a THP onto the deferred split queue. */
static inline struct list_head *page_deferred_list(struct page *page)
{
/*
 * ->lru in the tail pages is occupied by compound_head.
 * Let's use ->mapping + ->index in the second tail page as list_head:
 * the two adjacent fields together provide the storage for one
 * struct list_head (next/prev).
 */
return (struct list_head *)&page[2].mapping;
}

/*
 * Prepare a freshly allocated huge page for THP use: initialize the
 * deferred-split list head and install the THP compound destructor so
 * the page is removed from the split queue on free.
 */
void prep_transhuge_page(struct page *page)
{
/*
 * we use page->mapping and page->index in the second tail page
 * as list_head: assuming THP order >= 2, i.e. a second tail page exists
 */
BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);

INIT_LIST_HEAD(page_deferred_list(page));
set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
}

static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd,
Expand Down Expand Up @@ -896,6 +926,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
count_vm_event(THP_FAULT_FALLBACK);
return VM_FAULT_FALLBACK;
}
prep_transhuge_page(page);
return __do_huge_pmd_anonymous_page(mm, vma, address, pmd, page, gfp,
flags);
}
Expand Down Expand Up @@ -1192,7 +1223,9 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
} else
new_page = NULL;

if (unlikely(!new_page)) {
if (likely(new_page)) {
prep_transhuge_page(new_page);
} else {
if (!page) {
split_huge_pmd(vma, pmd, address);
ret |= VM_FAULT_FALLBACK;
Expand Down Expand Up @@ -2109,6 +2142,7 @@ khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
return NULL;
}

prep_transhuge_page(*hpage);
count_vm_event(THP_COLLAPSE_ALLOC);
return *hpage;
}
Expand All @@ -2120,8 +2154,12 @@ static int khugepaged_find_target_node(void)

static inline struct page *alloc_hugepage(int defrag)
{
return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
HPAGE_PMD_ORDER);
struct page *page;

page = alloc_pages(alloc_hugepage_gfpmask(defrag, 0), HPAGE_PMD_ORDER);
if (page)
prep_transhuge_page(page);
return page;
}

static struct page *khugepaged_alloc_hugepage(bool *wait)
Expand Down Expand Up @@ -3098,7 +3136,7 @@ static int __split_huge_page_tail(struct page *head, int tail,
set_page_idle(page_tail);

/* ->mapping in first tail page is compound_mapcount */
VM_BUG_ON_PAGE(tail != 1 && page_tail->mapping != TAIL_MAPPING,
VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
page_tail);
page_tail->mapping = head->mapping;

Expand Down Expand Up @@ -3207,19 +3245,28 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
freeze_page(anon_vma, head);
VM_BUG_ON_PAGE(compound_mapcount(head), head);

/* Prevent deferred_split_scan() touching ->_count */
spin_lock(&split_queue_lock);
count = page_count(head);
mapcount = total_mapcount(head);
if (mapcount == count - 1) {
if (!list_empty(page_deferred_list(head))) {
split_queue_len--;
list_del(page_deferred_list(head));
}
spin_unlock(&split_queue_lock);
__split_huge_page(page, list);
ret = 0;
} else if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount > count - 1) {
spin_unlock(&split_queue_lock);
pr_alert("total_mapcount: %u, page_count(): %u\n",
mapcount, count);
if (PageTail(page))
dump_page(head, NULL);
dump_page(page, "total_mapcount(head) > page_count(head) - 1");
BUG();
} else {
spin_unlock(&split_queue_lock);
unfreeze_page(anon_vma, head);
ret = -EBUSY;
}
Expand All @@ -3231,3 +3278,87 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
return ret;
}

/*
 * Compound page destructor for THP: take the page off the deferred
 * split queue (if queued) before handing it back to the allocator.
 * Uses the irqsave lock variant since the spinlock is also taken from
 * the shrinker path with interrupts disabled.
 */
void free_transhuge_page(struct page *page)
{
unsigned long flags;

spin_lock_irqsave(&split_queue_lock, flags);
if (!list_empty(page_deferred_list(page))) {
split_queue_len--;
list_del(page_deferred_list(page));
}
spin_unlock_irqrestore(&split_queue_lock, flags);
free_compound_page(page);
}

/*
 * Queue a huge page for deferred splitting.  The actual split happens
 * later, from deferred_split_scan() via the shrinker under memory
 * pressure.  Idempotent: a page already on the queue (non-empty list
 * head) is not queued again.
 */
void deferred_split_huge_page(struct page *page)
{
unsigned long flags;

VM_BUG_ON_PAGE(!PageTransHuge(page), page);

spin_lock_irqsave(&split_queue_lock, flags);
if (list_empty(page_deferred_list(page))) {
list_add_tail(page_deferred_list(page), &split_queue);
split_queue_len++;
}
spin_unlock_irqrestore(&split_queue_lock, flags);
}

/* Shrinker count callback: estimate of freeable pages on the queue. */
static unsigned long deferred_split_count(struct shrinker *shrink,
struct shrink_control *sc)
{
/*
 * Splitting a page from split_queue will free up at least one page,
 * at most HPAGE_PMD_NR - 1.  We don't track the exact number, so
 * use HPAGE_PMD_NR / 2 as a ballpark.  The lockless ACCESS_ONCE()
 * read is fine here: only an approximation is needed.
 */
return ACCESS_ONCE(split_queue_len) * HPAGE_PMD_NR / 2;
}

/*
 * Shrinker scan callback: try to split the huge pages currently on the
 * deferred split queue.  Pages that cannot be split are put back on the
 * queue.  Returns an estimate of the number of pages freed.
 */
static unsigned long deferred_split_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
unsigned long flags;
LIST_HEAD(list), *pos, *next;
struct page *page;
int split = 0;

/* Move the whole queue onto a private list under the lock. */
spin_lock_irqsave(&split_queue_lock, flags);
list_splice_init(&split_queue, &list);

/* Take pin on all head pages to avoid freeing them under us */
list_for_each_safe(pos, next, &list) {
/* pos points at &page[2].mapping -- see page_deferred_list() */
page = list_entry((void *)pos, struct page, mapping);
page = compound_head(page);
/* race with put_compound_page() */
if (!get_page_unless_zero(page)) {
/* Page is already being freed; drop it from the list. */
list_del_init(page_deferred_list(page));
split_queue_len--;
}
}
spin_unlock_irqrestore(&split_queue_lock, flags);

/* Attempt the splits outside the spinlock, under the page lock. */
list_for_each_safe(pos, next, &list) {
page = list_entry((void *)pos, struct page, mapping);
lock_page(page);
/* split_huge_page() removes page from list on success */
if (!split_huge_page(page))
split++;
unlock_page(page);
put_page(page);
}

/* Requeue the pages we failed to split. */
spin_lock_irqsave(&split_queue_lock, flags);
list_splice_tail(&list, &split_queue);
spin_unlock_irqrestore(&split_queue_lock, flags);

/* Same ballpark estimate as deferred_split_count(). */
return split * HPAGE_PMD_NR / 2;
}

/* Shrinker that splits queued huge pages under memory pressure. */
static struct shrinker deferred_split_shrinker = {
.count_objects = deferred_split_count,
.scan_objects = deferred_split_scan,
.seeks = DEFAULT_SEEKS,
};
1 change: 1 addition & 0 deletions mm/migrate.c
Original file line number Diff line number Diff line change
Expand Up @@ -1760,6 +1760,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
HPAGE_PMD_ORDER);
if (!new_page)
goto out_fail;
prep_transhuge_page(new_page);

isolated = numamigrate_isolate_page(pgdat, page);
if (!isolated) {
Expand Down
27 changes: 20 additions & 7 deletions mm/page_alloc.c
Original file line number Diff line number Diff line change
Expand Up @@ -222,13 +222,15 @@ static char * const zone_names[MAX_NR_ZONES] = {
#endif
};

static void free_compound_page(struct page *page);
/*
 * Table of compound page destructors, indexed by the dtor id stored in
 * the first tail page.  Entry order (and #ifdef conditions) must match
 * enum compound_dtor_id exactly.
 */
compound_page_dtor * const compound_page_dtors[] = {
NULL,
free_compound_page,
#ifdef CONFIG_HUGETLB_PAGE
free_huge_page,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
free_transhuge_page,
#endif
};

int min_free_kbytes = 1024;
Expand Down Expand Up @@ -450,7 +452,7 @@ static void bad_page(struct page *page, const char *reason,
* This usage means that zero-order pages may not be compound.
*/

static void free_compound_page(struct page *page)
/*
 * Default compound page destructor: hand the whole compound page back
 * to the buddy allocator at its compound order.
 */
void free_compound_page(struct page *page)
{
	unsigned int order = compound_order(page);

	__free_pages_ok(page, order);
}
Expand Down Expand Up @@ -858,15 +860,26 @@ static int free_tail_pages_check(struct page *head_page, struct page *page)
ret = 0;
goto out;
}
/* mapping in first tail page is used for compound_mapcount() */
if (page - head_page == 1) {
switch (page - head_page) {
case 1:
/* the first tail page: ->mapping is compound_mapcount() */
if (unlikely(compound_mapcount(page))) {
bad_page(page, "nonzero compound_mapcount", 0);
goto out;
}
} else if (page->mapping != TAIL_MAPPING) {
bad_page(page, "corrupted mapping in tail page", 0);
goto out;
break;
case 2:
/*
* the second tail page: ->mapping is
* page_deferred_list().next -- ignore value.
*/
break;
default:
if (page->mapping != TAIL_MAPPING) {
bad_page(page, "corrupted mapping in tail page", 0);
goto out;
}
break;
}
if (unlikely(!PageTail(page))) {
bad_page(page, "PageTail not set", 0);
Expand Down
7 changes: 6 additions & 1 deletion mm/rmap.c
Original file line number Diff line number Diff line change
Expand Up @@ -1282,8 +1282,10 @@ static void page_remove_anon_compound_rmap(struct page *page)
nr = HPAGE_PMD_NR;
}

if (nr)
if (nr) {
__mod_zone_page_state(page_zone(page), NR_ANON_PAGES, -nr);
deferred_split_huge_page(page);
}
}

/**
Expand Down Expand Up @@ -1318,6 +1320,9 @@ void page_remove_rmap(struct page *page, bool compound)
if (unlikely(PageMlocked(page)))
clear_page_mlock(page);

if (PageTransCompound(page))
deferred_split_huge_page(compound_head(page));

/*
* It would be tidy to reset the PageAnon mapping here,
* but that might overwrite a racing page_add_anon_rmap
Expand Down

0 comments on commit 9a98225

Please sign in to comment.