mlock: fix unevictable_pgs event counts on THP
The 5.8 commit 5d91f31 ("mm: swap: fix vmstats for huge page") established
that vm_events should count every subpage of a THP, including
unevictable_pgs_culled and unevictable_pgs_rescued; but
lru_cache_add_inactive_or_unevictable() was not doing so for
unevictable_pgs_mlocked, and mm/mlock.c was not doing so for
unevictable_pgs_mlocked, _munlocked, _cleared and _stranded.

Fix them; but THPs don't go the pagevec way in mlock.c, so no fixes are
needed on that path.

Fixes: 5d91f31 ("mm: swap: fix vmstats for huge page")
Signed-off-by: Hugh Dickins <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Reviewed-by: Shakeel Butt <[email protected]>
Acked-by: Yang Shi <[email protected]>
Cc: Alex Shi <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Mike Kravetz <[email protected]>
Cc: Qian Cai <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Linus Torvalds <[email protected]>
Hugh Dickins authored and torvalds committed Sep 19, 2020
1 parent 8d8869c commit 0964730
Showing 2 changed files with 18 additions and 12 deletions.
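
For orientation before the diffs: count_vm_event(e) adds one to an event counter, while count_vm_events(e, n) adds n, so pairing the latter with thp_nr_pages(page) credits a THP with one count per subpage. A standalone sketch of the arithmetic (illustrative userspace C, not part of the patch; it assumes a PMD-sized THP of 512 subpages, i.e. a 2MB THP with 4K base pages):

/*
 * Standalone sketch (not kernel code) of the under-count this patch fixes.
 */
#include <stdio.h>

#define HPAGE_PMD_NR 512UL	/* subpages in a 2MB THP with 4K base pages */

int main(void)
{
	/* mlock one PMD-sized THP and one ordinary 4K page */
	unsigned long pages[] = { HPAGE_PMD_NR, 1 };
	unsigned long old_count = 0, new_count = 0;

	for (int i = 0; i < 2; i++) {
		old_count += 1;		/* count_vm_event(): one per call, THP or not */
		new_count += pages[i];	/* count_vm_events(e, thp_nr_pages()): one per subpage */
	}

	/* old: 2, new: 513 -- the per-subpage figure is what vmstat should report */
	printf("old: %lu  new: %lu\n", old_count, new_count);
	return 0;
}
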
mm/mlock.c (24 changes: 15 additions & 9 deletions)

@@ -58,11 +58,14 @@ EXPORT_SYMBOL(can_do_mlock);
  */
 void clear_page_mlock(struct page *page)
 {
+	int nr_pages;
+
 	if (!TestClearPageMlocked(page))
 		return;
 
-	mod_zone_page_state(page_zone(page), NR_MLOCK, -thp_nr_pages(page));
-	count_vm_event(UNEVICTABLE_PGCLEARED);
+	nr_pages = thp_nr_pages(page);
+	mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
+	count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
 	/*
 	 * The previous TestClearPageMlocked() corresponds to the smp_mb()
 	 * in __pagevec_lru_add_fn().
@@ -76,7 +79,7 @@ void clear_page_mlock(struct page *page)
 		 * We lost the race. the page already moved to evictable list.
 		 */
 		if (PageUnevictable(page))
-			count_vm_event(UNEVICTABLE_PGSTRANDED);
+			count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
 	}
 }
 
@@ -93,9 +96,10 @@ void mlock_vma_page(struct page *page)
 	VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);
 
 	if (!TestSetPageMlocked(page)) {
-		mod_zone_page_state(page_zone(page), NR_MLOCK,
-				    thp_nr_pages(page));
-		count_vm_event(UNEVICTABLE_PGMLOCKED);
+		int nr_pages = thp_nr_pages(page);
+
+		mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
+		count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
 		if (!isolate_lru_page(page))
 			putback_lru_page(page);
 	}
@@ -138,7 +142,7 @@ static void __munlock_isolated_page(struct page *page)
 
 	/* Did try_to_unlock() succeed or punt? */
 	if (!PageMlocked(page))
-		count_vm_event(UNEVICTABLE_PGMUNLOCKED);
+		count_vm_events(UNEVICTABLE_PGMUNLOCKED, thp_nr_pages(page));
 
 	putback_lru_page(page);
 }
@@ -154,10 +158,12 @@ static void __munlock_isolated_page(struct page *page)
  */
 static void __munlock_isolation_failed(struct page *page)
 {
+	int nr_pages = thp_nr_pages(page);
+
 	if (PageUnevictable(page))
-		__count_vm_event(UNEVICTABLE_PGSTRANDED);
+		__count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
 	else
-		__count_vm_event(UNEVICTABLE_PGMUNLOCKED);
+		__count_vm_events(UNEVICTABLE_PGMUNLOCKED, nr_pages);
 }
 
 /**
mm/swap.c (6 changes: 3 additions & 3 deletions)

@@ -494,14 +494,14 @@ void lru_cache_add_inactive_or_unevictable(struct page *page,
 
 	unevictable = (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED;
 	if (unlikely(unevictable) && !TestSetPageMlocked(page)) {
+		int nr_pages = thp_nr_pages(page);
 		/*
 		 * We use the irq-unsafe __mod_zone_page_stat because this
 		 * counter is not modified from interrupt context, and the pte
 		 * lock is held(spinlock), which implies preemption disabled.
 		 */
-		__mod_zone_page_state(page_zone(page), NR_MLOCK,
-				    thp_nr_pages(page));
-		count_vm_event(UNEVICTABLE_PGMLOCKED);
+		__mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
+		count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
 	}
 	lru_cache_add(page);
 }
