Skip to content

Commit

Permalink
mm/rmap: Convert rmap_walk() to take a folio
Browse files Browse the repository at this point in the history
This ripples all the way through to every calling and called function
from rmap.

Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
  • Loading branch information
Matthew Wilcox (Oracle) committed Mar 21, 2022
1 parent e05b345 commit 2f031c6
Show file tree
Hide file tree
Showing 9 changed files with 80 additions and 99 deletions.
4 changes: 2 additions & 2 deletions include/linux/ksm.h
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ static inline void ksm_exit(struct mm_struct *mm)
struct page *ksm_might_need_to_copy(struct page *page,
struct vm_area_struct *vma, unsigned long address);

void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);

#else /* !CONFIG_KSM */
Expand All @@ -78,7 +78,7 @@ static inline struct page *ksm_might_need_to_copy(struct page *page,
return page;
}

static inline void rmap_walk_ksm(struct page *page,
/*
 * !CONFIG_KSM stub: without KSM there are no KSM pages, so walking the
 * reverse mappings of a KSM folio is a no-op.  @folio and @rwc are
 * intentionally ignored.
 */
static inline void rmap_walk_ksm(struct folio *folio,
struct rmap_walk_control *rwc)
{
}
Expand Down
11 changes: 5 additions & 6 deletions include/linux/rmap.h
Original file line number Diff line number Diff line change
Expand Up @@ -266,7 +266,6 @@ void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked);
/*
* Called by memory-failure.c to kill processes.
*/
struct anon_vma *page_lock_anon_vma_read(struct page *page);
struct anon_vma *folio_lock_anon_vma_read(struct folio *folio);
void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
Expand All @@ -286,15 +285,15 @@ struct rmap_walk_control {
* Return false if page table scanning in rmap_walk should be stopped.
* Otherwise, return true.
*/
bool (*rmap_one)(struct page *page, struct vm_area_struct *vma,
bool (*rmap_one)(struct folio *folio, struct vm_area_struct *vma,
unsigned long addr, void *arg);
int (*done)(struct page *page);
struct anon_vma *(*anon_lock)(struct page *page);
int (*done)(struct folio *folio);
struct anon_vma *(*anon_lock)(struct folio *folio);
bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};

void rmap_walk(struct page *page, struct rmap_walk_control *rwc);
void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc);
void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc);

#else /* !CONFIG_MMU */

Expand Down
15 changes: 7 additions & 8 deletions mm/damon/paddr.c
Original file line number Diff line number Diff line change
Expand Up @@ -16,10 +16,10 @@
#include "../internal.h"
#include "prmtv-common.h"

static bool __damon_pa_mkold(struct page *page, struct vm_area_struct *vma,
static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma,
unsigned long addr, void *arg)
{
DEFINE_PAGE_VMA_WALK(pvmw, page, vma, addr, 0);
DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

while (page_vma_mapped_walk(&pvmw)) {
addr = pvmw.address;
Expand All @@ -37,7 +37,7 @@ static void damon_pa_mkold(unsigned long paddr)
struct page *page = damon_get_page(PHYS_PFN(paddr));
struct rmap_walk_control rwc = {
.rmap_one = __damon_pa_mkold,
.anon_lock = page_lock_anon_vma_read,
.anon_lock = folio_lock_anon_vma_read,
};
bool need_lock;

Expand All @@ -54,7 +54,7 @@ static void damon_pa_mkold(unsigned long paddr)
if (need_lock && !folio_trylock(folio))
goto out;

rmap_walk(&folio->page, &rwc);
rmap_walk(folio, &rwc);

if (need_lock)
folio_unlock(folio);
Expand Down Expand Up @@ -87,10 +87,9 @@ struct damon_pa_access_chk_result {
bool accessed;
};

static bool __damon_pa_young(struct page *page, struct vm_area_struct *vma,
static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
unsigned long addr, void *arg)
{
struct folio *folio = page_folio(page);
struct damon_pa_access_chk_result *result = arg;
DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

Expand Down Expand Up @@ -133,7 +132,7 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
struct rmap_walk_control rwc = {
.arg = &result,
.rmap_one = __damon_pa_young,
.anon_lock = page_lock_anon_vma_read,
.anon_lock = folio_lock_anon_vma_read,
};
bool need_lock;

Expand All @@ -156,7 +155,7 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
return NULL;
}

rmap_walk(&folio->page, &rwc);
rmap_walk(folio, &rwc);

if (need_lock)
folio_unlock(folio);
Expand Down
7 changes: 0 additions & 7 deletions mm/folio-compat.c
Original file line number Diff line number Diff line change
Expand Up @@ -164,10 +164,3 @@ void putback_lru_page(struct page *page)
{
folio_putback_lru(page_folio(page));
}

#ifdef CONFIG_MMU
/*
 * Compatibility wrapper for callers that still pass a struct page:
 * resolve the page to its containing folio, then take the anon_vma
 * read lock via the folio-based implementation.
 */
struct anon_vma *page_lock_anon_vma_read(struct page *page)
{
	struct folio *folio = page_folio(page);

	return folio_lock_anon_vma_read(folio);
}
#endif
2 changes: 1 addition & 1 deletion mm/huge_memory.c
Original file line number Diff line number Diff line change
Expand Up @@ -2572,7 +2572,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
* The caller does not necessarily hold an mmap_lock that would
* prevent the anon_vma disappearing so we first we take a
* reference to it and then lock the anon_vma for write. This
* is similar to page_lock_anon_vma_read except the write lock
* is similar to folio_lock_anon_vma_read except the write lock
* is taken to serialise against parallel split or collapse
* operations.
*/
Expand Down
12 changes: 6 additions & 6 deletions mm/ksm.c
Original file line number Diff line number Diff line change
Expand Up @@ -2588,21 +2588,21 @@ struct page *ksm_might_need_to_copy(struct page *page,
return new_page;
}

void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
{
struct stable_node *stable_node;
struct rmap_item *rmap_item;
int search_new_forks = 0;

VM_BUG_ON_PAGE(!PageKsm(page), page);
VM_BUG_ON_FOLIO(!folio_test_ksm(folio), folio);

/*
* Rely on the page lock to protect against concurrent modifications
* to that page's node of the stable tree.
*/
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

stable_node = page_stable_node(page);
stable_node = folio_stable_node(folio);
if (!stable_node)
return;
again:
Expand Down Expand Up @@ -2637,11 +2637,11 @@ void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
continue;

if (!rwc->rmap_one(page, vma, addr, rwc->arg)) {
if (!rwc->rmap_one(folio, vma, addr, rwc->arg)) {
anon_vma_unlock_read(anon_vma);
return;
}
if (rwc->done && rwc->done(page)) {
if (rwc->done && rwc->done(folio)) {
anon_vma_unlock_read(anon_vma);
return;
}
Expand Down
10 changes: 4 additions & 6 deletions mm/migrate.c
Original file line number Diff line number Diff line change
Expand Up @@ -171,13 +171,11 @@ void putback_movable_pages(struct list_head *l)
/*
* Restore a potential migration pte to a working pte entry
*/
static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
unsigned long addr, void *old)
static bool remove_migration_pte(struct folio *folio,
struct vm_area_struct *vma, unsigned long addr, void *old)
{
struct folio *folio = page_folio(page);
DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);

VM_BUG_ON_PAGE(PageTail(page), page);
while (page_vma_mapped_walk(&pvmw)) {
pte_t pte;
swp_entry_t entry;
Expand Down Expand Up @@ -269,9 +267,9 @@ void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
};

if (locked)
rmap_walk_locked(&dst->page, &rwc);
rmap_walk_locked(dst, &rwc);
else
rmap_walk(&dst->page, &rwc);
rmap_walk(dst, &rwc);
}

/*
Expand Down
7 changes: 3 additions & 4 deletions mm/page_idle.c
Original file line number Diff line number Diff line change
Expand Up @@ -46,11 +46,10 @@ static struct page *page_idle_get_page(unsigned long pfn)
return page;
}

static bool page_idle_clear_pte_refs_one(struct page *page,
static bool page_idle_clear_pte_refs_one(struct folio *folio,
struct vm_area_struct *vma,
unsigned long addr, void *arg)
{
struct folio *folio = page_folio(page);
DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
bool referenced = false;

Expand Down Expand Up @@ -93,7 +92,7 @@ static void page_idle_clear_pte_refs(struct page *page)
*/
static const struct rmap_walk_control rwc = {
.rmap_one = page_idle_clear_pte_refs_one,
.anon_lock = page_lock_anon_vma_read,
.anon_lock = folio_lock_anon_vma_read,
};
bool need_lock;

Expand All @@ -104,7 +103,7 @@ static void page_idle_clear_pte_refs(struct page *page)
if (need_lock && !folio_trylock(folio))
return;

rmap_walk(&folio->page, (struct rmap_walk_control *)&rwc);
rmap_walk(folio, (struct rmap_walk_control *)&rwc);

if (need_lock)
folio_unlock(folio);
Expand Down
Loading

0 comments on commit 2f031c6

Please sign in to comment.