mm: make rmap_one boolean function
rmap_one's return value controls whether rmap_walk should continue to
scan other ptes or not, so it is a natural target for conversion to
boolean.  Return true if the scan should continue.  Otherwise, return
false to stop the scanning.

This patch changes rmap_one's return value to boolean.
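
For illustration only (not part of the patch): a minimal, self-contained
sketch of how a boolean rmap_one callback short-circuits a reverse-map
walk.  The struct mirrors rmap_walk_control from include/linux/rmap.h,
but rmap_walk_sketch() and its array-based iteration are hypothetical,
simplified stand-ins for the real rmap_walk_anon()/rmap_walk_file()
loops.

/*
 * Illustrative sketch, not from the kernel tree: a simplified walker
 * showing how the boolean return of ->rmap_one stops the scan.
 * rmap_walk_sketch() and its array-based iteration are hypothetical;
 * only the rmap_one/done callback contract mirrors the patch.
 */
#include <stdbool.h>
#include <stddef.h>

struct page;			/* opaque for this sketch */
struct vm_area_struct;		/* opaque for this sketch */

struct rmap_walk_control {
	void *arg;
	/* Return false to stop scanning further VMAs; true to continue. */
	bool (*rmap_one)(struct page *page, struct vm_area_struct *vma,
			 unsigned long addr, void *arg);
	int (*done)(struct page *page);
};

static void rmap_walk_sketch(struct page *page, struct rmap_walk_control *rwc,
			     struct vm_area_struct **vmas,
			     unsigned long *addrs, size_t nr)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		/* Callback returned false: stop the page table scan. */
		if (!rwc->rmap_one(page, vmas[i], addrs[i], rwc->arg))
			break;
		/* Walk is complete for this page. */
		if (rwc->done && rwc->done(page))
			break;
	}
}

Before this patch a callback signalled the same thing by returning a
value other than SWAP_AGAIN; the boolean form makes the continue/stop
intent explicit at the call site.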

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Minchan Kim <[email protected]>
Cc: Anshuman Khandual <[email protected]>
Cc: Hillf Danton <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Kirill A. Shutemov <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Naoya Horiguchi <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
minchank authored and torvalds committed May 3, 2017
1 parent 1df631a commit e4b8222
Showing 5 changed files with 25 additions and 21 deletions.
6 changes: 5 additions & 1 deletion include/linux/rmap.h
@@ -257,7 +257,11 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
  */
 struct rmap_walk_control {
 	void *arg;
-	int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
+	/*
+	 * Return false if page table scanning in rmap_walk should be stopped.
+	 * Otherwise, return true.
+	 */
+	bool (*rmap_one)(struct page *page, struct vm_area_struct *vma,
 					unsigned long addr, void *arg);
 	int (*done)(struct page *page);
 	struct anon_vma *(*anon_lock)(struct page *page);
2 changes: 1 addition & 1 deletion mm/ksm.c
@@ -1977,7 +1977,7 @@ void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
 		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
 			continue;
 
-		if (SWAP_AGAIN != rwc->rmap_one(page, vma,
+		if (!rwc->rmap_one(page, vma,
 				rmap_item->address, rwc->arg)) {
 			anon_vma_unlock_read(anon_vma);
 			return;
4 changes: 2 additions & 2 deletions mm/migrate.c
@@ -194,7 +194,7 @@ void putback_movable_pages(struct list_head *l)
 /*
  * Restore a potential migration pte to a working pte entry
  */
-static int remove_migration_pte(struct page *page, struct vm_area_struct *vma,
+static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
 				 unsigned long addr, void *old)
 {
 	struct page_vma_mapped_walk pvmw = {
@@ -253,7 +253,7 @@ static int remove_migration_pte(struct page *page, struct vm_area_struct *vma,
 		update_mmu_cache(vma, pvmw.address, pvmw.pte);
 	}
 
-	return SWAP_AGAIN;
+	return true;
 }
 
 /*
4 changes: 2 additions & 2 deletions mm/page_idle.c
@@ -50,7 +50,7 @@ static struct page *page_idle_get_page(unsigned long pfn)
 	return page;
 }
 
-static int page_idle_clear_pte_refs_one(struct page *page,
+static bool page_idle_clear_pte_refs_one(struct page *page,
 					struct vm_area_struct *vma,
 					unsigned long addr, void *arg)
 {
@@ -84,7 +84,7 @@ static int page_idle_clear_pte_refs_one(struct page *page,
 		 */
 		set_page_young(page);
 	}
-	return SWAP_AGAIN;
+	return true;
 }
 
 static void page_idle_clear_pte_refs(struct page *page)
30 changes: 15 additions & 15 deletions mm/rmap.c
@@ -724,7 +724,7 @@ struct page_referenced_arg {
 /*
  * arg: page_referenced_arg will be passed
  */
-static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
+static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
 			unsigned long address, void *arg)
 {
 	struct page_referenced_arg *pra = arg;
@@ -741,7 +741,7 @@ static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
 		if (vma->vm_flags & VM_LOCKED) {
 			page_vma_mapped_walk_done(&pvmw);
 			pra->vm_flags |= VM_LOCKED;
-			return SWAP_FAIL; /* To break the loop */
+			return false; /* To break the loop */
 		}
 
 		if (pvmw.pte) {
@@ -781,9 +781,9 @@ static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
 	}
 
 	if (!pra->mapcount)
-		return SWAP_SUCCESS; /* To break the loop */
+		return false; /* To break the loop */
 
-	return SWAP_AGAIN;
+	return true;
 }
 
 static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
@@ -854,7 +854,7 @@ int page_referenced(struct page *page,
 	return pra.referenced;
 }
 
-static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
+static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 			    unsigned long address, void *arg)
 {
 	struct page_vma_mapped_walk pvmw = {
@@ -907,7 +907,7 @@ static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 		}
 	}
 
-	return SWAP_AGAIN;
+	return true;
 }
 
 static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
@@ -1290,7 +1290,7 @@ void page_remove_rmap(struct page *page, bool compound)
 /*
  * @arg: enum ttu_flags will be passed to this argument
  */
-static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		     unsigned long address, void *arg)
 {
 	struct mm_struct *mm = vma->vm_mm;
@@ -1301,12 +1301,12 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	};
 	pte_t pteval;
 	struct page *subpage;
-	int ret = SWAP_AGAIN;
+	bool ret = true;
 	enum ttu_flags flags = (enum ttu_flags)arg;
 
 	/* munlock has nothing to gain from examining un-locked vmas */
 	if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
-		return SWAP_AGAIN;
+		return true;
 
 	if (flags & TTU_SPLIT_HUGE_PMD) {
 		split_huge_pmd_address(vma, address,
@@ -1329,7 +1329,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 				 */
 				mlock_vma_page(page);
 			}
-			ret = SWAP_FAIL;
+			ret = false;
 			page_vma_mapped_walk_done(&pvmw);
 			break;
 		}
@@ -1347,7 +1347,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		if (!(flags & TTU_IGNORE_ACCESS)) {
 			if (ptep_clear_flush_young_notify(vma, address,
 						pvmw.pte)) {
-				ret = SWAP_FAIL;
+				ret = false;
 				page_vma_mapped_walk_done(&pvmw);
 				break;
 			}
@@ -1437,14 +1437,14 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 				 */
 				set_pte_at(mm, address, pvmw.pte, pteval);
 				SetPageSwapBacked(page);
-				ret = SWAP_FAIL;
+				ret = false;
 				page_vma_mapped_walk_done(&pvmw);
 				break;
 			}
 
 			if (swap_duplicate(entry) < 0) {
 				set_pte_at(mm, address, pvmw.pte, pteval);
-				ret = SWAP_FAIL;
+				ret = false;
 				page_vma_mapped_walk_done(&pvmw);
 				break;
 			}
@@ -1636,7 +1636,7 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
 		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
 			continue;
 
-		if (SWAP_AGAIN != rwc->rmap_one(page, vma, address, rwc->arg))
+		if (!rwc->rmap_one(page, vma, address, rwc->arg))
 			break;
 		if (rwc->done && rwc->done(page))
 			break;
@@ -1690,7 +1690,7 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
 		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
 			continue;
 
-		if (SWAP_AGAIN != rwc->rmap_one(page, vma, address, rwc->arg))
+		if (!rwc->rmap_one(page, vma, address, rwc->arg))
			goto done;
 		if (rwc->done && rwc->done(page))
 			goto done;
