mm: soft-dirty: keep soft-dirty bits over thp migration

The soft-dirty bit is designed to be preserved across page migration.  This
patch makes it work the same way for THP migration too.

Signed-off-by: Naoya Horiguchi <[email protected]>
Signed-off-by: Zi Yan <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Anshuman Khandual <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: David Nellans <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Kirill A. Shutemov <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Minchan Kim <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Cc: Michal Hocko <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Naoya Horiguchi authored and torvalds committed Sep 9, 2017
1 parent 84c3fc4 commit ab6e3d0
Showing 5 changed files with 92 additions and 15 deletions.
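
For context, the soft-dirty machinery this patch extends is driven from
userspace through /proc: writing "4" to /proc/pid/clear_refs clears the
soft-dirty bits for a task's address space, and bit 55 of each 64-bit
/proc/pid/pagemap entry reports whether the page has been written since
(see Documentation/vm/soft-dirty.txt). A minimal, untested sketch of that
round trip, with error handling elided:

/*
 * Sketch: observe the soft-dirty bit for one page of our own address
 * space.  Bit 55 of a /proc/pid/pagemap entry is the soft-dirty flag;
 * writing "4" to /proc/pid/clear_refs clears it for the whole mm.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static int page_soft_dirty(void *addr)
{
	uint64_t ent;
	long pagesize = sysconf(_SC_PAGESIZE);
	int fd = open("/proc/self/pagemap", O_RDONLY);
	off_t off = ((uintptr_t)addr / pagesize) * sizeof(ent);

	if (fd < 0 || pread(fd, &ent, sizeof(ent), off) != sizeof(ent))
		exit(1);
	close(fd);
	return (ent >> 55) & 1;		/* bit 55: soft-dirty */
}

int main(void)
{
	char *p = malloc(1);
	int fd = open("/proc/self/clear_refs", O_WRONLY);

	*p = 1;				/* fault the page in */
	write(fd, "4", 1);		/* "4": clear soft-dirty bits */
	close(fd);
	printf("after clear: %d\n", page_soft_dirty(p));	/* expect 0 */
	*p = 2;
	printf("after write: %d\n", page_soft_dirty(p));	/* expect 1 */
	return 0;
}

Checkpoint/restore tools such as CRIU rely on exactly this loop to find
pages dirtied between incremental dumps; losing the bit across a THP
migration would make such a dump miss writes.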
17 changes: 17 additions & 0 deletions arch/x86/include/asm/pgtable.h
@@ -1172,6 +1172,23 @@ static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
 {
 	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
 }
+
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
+{
+	return pmd_set_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
+}
+
+static inline int pmd_swp_soft_dirty(pmd_t pmd)
+{
+	return pmd_flags(pmd) & _PAGE_SWP_SOFT_DIRTY;
+}
+
+static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
+{
+	return pmd_clear_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
+}
+#endif
 #endif
 
 #define PKRU_AD_BIT 0x1
27 changes: 16 additions & 11 deletions fs/proc/task_mmu.c
@@ -978,17 +978,22 @@ static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
 {
 	pmd_t pmd = *pmdp;
 
-	/* See comment in change_huge_pmd() */
-	pmdp_invalidate(vma, addr, pmdp);
-	if (pmd_dirty(*pmdp))
-		pmd = pmd_mkdirty(pmd);
-	if (pmd_young(*pmdp))
-		pmd = pmd_mkyoung(pmd);
-
-	pmd = pmd_wrprotect(pmd);
-	pmd = pmd_clear_soft_dirty(pmd);
-
-	set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
+	if (pmd_present(pmd)) {
+		/* See comment in change_huge_pmd() */
+		pmdp_invalidate(vma, addr, pmdp);
+		if (pmd_dirty(*pmdp))
+			pmd = pmd_mkdirty(pmd);
+		if (pmd_young(*pmdp))
+			pmd = pmd_mkyoung(pmd);
+
+		pmd = pmd_wrprotect(pmd);
+		pmd = pmd_clear_soft_dirty(pmd);
+
+		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
+	} else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
+		pmd = pmd_swp_clear_soft_dirty(pmd);
+		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
+	}
 }
 #else
 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
34 changes: 33 additions & 1 deletion include/asm-generic/pgtable.h
@@ -630,7 +630,24 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
 #define arch_start_context_switch(prev) do {} while (0)
 #endif
 
-#ifndef CONFIG_HAVE_ARCH_SOFT_DIRTY
+#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
+#ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION
+static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
+{
+	return pmd;
+}
+
+static inline int pmd_swp_soft_dirty(pmd_t pmd)
+{
+	return 0;
+}
+
+static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
+{
+	return pmd;
+}
+#endif
+#else /* !CONFIG_HAVE_ARCH_SOFT_DIRTY */
 static inline int pte_soft_dirty(pte_t pte)
 {
 	return 0;
@@ -675,6 +692,21 @@ static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
 {
 	return pte;
 }
+
+static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
+{
+	return pmd;
+}
+
+static inline int pmd_swp_soft_dirty(pmd_t pmd)
+{
+	return 0;
+}
+
+static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
+{
+	return pmd;
+}
 #endif
 
 #ifndef __HAVE_PFNMAP_TRACKING
2 changes: 2 additions & 0 deletions include/linux/swapops.h
@@ -183,6 +183,8 @@ static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
 {
 	swp_entry_t arch_entry;
 
+	if (pmd_swp_soft_dirty(pmd))
+		pmd = pmd_swp_clear_soft_dirty(pmd);
 	arch_entry = __pmd_to_swp_entry(pmd);
 	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
 }
27 changes: 24 additions & 3 deletions mm/huge_memory.c
@@ -937,6 +937,8 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		if (is_write_migration_entry(entry)) {
 			make_migration_entry_read(&entry);
 			pmd = swp_entry_to_pmd(entry);
+			if (pmd_swp_soft_dirty(*src_pmd))
+				pmd = pmd_swp_mksoft_dirty(pmd);
 			set_pmd_at(src_mm, addr, src_pmd, pmd);
 		}
 		set_pmd_at(dst_mm, addr, dst_pmd, pmd);
@@ -1756,6 +1758,17 @@ static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
 }
 #endif
 
+static pmd_t move_soft_dirty_pmd(pmd_t pmd)
+{
+#ifdef CONFIG_MEM_SOFT_DIRTY
+	if (unlikely(is_pmd_migration_entry(pmd)))
+		pmd = pmd_swp_mksoft_dirty(pmd);
+	else if (pmd_present(pmd))
+		pmd = pmd_mksoft_dirty(pmd);
+#endif
+	return pmd;
+}
+
 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 		  unsigned long new_addr, unsigned long old_end,
 		  pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush)
@@ -1798,7 +1811,8 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 			pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
 			pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
 		}
-		set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
+		pmd = move_soft_dirty_pmd(pmd);
+		set_pmd_at(mm, new_addr, new_pmd, pmd);
 		if (new_ptl != old_ptl)
 			spin_unlock(new_ptl);
 		if (force_flush)
@@ -1846,6 +1860,8 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			 */
 			make_migration_entry_read(&entry);
 			newpmd = swp_entry_to_pmd(entry);
+			if (pmd_swp_soft_dirty(*pmd))
+				newpmd = pmd_swp_mksoft_dirty(newpmd);
 			set_pmd_at(mm, addr, pmd, newpmd);
 		}
 		goto unlock;
@@ -2824,6 +2840,7 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
 	unsigned long address = pvmw->address;
 	pmd_t pmdval;
 	swp_entry_t entry;
+	pmd_t pmdswp;
 
 	if (!(pvmw->pmd && !pvmw->pte))
 		return;
@@ -2837,8 +2854,10 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
 	if (pmd_dirty(pmdval))
 		set_page_dirty(page);
 	entry = make_migration_entry(page, pmd_write(pmdval));
-	pmdval = swp_entry_to_pmd(entry);
-	set_pmd_at(mm, address, pvmw->pmd, pmdval);
+	pmdswp = swp_entry_to_pmd(entry);
+	if (pmd_soft_dirty(pmdval))
+		pmdswp = pmd_swp_mksoft_dirty(pmdswp);
+	set_pmd_at(mm, address, pvmw->pmd, pmdswp);
 	page_remove_rmap(page, true);
 	put_page(page);
 
@@ -2861,6 +2880,8 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
 	entry = pmd_to_swp_entry(*pvmw->pmd);
 	get_page(new);
 	pmde = pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot));
+	if (pmd_swp_soft_dirty(*pvmw->pmd))
+		pmde = pmd_mksoft_dirty(pmde);
 	if (is_write_migration_entry(entry))
 		pmde = maybe_pmd_mkwrite(pmde, vma);
 
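The behaviour the patch fixes can be exercised end to end with a sketch
along the following lines. Everything here beyond the diff is an
assumption: it presumes a machine with NUMA nodes 0 and 1, transparent
hugepages enabled, a 2MB huge page size, libnuma's move_pages(2) wrapper,
and the page_soft_dirty() helper from the earlier sketch. Migrating the
THP replaces its PMD with a migration entry; without the pmd_swp_*
helpers above, the soft-dirty bit would be lost at that point and the
final read would report 0.

/*
 * Sketch: check that soft-dirty survives THP migration.  Build with
 * -lnuma.  Node numbers, the 2MB size, and mapping alignment are all
 * assumptions; a real test would align the region to the huge page
 * size so the fault path actually installs a THP.
 */
#include <numaif.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define THP_SIZE (2UL << 20)		/* assumed huge page size */

int page_soft_dirty(void *addr);	/* helper from the earlier sketch */

int main(void)
{
	void *p = mmap(NULL, THP_SIZE, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	int node = 1, status;

	madvise(p, THP_SIZE, MADV_HUGEPAGE);
	memset(p, 1, THP_SIZE);		/* populate and soft-dirty it */

	/* unmaps the THP into a PMD migration entry, then remaps it */
	move_pages(0, 1, &p, &node, &status, MPOL_MF_MOVE);

	printf("soft-dirty after migration: %d\n", page_soft_dirty(p));
	return 0;
}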