mm/gup: handle huge pmd for follow_pmd_mask()
Replace pmd_trans_huge() with pmd_leaf() so that the path also covers
pmd_huge() entries whenever those are enabled.

FOLL_TOUCH and FOLL_SPLIT_PMD only apply to THP; they do not yet apply to
hugetlb mappings.
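
To make the distinction concrete, here is a minimal sketch (not part of the
patch; thp_wants_flag_handling() is a hypothetical helper, while pmd_leaf(),
pmd_trans_huge(), FOLL_TOUCH and FOLL_SPLIT_PMD are the kernel symbols
discussed above):

	/*
	 * Sketch only: pmd_leaf() is true for any huge leaf entry (THP or
	 * hugetlb), while pmd_trans_huge() is true only for THP, so the
	 * THP-only operations keep the narrower check even after the
	 * walker itself switches to pmd_leaf().
	 */
	static bool thp_wants_flag_handling(pmd_t pmdval, unsigned int flags)
	{
		if (!pmd_leaf(pmdval))
			return false;	/* not a huge mapping at all */
		/* hugetlb leaves must skip FOLL_TOUCH/FOLL_SPLIT_PMD */
		return pmd_trans_huge(pmdval) &&
		       (flags & (FOLL_TOUCH | FOLL_SPLIT_PMD));
	}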

Since follow_trans_huge_pmd() can now process hugetlb pages, rename it to
follow_huge_pmd() to match what it does, and move it into gup.c so that it
does not depend on CONFIG_THP.

While at it, move the ctx->page_mask setup into follow_huge_pmd() so that it
is only set when the page is valid.  Setting it earlier was not a bug even
when GUP failed (page==NULL), because follow_page_mask() callers always
ignore page_mask in that case; still, setting it only on success makes the
code cleaner.
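
A caller-side sketch of why the stale value was harmless, simplified from
the __get_user_pages() logic (pages_covered_by_entry() is a hypothetical
helper; struct follow_page_context and its page_mask field are real):

	/*
	 * ctx->page_mask is only consulted after follow_page_mask()
	 * returned a valid page, so a value left over from a failed
	 * lookup is never read.
	 */
	static unsigned long pages_covered_by_entry(unsigned long address,
						    struct follow_page_context *ctx)
	{
		/* subpage index of @address within the (possibly huge) leaf */
		unsigned long subpage = (address >> PAGE_SHIFT) & ctx->page_mask;

		/* remaining pages mapped by the same leaf entry, always >= 1 */
		return ctx->page_mask + 1 - subpage;
	}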

[[email protected]: allow follow_pmd_mask() to take hugetlb tail pages]
  Link: https://lkml.kernel.org/r/[email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Peter Xu <[email protected]>
Reviewed-by: Jason Gunthorpe <[email protected]>
Tested-by: Ryan Roberts <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Cc: Andrew Jones <[email protected]>
Cc: Aneesh Kumar K.V (IBM) <[email protected]>
Cc: Axel Rasmussen <[email protected]>
Cc: Christophe Leroy <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: David Hildenbrand <[email protected]>
Cc: James Houghton <[email protected]>
Cc: John Hubbard <[email protected]>
Cc: Kirill A. Shutemov <[email protected]>
Cc: Lorenzo Stoakes <[email protected]>
Cc: Matthew Wilcox (Oracle) <[email protected]>
Cc: Michael Ellerman <[email protected]>
Cc: "Mike Rapoport (IBM)" <[email protected]>
Cc: Muchun Song <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Yang Shi <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
xzpeter authored and akpm00 committed Apr 26, 2024
1 parent 1b16761 commit 4418c52
Showing 3 changed files with 102 additions and 93 deletions.
104 changes: 98 additions & 6 deletions mm/gup.c
@@ -580,13 +580,105 @@ static struct page *follow_huge_pud(struct vm_area_struct *vma,
 
 	return page;
 }
+
+/* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */
+static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
+					struct vm_area_struct *vma,
+					unsigned int flags)
+{
+	/* If the pmd is writable, we can write to the page. */
+	if (pmd_write(pmd))
+		return true;
+
+	/* Maybe FOLL_FORCE is set to override it? */
+	if (!(flags & FOLL_FORCE))
+		return false;
+
+	/* But FOLL_FORCE has no effect on shared mappings */
+	if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
+		return false;
+
+	/* ... or read-only private ones */
+	if (!(vma->vm_flags & VM_MAYWRITE))
+		return false;
+
+	/* ... or already writable ones that just need to take a write fault */
+	if (vma->vm_flags & VM_WRITE)
+		return false;
+
+	/*
+	 * See can_change_pte_writable(): we broke COW and could map the page
+	 * writable if we have an exclusive anonymous page ...
+	 */
+	if (!page || !PageAnon(page) || !PageAnonExclusive(page))
+		return false;
+
+	/* ... and a write-fault isn't required for other reasons. */
+	if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
+		return false;
+	return !userfaultfd_huge_pmd_wp(vma, pmd);
+}
+
+static struct page *follow_huge_pmd(struct vm_area_struct *vma,
+				    unsigned long addr, pmd_t *pmd,
+				    unsigned int flags,
+				    struct follow_page_context *ctx)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	pmd_t pmdval = *pmd;
+	struct page *page;
+	int ret;
+
+	assert_spin_locked(pmd_lockptr(mm, pmd));
+
+	page = pmd_page(pmdval);
+	if ((flags & FOLL_WRITE) &&
+	    !can_follow_write_pmd(pmdval, page, vma, flags))
+		return NULL;
+
+	/* Avoid dumping huge zero page */
+	if ((flags & FOLL_DUMP) && is_huge_zero_pmd(pmdval))
+		return ERR_PTR(-EFAULT);
+
+	if (pmd_protnone(*pmd) && !gup_can_follow_protnone(vma, flags))
+		return NULL;
+
+	if (!pmd_write(pmdval) && gup_must_unshare(vma, flags, page))
+		return ERR_PTR(-EMLINK);
+
+	VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
+		       !PageAnonExclusive(page), page);
+
+	ret = try_grab_page(page, flags);
+	if (ret)
+		return ERR_PTR(ret);
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	if (pmd_trans_huge(pmdval) && (flags & FOLL_TOUCH))
+		touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
+	ctx->page_mask = HPAGE_PMD_NR - 1;
+
+	return page;
+}
+
 #else /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */
 static struct page *follow_huge_pud(struct vm_area_struct *vma,
 				    unsigned long addr, pud_t *pudp,
 				    int flags, struct follow_page_context *ctx)
 {
 	return NULL;
 }
+
+static struct page *follow_huge_pmd(struct vm_area_struct *vma,
+				    unsigned long addr, pmd_t *pmd,
+				    unsigned int flags,
+				    struct follow_page_context *ctx)
+{
+	return NULL;
+}
 #endif /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */
 
 static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
@@ -784,31 +876,31 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 			return page;
 		return no_page_table(vma, flags, address);
 	}
-	if (likely(!pmd_trans_huge(pmdval)))
+	if (likely(!pmd_leaf(pmdval)))
 		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
 
 	if (pmd_protnone(pmdval) && !gup_can_follow_protnone(vma, flags))
 		return no_page_table(vma, flags, address);
 
 	ptl = pmd_lock(mm, pmd);
-	if (unlikely(!pmd_present(*pmd))) {
+	pmdval = *pmd;
+	if (unlikely(!pmd_present(pmdval))) {
 		spin_unlock(ptl);
 		return no_page_table(vma, flags, address);
 	}
-	if (unlikely(!pmd_trans_huge(*pmd))) {
+	if (unlikely(!pmd_leaf(pmdval))) {
 		spin_unlock(ptl);
 		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
 	}
-	if (flags & FOLL_SPLIT_PMD) {
+	if (pmd_trans_huge(pmdval) && (flags & FOLL_SPLIT_PMD)) {
 		spin_unlock(ptl);
 		split_huge_pmd(vma, pmd, address);
 		/* If pmd was left empty, stuff a page table in there quickly */
 		return pte_alloc(mm, pmd) ? ERR_PTR(-ENOMEM) :
 			follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
 	}
-	page = follow_trans_huge_pmd(vma, address, pmd, flags);
+	page = follow_huge_pmd(vma, address, pmd, flags, ctx);
 	spin_unlock(ptl);
-	ctx->page_mask = HPAGE_PMD_NR - 1;
 	return page;
 }

86 changes: 2 additions & 84 deletions mm/huge_memory.c
@@ -1221,8 +1221,8 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
 
-static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
-		      pmd_t *pmd, bool write)
+void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
+	       pmd_t *pmd, bool write)
 {
 	pmd_t _pmd;

@@ -1577,88 +1577,6 @@ static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
 	return pmd_dirty(pmd);
 }
 
-/* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */
-static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
-					struct vm_area_struct *vma,
-					unsigned int flags)
-{
-	/* If the pmd is writable, we can write to the page. */
-	if (pmd_write(pmd))
-		return true;
-
-	/* Maybe FOLL_FORCE is set to override it? */
-	if (!(flags & FOLL_FORCE))
-		return false;
-
-	/* But FOLL_FORCE has no effect on shared mappings */
-	if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
-		return false;
-
-	/* ... or read-only private ones */
-	if (!(vma->vm_flags & VM_MAYWRITE))
-		return false;
-
-	/* ... or already writable ones that just need to take a write fault */
-	if (vma->vm_flags & VM_WRITE)
-		return false;
-
-	/*
-	 * See can_change_pte_writable(): we broke COW and could map the page
-	 * writable if we have an exclusive anonymous page ...
-	 */
-	if (!page || !PageAnon(page) || !PageAnonExclusive(page))
-		return false;
-
-	/* ... and a write-fault isn't required for other reasons. */
-	if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
-		return false;
-	return !userfaultfd_huge_pmd_wp(vma, pmd);
-}
-
-struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
-				   unsigned long addr,
-				   pmd_t *pmd,
-				   unsigned int flags)
-{
-	struct mm_struct *mm = vma->vm_mm;
-	struct page *page;
-	int ret;
-
-	assert_spin_locked(pmd_lockptr(mm, pmd));
-
-	page = pmd_page(*pmd);
-	VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
-
-	if ((flags & FOLL_WRITE) &&
-	    !can_follow_write_pmd(*pmd, page, vma, flags))
-		return NULL;
-
-	/* Avoid dumping huge zero page */
-	if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
-		return ERR_PTR(-EFAULT);
-
-	if (pmd_protnone(*pmd) && !gup_can_follow_protnone(vma, flags))
-		return NULL;
-
-	if (!pmd_write(*pmd) && gup_must_unshare(vma, flags, page))
-		return ERR_PTR(-EMLINK);
-
-	VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
-		       !PageAnonExclusive(page), page);
-
-	ret = try_grab_page(page, flags);
-	if (ret)
-		return ERR_PTR(ret);
-
-	if (flags & FOLL_TOUCH)
-		touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
-
-	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
-	VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
-
-	return page;
-}
-
 /* NUMA hinting page fault entry point for trans huge pmds */
 vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
 {
5 changes: 2 additions & 3 deletions mm/internal.h
@@ -1114,9 +1114,8 @@ int __must_check try_grab_page(struct page *page, unsigned int flags);
  */
 void touch_pud(struct vm_area_struct *vma, unsigned long addr,
 	       pud_t *pud, bool write);
-struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
-				   unsigned long addr, pmd_t *pmd,
-				   unsigned int flags);
+void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
+	       pmd_t *pmd, bool write);
 
 /*
  * mm/mmap.c
