mm/gup: move gup_must_unshare() to mm/internal.h
This function is only used in gup.c and is closely related to it.  It touches
FOLL_PIN, so it must be moved before the next patch.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Jason Gunthorpe <[email protected]>
Reviewed-by: John Hubbard <[email protected]>
Reviewed-by: David Hildenbrand <[email protected]>
Cc: Alistair Popple <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: Claudio Imbrenda <[email protected]>
Cc: David Howells <[email protected]>
Cc: Mike Rapoport (IBM) <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
jgunthorpe authored and akpm00 committed Feb 10, 2023
1 parent 9198a91 commit 63b6051
Showing 2 changed files with 65 additions and 65 deletions.
65 changes: 0 additions & 65 deletions include/linux/mm.h
@@ -3180,71 +3180,6 @@ static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
	return 0;
}

/*
* Indicates for which pages that are write-protected in the page table,
* whether GUP has to trigger unsharing via FAULT_FLAG_UNSHARE such that the
* GUP pin will remain consistent with the pages mapped into the page tables
* of the MM.
*
* Temporary unmapping of PageAnonExclusive() pages or clearing of
* PageAnonExclusive() has to protect against concurrent GUP:
* * Ordinary GUP: Using the PT lock
* * GUP-fast and fork(): mm->write_protect_seq
* * GUP-fast and KSM or temporary unmapping (swap, migration): see
* page_try_share_anon_rmap()
*
* Must be called with the (sub)page that's actually referenced via the
* page table entry, which might not necessarily be the head page for a
* PTE-mapped THP.
*
* If the vma is NULL, we're coming from the GUP-fast path and might have
* to fall back to the slow path just to look up the vma.
*/
static inline bool gup_must_unshare(struct vm_area_struct *vma,
				    unsigned int flags, struct page *page)
{
	/*
	 * FOLL_WRITE is implicitly handled correctly as the page table entry
	 * has to be writable -- and if it references (part of) an anonymous
	 * folio, that part is required to be marked exclusive.
	 */
	if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
		return false;
	/*
	 * Note: PageAnon(page) is stable until the page is actually getting
	 * freed.
	 */
	if (!PageAnon(page)) {
		/*
		 * We only care about R/O long-term pinning: R/O short-term
		 * pinning does not have the semantics to observe successive
		 * changes through the process page tables.
		 */
		if (!(flags & FOLL_LONGTERM))
			return false;

		/* We really need the vma ... */
		if (!vma)
			return true;

		/*
		 * ... because we only care about writable private ("COW")
		 * mappings where we have to break COW early.
		 */
		return is_cow_mapping(vma->vm_flags);
	}

	/* Paired with a memory barrier in page_try_share_anon_rmap(). */
	if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
		smp_rmb();

	/*
	 * Note that PageKsm() pages cannot be exclusive, and consequently,
	 * cannot get pinned.
	 */
	return !PageAnonExclusive(page);
}

/*
* Indicates whether GUP can follow a PROT_NONE mapped page, or whether
* a (NUMA hinting) fault is required.
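An aside on the function being moved: its opening flag test, (flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN, filters out every caller except one taking a read-only FOLL_PIN, so only R/O pins ever reach the unshare logic. A minimal, self-contained sketch of just that predicate; the FOLL_* values below are illustrative stand-ins, not the kernel's real bit assignments:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in values only; the real FOLL_* bits are kernel-internal. */
#define FOLL_WRITE 0x01
#define FOLL_PIN   0x02

/* Mirrors the first check in gup_must_unshare(): true only for a R/O pin. */
static bool is_ro_pin(unsigned int flags)
{
	return (flags & (FOLL_WRITE | FOLL_PIN)) == FOLL_PIN;
}

int main(void)
{
	printf("%d\n", is_ro_pin(FOLL_PIN));              /* 1: R/O pin, unshare logic applies */
	printf("%d\n", is_ro_pin(FOLL_PIN | FOLL_WRITE)); /* 0: writable pin, PTE writability suffices */
	printf("%d\n", is_ro_pin(FOLL_WRITE));            /* 0: write reference without a pin */
	return 0;
}
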
65 changes: 65 additions & 0 deletions mm/internal.h
@@ -858,6 +858,71 @@ int migrate_device_coherent_page(struct page *page);
struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags);
int __must_check try_grab_page(struct page *page, unsigned int flags);

/*
* Indicates for which pages that are write-protected in the page table,
* whether GUP has to trigger unsharing via FAULT_FLAG_UNSHARE such that the
* GUP pin will remain consistent with the pages mapped into the page tables
* of the MM.
*
* Temporary unmapping of PageAnonExclusive() pages or clearing of
* PageAnonExclusive() has to protect against concurrent GUP:
* * Ordinary GUP: Using the PT lock
* * GUP-fast and fork(): mm->write_protect_seq
* * GUP-fast and KSM or temporary unmapping (swap, migration): see
* page_try_share_anon_rmap()
*
* Must be called with the (sub)page that's actually referenced via the
* page table entry, which might not necessarily be the head page for a
* PTE-mapped THP.
*
* If the vma is NULL, we're coming from the GUP-fast path and might have
* to fall back to the slow path just to look up the vma.
*/
static inline bool gup_must_unshare(struct vm_area_struct *vma,
				    unsigned int flags, struct page *page)
{
	/*
	 * FOLL_WRITE is implicitly handled correctly as the page table entry
	 * has to be writable -- and if it references (part of) an anonymous
	 * folio, that part is required to be marked exclusive.
	 */
	if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
		return false;
	/*
	 * Note: PageAnon(page) is stable until the page is actually getting
	 * freed.
	 */
	if (!PageAnon(page)) {
		/*
		 * We only care about R/O long-term pinning: R/O short-term
		 * pinning does not have the semantics to observe successive
		 * changes through the process page tables.
		 */
		if (!(flags & FOLL_LONGTERM))
			return false;

		/* We really need the vma ... */
		if (!vma)
			return true;

		/*
		 * ... because we only care about writable private ("COW")
		 * mappings where we have to break COW early.
		 */
		return is_cow_mapping(vma->vm_flags);
	}

	/* Paired with a memory barrier in page_try_share_anon_rmap(). */
	if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
		smp_rmb();

	/*
	 * Note that PageKsm() pages cannot be exclusive, and consequently,
	 * cannot get pinned.
	 */
	return !PageAnonExclusive(page);
}

extern bool mirrored_kernelcore;

static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
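For orientation now that the helper is private to mm/: its consumers are the GUP walkers in mm/gup.c, which probe write-protected entries before taking a FOLL_PIN reference. Below is a condensed, hypothetical sketch of that calling pattern; the function name and error handling are illustrative, not actual mm/gup.c code. Note the vma == NULL convention from the comment above: GUP-fast has no vma in hand, so gup_must_unshare() answers conservatively and the caller retries via the slow path, which can then raise FAULT_FLAG_UNSHARE.

/* Hypothetical caller in mm/gup.c, after #include "internal.h". */
static int example_try_pin_wp_page(struct page *page, unsigned int flags)
{
	int ret;

	/* GUP-fast style: no vma available, so pass NULL. */
	if (gup_must_unshare(NULL, flags, page))
		return -EAGAIN;	/* retry via the slow path to unshare */

	/* try_grab_page() is also declared in mm/internal.h (see above). */
	ret = try_grab_page(page, flags);
	if (ret)
		return ret;
	return 0;
}

Slow-path callers pass the real vma instead, which lets the FOLL_LONGTERM branch of gup_must_unshare() distinguish writable private mappings via is_cow_mapping() rather than failing conservatively.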
