mm/huge_memory: remove stale locking logic from __split_huge_pmd()
Let's remove the stale folio-locking logic that was only required for reuse_swap_page(): an anonymous folio had to be locked so that a concurrent reuse_swap_page() would see a stable mapcount, and nothing needs that anymore.
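For readers unfamiliar with the pattern being deleted: the folio lock can sleep, so it cannot be taken while the pmd spinlock (ptl) is held. The old code therefore snapshotted the pmd, dropped the spinlock, took the folio lock, re-took the spinlock, and retried whenever the pmd had changed in between (the pmd_same() check). Below is a minimal userspace sketch of that drop-and-revalidate idiom, not kernel code: a pthread spinlock stands in for the ptl, a mutex for the folio lock, and a plain counter for the pmd. All names are illustrative.

#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t ptl;  /* stands in for the pmd spinlock */
static pthread_mutex_t folio_mutex = PTHREAD_MUTEX_INITIALIZER; /* "folio lock" */
static unsigned long pmd_val;   /* stands in for *pmd */

static void lock_both_and_revalidate(void)
{
	unsigned long snapshot;

	pthread_spin_lock(&ptl);
retry:
	if (pthread_mutex_trylock(&folio_mutex) != 0) {
		snapshot = pmd_val;                /* _pmd = *pmd           */
		pthread_spin_unlock(&ptl);         /* spin_unlock(ptl)      */
		pthread_mutex_lock(&folio_mutex);  /* folio_lock(folio)     */
		pthread_spin_lock(&ptl);           /* spin_lock(ptl)        */
		if (pmd_val != snapshot) {         /* !pmd_same(*pmd, _pmd) */
			pthread_mutex_unlock(&folio_mutex);
			goto retry;                /* state changed: retry  */
		}
	}
	/* Both locks held and the snapshot revalidated; do the work here. */
	pthread_mutex_unlock(&folio_mutex);
	pthread_spin_unlock(&ptl);
}

int main(void)
{
	pthread_spin_init(&ptl, PTHREAD_PROCESS_PRIVATE);
	lock_both_and_revalidate();
	puts("acquired, revalidated, released");
	pthread_spin_destroy(&ptl);
	return 0;
}

(Compile with: cc sketch.c -lpthread. In this single-threaded demo the trylock always succeeds; the retry path is the part that mattered under contention.) The revalidation is what forced the retry loop: once the ptl is dropped, a concurrent task may split or change the PMD, so the old code had to recheck with pmd_same() before trusting the folio it had locked. Removing the need for the folio lock removes the whole dance, as the diff below shows.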

[[email protected]: simplification, per Yang Shi]

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: David Hildenbrand <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Don Dutile <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Jan Kara <[email protected]>
Cc: Jann Horn <[email protected]>
Cc: Jason Gunthorpe <[email protected]>
Cc: John Hubbard <[email protected]>
Cc: Kirill A. Shutemov <[email protected]>
Cc: Liang Zhang <[email protected]>
Cc: Matthew Wilcox (Oracle) <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Mike Kravetz <[email protected]>
Cc: Mike Rapoport <[email protected]>
Cc: Nadav Amit <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: Peter Xu <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Roman Gushchin <[email protected]>
Cc: Shakeel Butt <[email protected]>
Cc: Yang Shi <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
davidhildenbrand authored and torvalds committed Mar 25, 2022
1 parent 55c62fa commit 7f76091
Showing 1 changed file with 4 additions and 36 deletions.
mm/huge_memory.c
@@ -2133,8 +2133,6 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 {
 	spinlock_t *ptl;
 	struct mmu_notifier_range range;
-	bool do_unlock_folio = false;
-	pmd_t _pmd;
 
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
 				address & HPAGE_PMD_MASK,
@@ -2153,42 +2151,12 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			goto out;
 	}
 
-repeat:
-	if (pmd_trans_huge(*pmd)) {
-		if (!folio) {
-			folio = page_folio(pmd_page(*pmd));
-			/*
-			 * An anonymous page must be locked, to ensure that a
-			 * concurrent reuse_swap_page() sees stable mapcount;
-			 * but reuse_swap_page() is not used on shmem or file,
-			 * and page lock must not be taken when zap_pmd_range()
-			 * calls __split_huge_pmd() while i_mmap_lock is held.
-			 */
-			if (folio_test_anon(folio)) {
-				if (unlikely(!folio_trylock(folio))) {
-					folio_get(folio);
-					_pmd = *pmd;
-					spin_unlock(ptl);
-					folio_lock(folio);
-					spin_lock(ptl);
-					if (unlikely(!pmd_same(*pmd, _pmd))) {
-						folio_unlock(folio);
-						folio_put(folio);
-						folio = NULL;
-						goto repeat;
-					}
-					folio_put(folio);
-				}
-				do_unlock_folio = true;
-			}
-		}
-	} else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd)))
-		goto out;
-	__split_huge_pmd_locked(vma, pmd, range.start, freeze);
+	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
+	    is_pmd_migration_entry(*pmd))
+		__split_huge_pmd_locked(vma, pmd, range.start, freeze);
+
 out:
 	spin_unlock(ptl);
-	if (do_unlock_folio)
-		folio_unlock(folio);
 	/*
 	 * No need to double call mmu_notifier->invalidate_range() callback.
 	 * They are 3 cases to consider inside __split_huge_pmd_locked():
