mm/thp/migration: switch from flush_tlb_range to flush_pmd_tlb_range
We remove one instance of flush_tlb_range here.  That was added by commit
f714f4f ("mm: numa: call MMU notifiers on THP migration").  But
pmdp_huge_clear_flush_notify should have done the required flush for us.
Hence remove the extra flush.

Signed-off-by: Aneesh Kumar K.V <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: "Kirill A. Shutemov" <[email protected]>
Cc: Vineet Gupta <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
kvaneesh authored and torvalds committed Mar 17, 2016
1 parent bcf6691 commit 458aa76
Showing 3 changed files with 22 additions and 17 deletions.
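
For context on why the extra flush is redundant: the generic pmdp_huge_clear_flush() in mm/pgtable-generic.c already flushes the PMD range when it clears the entry, and the _notify wrapper adds the MMU-notifier invalidation on top. A simplified sketch of those helpers (not the verbatim kernel source) follows:

/* Simplified sketch, not the exact kernel code: pmdp_huge_clear_flush()
 * clears the huge PMD and flushes its TLB range in one step.
 */
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp)
{
	pmd_t pmd;

	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	/* TLB entries for the old huge mapping are flushed here already */
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}

/* pmdp_huge_clear_flush_notify() wraps the above and additionally calls
 * mmu_notifier_invalidate_range(), so migrate_misplaced_transhuge_page()
 * does not need a separate flush_tlb_range() after set_pmd_at().
 */

Because that flush has already happened by the time set_pmd_at() installs the new entry, the flush_tlb_range() call removed in mm/migrate.c below did no additional useful work.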
17 changes: 17 additions & 0 deletions include/asm-generic/pgtable.h
@@ -783,6 +783,23 @@ static inline int pmd_clear_huge(pmd_t *pmd)
}
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
* ARCHes with special requirements for evicting THP backing TLB entries can
* implement this. Otherwise also, it can help optimize normal TLB flush in
* THP regime. stock flush_tlb_range() typically has optimization to nuke the
* entire TLB if flush span is greater than a threshold, which will
* likely be true for a single huge page. Thus a single thp flush will
* invalidate the entire TLB which is not desirable.
* e.g. see arch/arc: flush_pmd_tlb_range
*/
#define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
#else
#define flush_pmd_tlb_range(vma, addr, end) BUILD_BUG()
#endif
#endif

#endif /* !__ASSEMBLY__ */

#ifndef io_remap_pfn_range
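
As the new comment in pgtable.h notes, an architecture with special requirements for evicting THP TLB entries can bypass this fallback by defining __HAVE_ARCH_FLUSH_PMD_TLB_RANGE and supplying its own flush_pmd_tlb_range() (arch/arc does this). A hypothetical sketch of such an override, where my_arch_evict_huge_tlb_entry() is an invented placeholder rather than a real kernel function:

/* Hypothetical arch override, loosely modelled on the arch/arc approach;
 * my_arch_evict_huge_tlb_entry() is an invented stand-in for whatever
 * arch-specific primitive evicts a huge-page TLB entry.
 */
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE

void flush_pmd_tlb_range(struct vm_area_struct *vma,
			 unsigned long start, unsigned long end)
{
	unsigned long addr;

	/* Evict only the PMD-sized entries covering [start, end) instead of
	 * letting a generic flush_tlb_range() heuristic nuke the whole TLB
	 * for a span as large as a huge page.
	 */
	for (addr = start; addr < end; addr += HPAGE_PMD_SIZE)
		my_arch_evict_huge_tlb_entry(vma->vm_mm, addr);
}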
8 changes: 5 additions & 3 deletions mm/migrate.c
@@ -1773,7 +1773,10 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
put_page(new_page);
goto out_fail;
}

/*
* We are not sure a pending tlb flush here is for a huge page
* mapping or not. Hence use the tlb range variant
*/
if (mm_tlb_flush_pending(mm))
flush_tlb_range(vma, mmun_start, mmun_end);

@@ -1829,12 +1832,11 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
page_add_anon_rmap(new_page, vma, mmun_start, true);
pmdp_huge_clear_flush_notify(vma, mmun_start, pmd);
set_pmd_at(mm, mmun_start, pmd, entry);
flush_tlb_range(vma, mmun_start, mmun_end);
update_mmu_cache_pmd(vma, address, &entry);

if (page_count(page) != 2) {
set_pmd_at(mm, mmun_start, pmd, orig_entry);
flush_tlb_range(vma, mmun_start, mmun_end);
flush_pmd_tlb_range(vma, mmun_start, mmun_end);
mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
update_mmu_cache_pmd(vma, address, &entry);
page_remove_rmap(new_page, true);
14 changes: 0 additions & 14 deletions mm/pgtable-generic.c
@@ -84,20 +84,6 @@ pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE

/*
* ARCHes with special requirements for evicting THP backing TLB entries can
* implement this. Otherwise also, it can help optimize normal TLB flush in
* THP regime. stock flush_tlb_range() typically has optimization to nuke the
* entire TLB if flush span is greater than a threshold, which will
* likely be true for a single huge page. Thus a single thp flush will
* invalidate the entire TLB which is not desirable.
* e.g. see arch/arc: flush_pmd_tlb_range
*/
#define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
#endif

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp,
