mmu_gather: Force tlb-flush VM_PFNMAP vmas

Jann reported a race between munmap() and unmap_mapping_range(), where
unmap_mapping_range() will no-op once unmap_vmas() has unlinked the
VMA; however munmap() will not yet have invalidated the TLBs.

Therefore unmap_mapping_range() will complete while there are still
(stale) TLB entries for the specified range.

Mitigate this by force flushing TLBs for VM_PFNMAP ranges.

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Acked-by: Will Deacon <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Peter Zijlstra authored and torvalds committed Jul 21, 2022
1 parent 18ba064 commit b67fbeb
Showing 1 changed file with 17 additions and 16 deletions.
include/asm-generic/tlb.h: 17 additions & 16 deletions
@@ -303,6 +303,7 @@ struct mmu_gather {
          */
         unsigned int vma_exec : 1;
         unsigned int vma_huge : 1;
+        unsigned int vma_pfn  : 1;

         unsigned int batch_count;

@@ -373,7 +374,6 @@ tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
 #else /* CONFIG_MMU_GATHER_NO_RANGE */

 #ifndef tlb_flush
-
 /*
  * When an architecture does not provide its own tlb_flush() implementation
  * but does have a reasonably efficient flush_vma_range() implementation
@@ -393,6 +393,9 @@ static inline void tlb_flush(struct mmu_gather *tlb)
                 flush_tlb_range(&vma, tlb->start, tlb->end);
         }
 }
+#endif
+
+#endif /* CONFIG_MMU_GATHER_NO_RANGE */

 static inline void
 tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
@@ -410,17 +413,9 @@ tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
          */
         tlb->vma_huge = is_vm_hugetlb_page(vma);
         tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
+        tlb->vma_pfn  = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP));
 }

-#else
-
-static inline void
-tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
-
-#endif
-
-#endif /* CONFIG_MMU_GATHER_NO_RANGE */
-
 static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
         /*
@@ -507,16 +502,22 @@ static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *

 static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 {
-        if (tlb->fullmm || IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS))
+        if (tlb->fullmm)
                 return;

         /*
-         * Do a TLB flush and reset the range at VMA boundaries; this avoids
-         * the ranges growing with the unused space between consecutive VMAs,
-         * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
-         * this.
+         * VM_PFNMAP is more fragile because the core mm will not track the
+         * page mapcount -- there might not be page-frames for these PFNs after
+         * all. Force flush TLBs for such ranges to avoid munmap() vs
+         * unmap_mapping_range() races.
          */
-        tlb_flush_mmu_tlbonly(tlb);
+        if (tlb->vma_pfn || !IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS)) {
+                /*
+                 * Do a TLB flush and reset the range at VMA boundaries; this avoids
+                 * the ranges growing with the unused space between consecutive VMAs.
+                 */
+                tlb_flush_mmu_tlbonly(tlb);
+        }
 }

 /*
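
For readers who want the new policy in isolation, below is a minimal, self-contained C sketch of the control flow the last hunk introduces to tlb_end_vma(). The struct, the MERGE_VMAS macro, and the stub flush routine are invented stand-ins for illustration, not the kernel's types or APIs; the point is only that when VMA merging is enabled the boundary flush is normally deferred, but it is forced for VM_PFNMAP/VM_MIXEDMAP VMAs so a racing unmap_mapping_range() cannot complete while stale TLB entries for the range remain.

/*
 * Standalone sketch of the tlb_end_vma() decision added by this commit.
 * All names here (mmu_gather_sketch, MERGE_VMAS, *_stub) are simplified
 * stand-ins, not kernel definitions.
 */
#include <stdbool.h>
#include <stdio.h>

#define MERGE_VMAS 1    /* stand-in for CONFIG_MMU_GATHER_MERGE_VMAS=y */

struct mmu_gather_sketch {
        bool fullmm;    /* tearing down the whole address space */
        bool vma_pfn;   /* last VMA had VM_PFNMAP or VM_MIXEDMAP set */
};

static void tlb_flush_mmu_tlbonly_stub(struct mmu_gather_sketch *tlb)
{
        (void)tlb;
        /* In the kernel this flushes the accumulated TLB range. */
        printf("TLB flushed at VMA boundary\n");
}

static void tlb_end_vma_sketch(struct mmu_gather_sketch *tlb)
{
        if (tlb->fullmm)
                return;

        /*
         * With VMA merging enabled the flush is normally deferred past the
         * VMA boundary; for PFNMAP/MIXEDMAP VMAs it is forced so stale TLB
         * entries cannot outlive the unlinked VMA.
         */
        if (tlb->vma_pfn || !MERGE_VMAS)
                tlb_flush_mmu_tlbonly_stub(tlb);
}

int main(void)
{
        struct mmu_gather_sketch normal = { .fullmm = false, .vma_pfn = false };
        struct mmu_gather_sketch pfnmap = { .fullmm = false, .vma_pfn = true };

        tlb_end_vma_sketch(&normal);    /* no output: flush deferred */
        tlb_end_vma_sketch(&pfnmap);    /* prints the forced-flush message */
        return 0;
}

Built with any C99 compiler, the first call prints nothing while the second prints the flush message, mirroring the forced per-VMA flush for PFN mappings.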
