Skip to content

Commit

Permalink
mmap locking API: convert mmap_sem call sites missed by coccinelle
Browse files Browse the repository at this point in the history
Convert the last few remaining mmap_sem rwsem calls to use the new mmap
locking API.  These were missed by coccinelle for some reason (I think
coccinelle does not support some of the preprocessor constructs in these
files?)

[[email protected]: convert linux-next leftovers]
[[email protected]: more linux-next leftovers]
[[email protected]: more linux-next leftovers]

Signed-off-by: Michel Lespinasse <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Reviewed-by: Daniel Jordan <[email protected]>
Reviewed-by: Laurent Dufour <[email protected]>
Reviewed-by: Vlastimil Babka <[email protected]>
Cc: Davidlohr Bueso <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Jason Gunthorpe <[email protected]>
Cc: Jerome Glisse <[email protected]>
Cc: John Hubbard <[email protected]>
Cc: Liam Howlett <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Ying Han <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Linus Torvalds <[email protected]>
  • Loading branch information
walken-google authored and torvalds committed Jun 9, 2020
1 parent d8ed45c commit 89154dd
Show file tree
Hide file tree
Showing 8 changed files with 34 additions and 34 deletions.
14 changes: 7 additions & 7 deletions arch/arm64/kvm/mmu.c
Original file line number Diff line number Diff line change
Expand Up @@ -1084,15 +1084,15 @@ void stage2_unmap_vm(struct kvm *kvm)
int idx;

idx = srcu_read_lock(&kvm->srcu);
down_read(&current->mm->mmap_sem);
mmap_read_lock(current->mm);
spin_lock(&kvm->mmu_lock);

slots = kvm_memslots(kvm);
kvm_for_each_memslot(memslot, slots)
stage2_unmap_memslot(kvm, memslot);

spin_unlock(&kvm->mmu_lock);
up_read(&current->mm->mmap_sem);
mmap_read_unlock(current->mm);
srcu_read_unlock(&kvm->srcu, idx);
}

Expand Down Expand Up @@ -1848,11 +1848,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
}

/* Let's check if we will get back a huge page backed by hugetlbfs */
down_read(&current->mm->mmap_sem);
mmap_read_lock(current->mm);
vma = find_vma_intersection(current->mm, hva, hva + 1);
if (unlikely(!vma)) {
kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
up_read(&current->mm->mmap_sem);
mmap_read_unlock(current->mm);
return -EFAULT;
}

Expand All @@ -1879,7 +1879,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (vma_pagesize == PMD_SIZE ||
(vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm)))
gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
up_read(&current->mm->mmap_sem);
mmap_read_unlock(current->mm);

/* We need minimum second+third level pages */
ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm),
Expand Down Expand Up @@ -2456,7 +2456,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
(kvm_phys_size(kvm) >> PAGE_SHIFT))
return -EFAULT;

down_read(&current->mm->mmap_sem);
mmap_read_lock(current->mm);
/*
* A memory region could potentially cover multiple VMAs, and any holes
* between them, so iterate over all of them to find out if we can map
Expand Down Expand Up @@ -2515,7 +2515,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
stage2_flush_memslot(kvm, memslot);
spin_unlock(&kvm->mmu_lock);
out:
up_read(&current->mm->mmap_sem);
mmap_read_unlock(current->mm);
return ret;
}

Expand Down
10 changes: 5 additions & 5 deletions arch/mips/mm/fault.c
Original file line number Diff line number Diff line change
Expand Up @@ -97,7 +97,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
retry:
down_read(&mm->mmap_sem);
mmap_read_lock(mm);
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
Expand Down Expand Up @@ -190,15 +190,15 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
}
}

up_read(&mm->mmap_sem);
mmap_read_unlock(mm);
return;

/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
bad_area:
up_read(&mm->mmap_sem);
mmap_read_unlock(mm);

bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
Expand Down Expand Up @@ -250,14 +250,14 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
* We ran out of memory, call the OOM killer, and return the userspace
* (which will retry the fault, or kill us if we got oom-killed).
*/
up_read(&mm->mmap_sem);
mmap_read_unlock(mm);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
return;

do_sigbus:
up_read(&mm->mmap_sem);
mmap_read_unlock(mm);

/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
Expand Down
4 changes: 2 additions & 2 deletions arch/riscv/mm/pageattr.c
Original file line number Diff line number Diff line change
Expand Up @@ -117,10 +117,10 @@ static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
if (!numpages)
return 0;

down_read(&init_mm.mmap_sem);
mmap_read_lock(&init_mm);
ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
&masks);
up_read(&init_mm.mmap_sem);
mmap_read_unlock(&init_mm);

flush_tlb_kernel_range(start, end);

Expand Down
8 changes: 4 additions & 4 deletions arch/x86/kvm/mmu/paging_tmpl.h
Original file line number Diff line number Diff line change
Expand Up @@ -165,22 +165,22 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
unsigned long pfn;
unsigned long paddr;

down_read(&current->mm->mmap_sem);
mmap_read_lock(current->mm);
vma = find_vma_intersection(current->mm, vaddr, vaddr + PAGE_SIZE);
if (!vma || !(vma->vm_flags & VM_PFNMAP)) {
up_read(&current->mm->mmap_sem);
mmap_read_unlock(current->mm);
return -EFAULT;
}
pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
paddr = pfn << PAGE_SHIFT;
table = memremap(paddr, PAGE_SIZE, MEMREMAP_WB);
if (!table) {
up_read(&current->mm->mmap_sem);
mmap_read_unlock(current->mm);
return -EFAULT;
}
ret = CMPXCHG(&table[index], orig_pte, new_pte);
memunmap(table);
up_read(&current->mm->mmap_sem);
mmap_read_unlock(current->mm);
}

return (ret != orig_pte);
Expand Down
4 changes: 2 additions & 2 deletions drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
Original file line number Diff line number Diff line change
Expand Up @@ -982,9 +982,9 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
}

mutex_unlock(&bo->mutex);
down_read(&current->mm->mmap_sem);
mmap_read_lock(current->mm);
vma = find_vma(current->mm, (unsigned long)userptr);
up_read(&current->mm->mmap_sem);
mmap_read_unlock(current->mm);
if (!vma) {
dev_err(atomisp_dev, "find_vma failed\n");
kfree(bo->page_obj);
Expand Down
8 changes: 4 additions & 4 deletions drivers/vfio/pci/vfio_pci.c
Original file line number Diff line number Diff line change
Expand Up @@ -1422,17 +1422,17 @@ static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try)
mutex_unlock(&vdev->vma_lock);

if (try) {
if (!down_read_trylock(&mm->mmap_sem)) {
if (!mmap_read_trylock(mm)) {
mmput(mm);
return 0;
}
} else {
down_read(&mm->mmap_sem);
mmap_read_lock(mm);
}
if (mmget_still_valid(mm)) {
if (try) {
if (!mutex_trylock(&vdev->vma_lock)) {
up_read(&mm->mmap_sem);
mmap_read_unlock(mm);
mmput(mm);
return 0;
}
Expand All @@ -1454,7 +1454,7 @@ static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try)
}
mutex_unlock(&vdev->vma_lock);
}
up_read(&mm->mmap_sem);
mmap_read_unlock(mm);
mmput(mm);
}
}
Expand Down
6 changes: 3 additions & 3 deletions fs/proc/base.c
Original file line number Diff line number Diff line change
Expand Up @@ -2322,7 +2322,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
if (!mm)
goto out_put_task;

ret = down_read_killable(&mm->mmap_sem);
ret = mmap_read_lock_killable(mm);
if (ret) {
mmput(mm);
goto out_put_task;
Expand All @@ -2349,7 +2349,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
p = genradix_ptr_alloc(&fa, nr_files++, GFP_KERNEL);
if (!p) {
ret = -ENOMEM;
up_read(&mm->mmap_sem);
mmap_read_unlock(mm);
mmput(mm);
goto out_put_task;
}
Expand All @@ -2358,7 +2358,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
p->end = vma->vm_end;
p->mode = vma->vm_file->f_mode;
}
up_read(&mm->mmap_sem);
mmap_read_unlock(mm);
mmput(mm);

for (i = 0; i < nr_files; i++) {
Expand Down
14 changes: 7 additions & 7 deletions lib/test_hmm.c
Original file line number Diff line number Diff line change
Expand Up @@ -245,9 +245,9 @@ static int dmirror_range_fault(struct dmirror *dmirror,
}

range->notifier_seq = mmu_interval_read_begin(range->notifier);
down_read(&mm->mmap_sem);
mmap_read_lock(mm);
ret = hmm_range_fault(range);
up_read(&mm->mmap_sem);
mmap_read_unlock(mm);
if (ret) {
if (ret == -EBUSY)
continue;
Expand Down Expand Up @@ -686,7 +686,7 @@ static int dmirror_migrate(struct dmirror *dmirror,
if (!mmget_not_zero(mm))
return -EINVAL;

down_read(&mm->mmap_sem);
mmap_read_lock(mm);
for (addr = start; addr < end; addr = next) {
vma = find_vma(mm, addr);
if (!vma || addr < vma->vm_start ||
Expand All @@ -713,7 +713,7 @@ static int dmirror_migrate(struct dmirror *dmirror,
dmirror_migrate_finalize_and_map(&args, dmirror);
migrate_vma_finalize(&args);
}
up_read(&mm->mmap_sem);
mmap_read_unlock(mm);
mmput(mm);

/* Return the migrated data for verification. */
Expand All @@ -733,7 +733,7 @@ static int dmirror_migrate(struct dmirror *dmirror,
return ret;

out:
up_read(&mm->mmap_sem);
mmap_read_unlock(mm);
mmput(mm);
return ret;
}
Expand Down Expand Up @@ -825,9 +825,9 @@ static int dmirror_range_snapshot(struct dmirror *dmirror,

range->notifier_seq = mmu_interval_read_begin(range->notifier);

down_read(&mm->mmap_sem);
mmap_read_lock(mm);
ret = hmm_range_fault(range);
up_read(&mm->mmap_sem);
mmap_read_unlock(mm);
if (ret) {
if (ret == -EBUSY)
continue;
Expand Down

0 comments on commit 89154dd

Please sign in to comment.