mm: trim __do_fault() arguments
Use the vm_fault structure to pass cow_page, page, and entry in and out of
the function.

That reduces the number of __do_fault() arguments from 4 to 1.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Jan Kara <[email protected]>
Reviewed-by: Ross Zwisler <[email protected]>
Acked-by: Kirill A. Shutemov <[email protected]>
Cc: Dan Williams <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
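
To make the shape of the refactoring concrete, here is a minimal, self-contained C sketch of the pattern the patch applies. The fault_ctx struct and do_fault() stub below are simplified stand-ins invented for illustration, not the kernel's actual vm_fault or __do_fault(): instead of threading one input (cow_page) and two outputs (page, entry) through separate arguments, caller and helper communicate entirely through fields of a single context struct.

#include <stdio.h>

struct page { int id; };

/*
 * Simplified stand-in for the kernel's struct vm_fault: one context
 * object carries the helper's input (cow_page) and outputs (page, entry).
 */
struct fault_ctx {
	struct page *cow_page;	/* in:  preallocated COW page, or NULL */
	struct page *page;	/* out: page the fault handler returned */
	void *entry;		/* out: DAX entry, when applicable */
};

/*
 * Before the refactor, the equivalent helper would be called as
 *   do_fault(ctx, cow_page, &page, &entry);
 * afterwards, everything travels inside the context struct.
 */
static int do_fault(struct fault_ctx *ctx)
{
	static struct page faulted = { .id = 42 };

	/* A real handler would consult ctx->cow_page; this stub only
	 * publishes its results through the out fields. */
	ctx->page = &faulted;
	ctx->entry = NULL;
	return 0;
}

int main(void)
{
	struct fault_ctx ctx = { .cow_page = NULL };

	if (do_fault(&ctx) == 0)
		printf("fault handler returned page %d\n", ctx.page->id);
	return 0;
}

The same trade is what the patch below makes: __do_fault() drops from four parameters to one, at the cost of making vmf->cow_page, vmf->page, and vmf->entry part of the function's implicit contract.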
jankara authored and torvalds committed Dec 15, 2016
1 parent 667240e commit 936ca80
Showing 1 changed file (mm/memory.c) with 29 additions and 38 deletions.
@@ -2844,26 +2844,22 @@ static int do_anonymous_page(struct vm_fault *vmf)
  * released depending on flags and vma->vm_ops->fault() return value.
  * See filemap_fault() and __lock_page_retry().
  */
-static int __do_fault(struct vm_fault *vmf, struct page *cow_page,
-		struct page **page, void **entry)
+static int __do_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	int ret;
 
-	vmf->cow_page = cow_page;
-
 	ret = vma->vm_ops->fault(vma, vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
-	if (ret & VM_FAULT_DAX_LOCKED) {
-		*entry = vmf->entry;
+	if (ret & VM_FAULT_DAX_LOCKED)
 		return ret;
-	}
 
 	if (unlikely(PageHWPoison(vmf->page))) {
 		if (ret & VM_FAULT_LOCKED)
 			unlock_page(vmf->page);
 		put_page(vmf->page);
+		vmf->page = NULL;
 		return VM_FAULT_HWPOISON;
 	}
 
@@ -2872,7 +2868,6 @@ static int __do_fault(struct vm_fault *vmf, struct page *cow_page,
 	else
 		VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
 
-	*page = vmf->page;
 	return ret;
 }

@@ -3208,7 +3203,6 @@ static int do_fault_around(struct vm_fault *vmf)
 static int do_read_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct page *fault_page;
 	int ret = 0;
 
 	/*
@@ -3222,75 +3216,72 @@ static int do_read_fault(struct vm_fault *vmf)
 		return ret;
 	}
 
-	ret = __do_fault(vmf, NULL, &fault_page, NULL);
+	ret = __do_fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
 
-	ret |= alloc_set_pte(vmf, NULL, fault_page);
+	ret |= alloc_set_pte(vmf, NULL, vmf->page);
 	if (vmf->pte)
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
-	unlock_page(fault_page);
+	unlock_page(vmf->page);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
-		put_page(fault_page);
+		put_page(vmf->page);
 	return ret;
 }
 
 static int do_cow_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct page *fault_page, *new_page;
-	void *fault_entry;
 	struct mem_cgroup *memcg;
 	int ret;
 
 	if (unlikely(anon_vma_prepare(vma)))
 		return VM_FAULT_OOM;
 
-	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
-	if (!new_page)
+	vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
+	if (!vmf->cow_page)
 		return VM_FAULT_OOM;
 
-	if (mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL,
+	if (mem_cgroup_try_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL,
 				&memcg, false)) {
-		put_page(new_page);
+		put_page(vmf->cow_page);
 		return VM_FAULT_OOM;
 	}
 
-	ret = __do_fault(vmf, new_page, &fault_page, &fault_entry);
+	ret = __do_fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		goto uncharge_out;
 
 	if (!(ret & VM_FAULT_DAX_LOCKED))
-		copy_user_highpage(new_page, fault_page, vmf->address, vma);
-	__SetPageUptodate(new_page);
+		copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
+	__SetPageUptodate(vmf->cow_page);
 
-	ret |= alloc_set_pte(vmf, memcg, new_page);
+	ret |= alloc_set_pte(vmf, memcg, vmf->cow_page);
 	if (vmf->pte)
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
 	if (!(ret & VM_FAULT_DAX_LOCKED)) {
-		unlock_page(fault_page);
-		put_page(fault_page);
+		unlock_page(vmf->page);
+		put_page(vmf->page);
 	} else {
 		dax_unlock_mapping_entry(vma->vm_file->f_mapping, vmf->pgoff);
 	}
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		goto uncharge_out;
 	return ret;
 uncharge_out:
-	mem_cgroup_cancel_charge(new_page, memcg, false);
-	put_page(new_page);
+	mem_cgroup_cancel_charge(vmf->cow_page, memcg, false);
+	put_page(vmf->cow_page);
 	return ret;
 }
 
 static int do_shared_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct page *fault_page;
 	struct address_space *mapping;
 	int dirtied = 0;
 	int ret, tmp;
 
-	ret = __do_fault(vmf, NULL, &fault_page, NULL);
+	ret = __do_fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
 
@@ -3299,35 +3290,35 @@ static int do_shared_fault(struct vm_fault *vmf)
 	 * about to become writable
 	 */
 	if (vma->vm_ops->page_mkwrite) {
-		unlock_page(fault_page);
-		tmp = do_page_mkwrite(vma, fault_page, vmf->address);
+		unlock_page(vmf->page);
+		tmp = do_page_mkwrite(vma, vmf->page, vmf->address);
 		if (unlikely(!tmp ||
 				(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
-			put_page(fault_page);
+			put_page(vmf->page);
 			return tmp;
 		}
 	}
 
-	ret |= alloc_set_pte(vmf, NULL, fault_page);
+	ret |= alloc_set_pte(vmf, NULL, vmf->page);
 	if (vmf->pte)
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
 			VM_FAULT_RETRY))) {
-		unlock_page(fault_page);
-		put_page(fault_page);
+		unlock_page(vmf->page);
+		put_page(vmf->page);
 		return ret;
 	}
 
-	if (set_page_dirty(fault_page))
+	if (set_page_dirty(vmf->page))
 		dirtied = 1;
 	/*
 	 * Take a local copy of the address_space - page.mapping may be zeroed
 	 * by truncate after unlock_page(). The address_space itself remains
 	 * pinned by vma->vm_file's reference. We rely on unlock_page()'s
 	 * release semantics to prevent the compiler from undoing this copying.
 	 */
-	mapping = page_rmapping(fault_page);
-	unlock_page(fault_page);
+	mapping = page_rmapping(vmf->page);
+	unlock_page(vmf->page);
 	if ((dirtied || vma->vm_ops->page_mkwrite) && mapping) {
 		/*
 		 * Some device drivers do not set page.mapping but still
