mm: factor out common parts of write fault handling

Currently we duplicate handling of shared write faults in
wp_page_reuse() and do_shared_fault().  Factor them out into a common
function.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Jan Kara <[email protected]>
Reviewed-by: Ross Zwisler <[email protected]>
Acked-by: Kirill A. Shutemov <[email protected]>
Cc: Dan Williams <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
jankara authored and torvalds committed Dec 15, 2016
1 parent b1aa812 commit 97ba0c2
Showing 1 changed file with 37 additions and 41 deletions.
mm/memory.c: 78 changes (37 additions & 41 deletions)

@@ -2062,6 +2062,41 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
 	return ret;
 }
 
+/*
+ * Handle dirtying of a page in shared file mapping on a write fault.
+ *
+ * The function expects the page to be locked and unlocks it.
+ */
+static void fault_dirty_shared_page(struct vm_area_struct *vma,
+				    struct page *page)
+{
+	struct address_space *mapping;
+	bool dirtied;
+	bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
+
+	dirtied = set_page_dirty(page);
+	VM_BUG_ON_PAGE(PageAnon(page), page);
+	/*
+	 * Take a local copy of the address_space - page.mapping may be zeroed
+	 * by truncate after unlock_page(). The address_space itself remains
+	 * pinned by vma->vm_file's reference. We rely on unlock_page()'s
+	 * release semantics to prevent the compiler from undoing this copying.
+	 */
+	mapping = page_rmapping(page);
+	unlock_page(page);
+
+	if ((dirtied || page_mkwrite) && mapping) {
+		/*
+		 * Some device drivers do not set page.mapping
+		 * but still dirty their pages
+		 */
+		balance_dirty_pages_ratelimited(mapping);
+	}
+
+	if (!page_mkwrite)
+		file_update_time(vma->vm_file);
+}
+
 /*
  * Handle write page faults for pages that can be reused in the current vma
  *
@@ -2092,28 +2127,11 @@ static inline int wp_page_reuse(struct vm_fault *vmf, struct page *page,
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
 
 	if (dirty_shared) {
-		struct address_space *mapping;
-		int dirtied;
-
 		if (!page_mkwrite)
 			lock_page(page);
 
-		dirtied = set_page_dirty(page);
-		VM_BUG_ON_PAGE(PageAnon(page), page);
-		mapping = page->mapping;
-		unlock_page(page);
+		fault_dirty_shared_page(vma, page);
 		put_page(page);
-
-		if ((dirtied || page_mkwrite) && mapping) {
-			/*
-			 * Some device drivers do not set page.mapping
-			 * but still dirty their pages
-			 */
-			balance_dirty_pages_ratelimited(mapping);
-		}
-
-		if (!page_mkwrite)
-			file_update_time(vma->vm_file);
 	}
 
 	return VM_FAULT_WRITE;
@@ -3294,8 +3312,6 @@ static int do_cow_fault(struct vm_fault *vmf)
 static int do_shared_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct address_space *mapping;
-	int dirtied = 0;
 	int ret, tmp;
 
 	ret = __do_fault(vmf);
@@ -3324,27 +3340,7 @@ static int do_shared_fault(struct vm_fault *vmf)
 		return ret;
 	}
 
-	if (set_page_dirty(vmf->page))
-		dirtied = 1;
-	/*
-	 * Take a local copy of the address_space - page.mapping may be zeroed
-	 * by truncate after unlock_page(). The address_space itself remains
-	 * pinned by vma->vm_file's reference. We rely on unlock_page()'s
-	 * release semantics to prevent the compiler from undoing this copying.
-	 */
-	mapping = page_rmapping(vmf->page);
-	unlock_page(vmf->page);
-	if ((dirtied || vma->vm_ops->page_mkwrite) && mapping) {
-		/*
-		 * Some device drivers do not set page.mapping but still
-		 * dirty their pages
-		 */
-		balance_dirty_pages_ratelimited(mapping);
-	}
-
-	if (!vma->vm_ops->page_mkwrite)
-		file_update_time(vma->vm_file);
-
+	fault_dirty_shared_page(vma, vmf->page);
 	return ret;
 }
 
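For readers outside mm/, the contract of the new helper can be illustrated with a small mock-up. The sketch below compiles as ordinary userspace C; every type and function in it is a simplified stand-in for the kernel symbol of the same name (struct page, set_page_dirty(), unlock_page(), balance_dirty_pages_ratelimited()), not the real API. It models only the ordering the patch centralizes: dirty the page and snapshot its mapping while the page lock is held, unlock, and only then throttle writeback and update the file time.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel types; not the real definitions. */
struct address_space {
	const char *name;
};

struct page {
	bool locked;
	bool dirty;
	struct address_space *mapping;	/* truncate may clear this once unlocked */
};

struct vm_area_struct {
	bool has_page_mkwrite;		/* models vma->vm_ops->page_mkwrite != NULL */
};

/* Stand-in for set_page_dirty(): returns true if the page was newly dirtied. */
static bool set_page_dirty(struct page *page)
{
	bool newly_dirtied = !page->dirty;

	page->dirty = true;
	return newly_dirtied;
}

static void unlock_page(struct page *page)
{
	page->locked = false;
}

static void balance_dirty_pages_ratelimited(struct address_space *mapping)
{
	printf("throttling writeback against %s\n", mapping->name);
}

static void file_update_time(void)
{
	printf("updating the file's modification time\n");
}

/*
 * Models fault_dirty_shared_page(): expects @page locked, unlocks it.
 * The mapping is copied to a local before unlock_page(), because after
 * the unlock a concurrent truncate could clear page->mapping.
 */
static void fault_dirty_shared_page(struct vm_area_struct *vma,
				    struct page *page)
{
	struct address_space *mapping;
	bool dirtied;
	bool page_mkwrite = vma->has_page_mkwrite;

	dirtied = set_page_dirty(page);
	mapping = page->mapping;	/* snapshot while still locked */
	unlock_page(page);

	if ((dirtied || page_mkwrite) && mapping)
		balance_dirty_pages_ratelimited(mapping);

	if (!page_mkwrite)
		file_update_time();
}

int main(void)
{
	struct address_space mapping = { .name = "a shared file mapping" };
	struct page page = { .locked = true, .dirty = false, .mapping = &mapping };
	struct vm_area_struct vma = { .has_page_mkwrite = false };

	fault_dirty_shared_page(&vma, &page);	/* caller locked the page */
	return 0;
}

The same unlock/snapshot ordering previously appeared, slightly differently phrased, in both wp_page_reuse() and do_shared_fault(); factoring it out means the subtle part lives in exactly one place.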