Skip to content

Commit

Permalink
filemap: pass vm_fault to the mmap ra helpers
Browse files Browse the repository at this point in the history
All of the arguments to these functions come from the vmf.

Cut down on the number of arguments passed by simply passing in the vmf
to these two helpers.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Josef Bacik <[email protected]>
Reviewed-by: Andrew Morton <[email protected]>
Reviewed-by: Jan Kara <[email protected]>
Cc: Dave Chinner <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: "Kirill A. Shutemov" <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
  • Loading branch information
josefbacik authored and torvalds committed Mar 14, 2019
1 parent ebc551f commit 2a1180f
Showing 1 changed file with 14 additions and 14 deletions.
28 changes: 14 additions & 14 deletions mm/filemap.c
Original file line number Diff line number Diff line change
Expand Up @@ -2420,20 +2420,20 @@ static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
* Synchronous readahead happens when we don't even find
* a page in the page cache at all.
*/
static void do_sync_mmap_readahead(struct vm_area_struct *vma,
struct file_ra_state *ra,
struct file *file,
pgoff_t offset)
static void do_sync_mmap_readahead(struct vm_fault *vmf)
{
struct file *file = vmf->vma->vm_file;
struct file_ra_state *ra = &file->f_ra;
struct address_space *mapping = file->f_mapping;
pgoff_t offset = vmf->pgoff;

/* If we don't want any read-ahead, don't bother */
if (vma->vm_flags & VM_RAND_READ)
if (vmf->vma->vm_flags & VM_RAND_READ)
return;
if (!ra->ra_pages)
return;

if (vma->vm_flags & VM_SEQ_READ) {
if (vmf->vma->vm_flags & VM_SEQ_READ) {
page_cache_sync_readahead(mapping, ra, file, offset,
ra->ra_pages);
return;
Expand Down Expand Up @@ -2463,16 +2463,16 @@ static void do_sync_mmap_readahead(struct vm_area_struct *vma,
* Asynchronous readahead happens when we find the page and PG_readahead,
* so we want to possibly extend the readahead further..
*/
static void do_async_mmap_readahead(struct vm_area_struct *vma,
struct file_ra_state *ra,
struct file *file,
struct page *page,
pgoff_t offset)
static void do_async_mmap_readahead(struct vm_fault *vmf,
struct page *page)
{
struct file *file = vmf->vma->vm_file;
struct file_ra_state *ra = &file->f_ra;
struct address_space *mapping = file->f_mapping;
pgoff_t offset = vmf->pgoff;

/* If we don't want any read-ahead, don't bother */
if (vma->vm_flags & VM_RAND_READ)
if (vmf->vma->vm_flags & VM_RAND_READ)
return;
if (ra->mmap_miss > 0)
ra->mmap_miss--;
Expand Down Expand Up @@ -2531,10 +2531,10 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
* We found the page, so try async readahead before
* waiting for the lock.
*/
do_async_mmap_readahead(vmf->vma, ra, file, page, offset);
do_async_mmap_readahead(vmf, page);
} else if (!page) {
/* No page in the page cache at all */
do_sync_mmap_readahead(vmf->vma, ra, file, offset);
do_sync_mmap_readahead(vmf);
count_vm_event(PGMAJFAULT);
count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
ret = VM_FAULT_MAJOR;
Expand Down

0 comments on commit 2a1180f

Please sign in to comment.