mm: rename vma_pgoff_address back to vma_address
With all callers converted, we can use the nice shorter name.  Take this
opportunity to reorder the arguments to the logical order (larger object
first).

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Matthew Wilcox (Oracle) authored and akpm00 committed Apr 26, 2024
1 parent 412ad5f commit e0abfbb
Showing 4 changed files with 12 additions and 13 deletions.
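Illustrative only, not part of the commit: after this change a call site passes the VMA first, e.g. (hypothetical local names)

	/* before: page offset and count first, VMA last */
	addr = vma_pgoff_address(pgoff, nr_pages, vma);

	/* after: the larger object, the VMA, comes first */
	addr = vma_address(vma, pgoff, nr_pages);

The file-by-file hunks below show exactly this conversion at each caller.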
mm/internal.h: 4 additions, 5 deletions
@@ -805,17 +805,16 @@ void mlock_drain_remote(int cpu);
 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
 
 /**
- * vma_pgoff_address - Find the virtual address a page range is mapped at
+ * vma_address - Find the virtual address a page range is mapped at
+ * @vma: The vma which maps this object.
  * @pgoff: The page offset within its object.
  * @nr_pages: The number of pages to consider.
- * @vma: The vma which maps this object.
  *
  * If any page in this range is mapped by this VMA, return the first address
  * where any of these pages appear. Otherwise, return -EFAULT.
  */
-static inline unsigned long
-vma_pgoff_address(pgoff_t pgoff, unsigned long nr_pages,
-		struct vm_area_struct *vma)
+static inline unsigned long vma_address(struct vm_area_struct *vma,
+		pgoff_t pgoff, unsigned long nr_pages)
 {
 	unsigned long address;
 
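For context only, since the hunk above cuts off before the function body: the helper's job is the usual linear file-mapping arithmetic on vm_start and vm_pgoff. The sketch below shows what such a body looks like; it is not quoted verbatim from this commit and relies on the kernel's struct vm_area_struct and PAGE_SHIFT definitions.

static inline unsigned long vma_address(struct vm_area_struct *vma,
		pgoff_t pgoff, unsigned long nr_pages)
{
	unsigned long address;

	if (pgoff >= vma->vm_pgoff) {
		/* Range starts at or after the VMA's first page. */
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		/* Reject pages past the end of the VMA (or a wrapped address). */
		if (address < vma->vm_start || address >= vma->vm_end)
			address = -EFAULT;
	} else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
		/* Range starts before the VMA but overlaps it, so the first
		 * mapped address is simply the start of the VMA. */
		address = vma->vm_start;
	} else {
		address = -EFAULT;
	}
	return address;
}

The -EFAULT return is what the callers in the later hunks test against (pvmw.address == -EFAULT, VM_BUG_ON_VMA(address == -EFAULT, vma)).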
mm/memory-failure.c: 1 addition, 1 deletion
@@ -455,7 +455,7 @@ static void __add_to_kill(struct task_struct *tsk, struct page *p,
 	tk->addr = ksm_addr ? ksm_addr : page_address_in_vma(p, vma);
 	if (is_zone_device_page(p)) {
 		if (fsdax_pgoff != FSDAX_INVALID_PGOFF)
-			tk->addr = vma_pgoff_address(fsdax_pgoff, 1, vma);
+			tk->addr = vma_address(vma, fsdax_pgoff, 1);
 		tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr);
 	} else
 		tk->size_shift = page_shift(compound_head(p));
mm/page_vma_mapped.c: 1 addition, 1 deletion
@@ -334,7 +334,7 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
 		.flags = PVMW_SYNC,
 	};
 
-	pvmw.address = vma_pgoff_address(pgoff, 1, vma);
+	pvmw.address = vma_address(vma, pgoff, 1);
 	if (pvmw.address == -EFAULT)
 		return 0;
 	if (!page_vma_mapped_walk(&pvmw))
mm/rmap.c: 6 additions, 6 deletions
@@ -794,7 +794,7 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 
 	/* The !page__anon_vma above handles KSM folios */
 	pgoff = folio->index + folio_page_idx(folio, page);
-	return vma_pgoff_address(pgoff, 1, vma);
+	return vma_address(vma, pgoff, 1);
 }
 
 /*
@@ -1132,7 +1132,7 @@ int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
 	if (invalid_mkclean_vma(vma, NULL))
 		return 0;
 
-	pvmw.address = vma_pgoff_address(pgoff, nr_pages, vma);
+	pvmw.address = vma_address(vma, pgoff, nr_pages);
 	VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma);
 
 	return page_vma_mkclean_one(&pvmw);
@@ -2592,8 +2592,8 @@ static void rmap_walk_anon(struct folio *folio,
 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
 			pgoff_start, pgoff_end) {
 		struct vm_area_struct *vma = avc->vma;
-		unsigned long address = vma_pgoff_address(pgoff_start,
-				folio_nr_pages(folio), vma);
+		unsigned long address = vma_address(vma, pgoff_start,
+				folio_nr_pages(folio));
 
 		VM_BUG_ON_VMA(address == -EFAULT, vma);
 		cond_resched();
@@ -2654,8 +2654,8 @@ static void rmap_walk_file(struct folio *folio,
 lookup:
 	vma_interval_tree_foreach(vma, &mapping->i_mmap,
 			pgoff_start, pgoff_end) {
-		unsigned long address = vma_pgoff_address(pgoff_start,
-				folio_nr_pages(folio), vma);
+		unsigned long address = vma_address(vma, pgoff_start,
+				folio_nr_pages(folio));
 
 		VM_BUG_ON_VMA(address == -EFAULT, vma);
 		cond_resched();
