mm: introduce page_size()
Patch series "Make working with compound pages easier", v2.

These three patches add three helpers and convert the appropriate
places to use them.

This patch (of 3):

It's unnecessarily hard to find out the size of a potentially huge page.
Replace 'PAGE_SIZE << compound_order(page)' with page_size(page).

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Reviewed-by: Andrew Morton <[email protected]>
Reviewed-by: Ira Weiny <[email protected]>
Acked-by: Kirill A. Shutemov <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Matthew Wilcox (Oracle) authored and torvalds committed Sep 24, 2019
1 parent 1f18b29 commit a50b854
Showing 17 changed files with 35 additions and 38 deletions.
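
For orientation before the per-file hunks: the whole change reduces to the one-line helper below plus mechanical substitutions at the call sites. A condensed sketch (kernel context assumed; struct page, PAGE_SIZE and compound_order() come from the usual headers, and the caller shown is abridged from the arch/arm hunk):

	/* the helper added to include/linux/mm.h */
	static inline unsigned long page_size(struct page *page)
	{
		return PAGE_SIZE << compound_order(page);
	}

	/* before: open-coded, with a local that would now shadow the helper */
	size_t page_size = PAGE_SIZE << compound_order(page);
	__cpuc_flush_dcache_area(page_address(page), page_size);

	/* after */
	__cpuc_flush_dcache_area(page_address(page), page_size(page));

Note that the arm conversion also removes a local variable itself named page_size, which would otherwise collide with the new helper.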
3 changes: 1 addition & 2 deletions arch/arm/mm/flush.c
@@ -204,8 +204,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 	 * coherent with the kernels mapping.
 	 */
 	if (!PageHighMem(page)) {
-		size_t page_size = PAGE_SIZE << compound_order(page);
-		__cpuc_flush_dcache_area(page_address(page), page_size);
+		__cpuc_flush_dcache_area(page_address(page), page_size(page));
 	} else {
 		unsigned long i;
 		if (cache_is_vipt_nonaliasing()) {
3 changes: 1 addition & 2 deletions arch/arm64/mm/flush.c
@@ -56,8 +56,7 @@ void __sync_icache_dcache(pte_t pte)
 	struct page *page = pte_page(pte);
 
 	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
-		sync_icache_aliases(page_address(page),
-				    PAGE_SIZE << compound_order(page));
+		sync_icache_aliases(page_address(page), page_size(page));
 }
 EXPORT_SYMBOL_GPL(__sync_icache_dcache);
 
2 changes: 1 addition & 1 deletion arch/ia64/mm/init.c
@@ -64,7 +64,7 @@ __ia64_sync_icache_dcache (pte_t pte)
 	if (test_bit(PG_arch_1, &page->flags))
 		return;			/* i-cache is already coherent with d-cache */
 
-	flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
+	flush_icache_range(addr, addr + page_size(page));
 	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
 }
 
5 changes: 2 additions & 3 deletions drivers/crypto/chelsio/chtls/chtls_io.c
@@ -1078,7 +1078,7 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 			bool merge;
 
 			if (page)
-				pg_size <<= compound_order(page);
+				pg_size = page_size(page);
 			if (off < pg_size &&
 			    skb_can_coalesce(skb, i, page, off)) {
 				merge = 1;
@@ -1105,8 +1105,7 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 							   __GFP_NORETRY,
 							   order);
 				if (page)
-					pg_size <<=
-						compound_order(page);
+					pg_size <<= order;
 			}
 			if (!page) {
 				page = alloc_page(gfp);
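A note on the two chtls hunks above: at the first site pg_size still holds its initial PAGE_SIZE, so the old pg_size <<= compound_order(page) computed exactly page_size(page), which the new assignment states directly; at the second site the page has just come from alloc_pages(..., order), so shifting by the already-known order is equivalent and avoids re-reading the compound order from the page.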
4 changes: 2 additions & 2 deletions drivers/staging/android/ion/ion_system_heap.c
@@ -120,7 +120,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 		if (!page)
 			goto free_pages;
 		list_add_tail(&page->lru, &pages);
-		size_remaining -= PAGE_SIZE << compound_order(page);
+		size_remaining -= page_size(page);
 		max_order = compound_order(page);
 		i++;
 	}
@@ -133,7 +133,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 
 	sg = table->sgl;
 	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
-		sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
+		sg_set_page(sg, page, page_size(page), 0);
 		sg = sg_next(sg);
 		list_del(&page->lru);
 	}
3 changes: 1 addition & 2 deletions drivers/target/tcm_fc/tfc_io.c
@@ -136,8 +136,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
 					   page, off_in_page, tlen);
 			fr_len(fp) += tlen;
 			fp_skb(fp)->data_len += tlen;
-			fp_skb(fp)->truesize +=
-					PAGE_SIZE << compound_order(page);
+			fp_skb(fp)->truesize += page_size(page);
 		} else {
 			BUG_ON(!page);
 			from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
2 changes: 1 addition & 1 deletion fs/io_uring.c
@@ -3319,7 +3319,7 @@ static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
 	}
 
 	page = virt_to_head_page(ptr);
-	if (sz > (PAGE_SIZE << compound_order(page)))
+	if (sz > page_size(page))
 		return -EINVAL;
 
 	pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
2 changes: 1 addition & 1 deletion include/linux/hugetlb.h
@@ -454,7 +454,7 @@ static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
 static inline struct hstate *page_hstate(struct page *page)
 {
 	VM_BUG_ON_PAGE(!PageHuge(page), page);
-	return size_to_hstate(PAGE_SIZE << compound_order(page));
+	return size_to_hstate(page_size(page));
 }
 
 static inline unsigned hstate_index_to_shift(unsigned index)
6 changes: 6 additions & 0 deletions include/linux/mm.h
@@ -805,6 +805,12 @@ static inline void set_compound_order(struct page *page, unsigned int order)
 	page[1].compound_order = order;
 }
 
+/* Returns the number of bytes in this potentially compound page. */
+static inline unsigned long page_size(struct page *page)
+{
+	return PAGE_SIZE << compound_order(page);
+}
+
 void free_compound_page(struct page *page);
 
 #ifdef CONFIG_MMU
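For scale (an illustration, assuming a 4 KiB base page): an ordinary page has compound_order() == 0, so page_size() degenerates to PAGE_SIZE; an order-9 compound page, such as an x86-64 transparent huge page, gives 4096 << 9, i.e. 2 MiB.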
2 changes: 1 addition & 1 deletion lib/iov_iter.c
@@ -878,7 +878,7 @@ static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
 	head = compound_head(page);
 	v += (page - head) << PAGE_SHIFT;
 
-	if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
+	if (likely(n <= v && v <= (page_size(head))))
 		return true;
 	WARN_ON(1);
 	return false;
8 changes: 3 additions & 5 deletions mm/kasan/common.c
@@ -338,8 +338,7 @@ void kasan_poison_slab(struct page *page)
 
 	for (i = 0; i < (1 << compound_order(page)); i++)
 		page_kasan_tag_reset(page + i);
-	kasan_poison_shadow(page_address(page),
-			PAGE_SIZE << compound_order(page),
+	kasan_poison_shadow(page_address(page), page_size(page),
 			KASAN_KMALLOC_REDZONE);
 }
 
@@ -542,7 +541,7 @@ void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
 	page = virt_to_page(ptr);
 	redzone_start = round_up((unsigned long)(ptr + size),
 				KASAN_SHADOW_SCALE_SIZE);
-	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));
+	redzone_end = (unsigned long)ptr + page_size(page);
 
 	kasan_unpoison_shadow(ptr, size);
 	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
@@ -578,8 +577,7 @@ void kasan_poison_kfree(void *ptr, unsigned long ip)
 			kasan_report_invalid_free(ptr, ip);
 			return;
 		}
-		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
-				KASAN_FREE_PAGE);
+		kasan_poison_shadow(ptr, page_size(page), KASAN_FREE_PAGE);
 	} else {
 		__kasan_slab_free(page->slab_cache, ptr, ip, false);
 	}
2 changes: 1 addition & 1 deletion mm/nommu.c
@@ -108,7 +108,7 @@ unsigned int kobjsize(const void *objp)
 	 * The ksize() function is only guaranteed to work for pointers
 	 * returned by kmalloc(). So handle arbitrary pointers here.
 	 */
-	return PAGE_SIZE << compound_order(page);
+	return page_size(page);
 }
 
 /**
3 changes: 1 addition & 2 deletions mm/page_vma_mapped.c
@@ -153,8 +153,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 
 	if (unlikely(PageHuge(pvmw->page))) {
 		/* when pud is not present, pte will be NULL */
-		pvmw->pte = huge_pte_offset(mm, pvmw->address,
-					    PAGE_SIZE << compound_order(page));
+		pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
 		if (!pvmw->pte)
 			return false;
 
6 changes: 2 additions & 4 deletions mm/rmap.c
@@ -898,8 +898,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 	 */
 	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
 				0, vma, vma->vm_mm, address,
-				min(vma->vm_end, address +
-				    (PAGE_SIZE << compound_order(page))));
+				min(vma->vm_end, address + page_size(page)));
 	mmu_notifier_invalidate_range_start(&range);
 
 	while (page_vma_mapped_walk(&pvmw)) {
@@ -1372,8 +1371,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	 */
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
 				address,
-				min(vma->vm_end, address +
-				    (PAGE_SIZE << compound_order(page))));
+				min(vma->vm_end, address + page_size(page)));
 	if (PageHuge(page)) {
 		/*
 		 * If sharing is possible, start and end will be adjusted
2 changes: 1 addition & 1 deletion mm/slob.c
@@ -539,7 +539,7 @@ size_t __ksize(const void *block)
 
 	sp = virt_to_page(block);
 	if (unlikely(!PageSlab(sp)))
-		return PAGE_SIZE << compound_order(sp);
+		return page_size(sp);
 
 	align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 	m = (unsigned int *)(block - align);
18 changes: 9 additions & 9 deletions mm/slub.c
@@ -829,7 +829,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
 		return 1;
 
 	start = page_address(page);
-	length = PAGE_SIZE << compound_order(page);
+	length = page_size(page);
 	end = start + length;
 	remainder = length % s->size;
 	if (!remainder)
@@ -1074,13 +1074,14 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
 	init_tracking(s, object);
 }
 
-static void setup_page_debug(struct kmem_cache *s, void *addr, int order)
+static
+void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr)
 {
 	if (!(s->flags & SLAB_POISON))
 		return;
 
 	metadata_access_enable();
-	memset(addr, POISON_INUSE, PAGE_SIZE << order);
+	memset(addr, POISON_INUSE, page_size(page));
 	metadata_access_disable();
 }
 
@@ -1340,8 +1341,8 @@ slab_flags_t kmem_cache_flags(unsigned int object_size,
 #else /* !CONFIG_SLUB_DEBUG */
 static inline void setup_object_debug(struct kmem_cache *s,
 			struct page *page, void *object) {}
-static inline void setup_page_debug(struct kmem_cache *s,
-			void *addr, int order) {}
+static inline
+void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) {}
 
 static inline int alloc_debug_processing(struct kmem_cache *s,
 	struct page *page, void *object, unsigned long addr) { return 0; }
@@ -1639,7 +1640,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	struct kmem_cache_order_objects oo = s->oo;
 	gfp_t alloc_gfp;
 	void *start, *p, *next;
-	int idx, order;
+	int idx;
 	bool shuffle;
 
 	flags &= gfp_allowed_mask;
@@ -1673,7 +1674,6 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 
 	page->objects = oo_objects(oo);
 
-	order = compound_order(page);
 	page->slab_cache = s;
 	__SetPageSlab(page);
 	if (page_is_pfmemalloc(page))
@@ -1683,7 +1683,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 
 	start = page_address(page);
 
-	setup_page_debug(s, start, order);
+	setup_page_debug(s, page, start);
 
 	shuffle = shuffle_freelist(s, page);
 
@@ -3932,7 +3932,7 @@ size_t __ksize(const void *object)
 
 	if (unlikely(!PageSlab(page))) {
 		WARN_ON(!PageCompound(page));
-		return PAGE_SIZE << compound_order(page);
+		return page_size(page);
 	}
 
 	return slab_ksize(page->slab_cache);
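The slub conversion is the one place the series goes beyond textual substitution: setup_page_debug() now receives the struct page and computes the poison length with page_size(page), which in turn lets allocate_slab() drop its local order variable and the extra compound_order() call.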
2 changes: 1 addition & 1 deletion net/xdp/xsk.c
@@ -977,7 +977,7 @@ static int xsk_mmap(struct file *file, struct socket *sock,
 	/* Matches the smp_wmb() in xsk_init_queue */
 	smp_rmb();
 	qpg = virt_to_head_page(q->ring);
-	if (size > (PAGE_SIZE << compound_order(qpg)))
+	if (size > page_size(qpg))
 		return -EINVAL;
 
 	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
