mm: replace hpage_nr_pages with thp_nr_pages
The thp prefix is more frequently used than hpage and we should be
consistent between the various functions.

[[email protected]: fix mm/migrate.c]

Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Reviewed-by: William Kucharski <[email protected]>
Reviewed-by: Zi Yan <[email protected]>
Cc: Mike Kravetz <[email protected]>
Cc: David Hildenbrand <[email protected]>
Cc: "Kirill A. Shutemov" <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Linus Torvalds <[email protected]>
Matthew Wilcox (Oracle) authored and torvalds committed Aug 15, 2020
1 parent af3bbc1 commit 6c35784
Showing 20 changed files with 65 additions and 62 deletions.
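
For orientation before the per-file diff: thp_nr_pages() returns HPAGE_PMD_NR for a THP head page and 1 for a base page, so callers can account for a page's true size without special-casing huge pages. Below is a minimal sketch of the calling pattern (illustration only, not part of this commit; the wrapper name example_isolate_accounting() is made up, while every helper it calls appears in the diff that follows):

/*
 * Illustration only: how a typical caller accounts for a page's size
 * after the rename.  thp_nr_pages() hides the THP-vs-base-page
 * distinction, so no PageTransHuge() check is needed here.
 */
static void example_isolate_accounting(struct page *page)
{
        int nr_pages = thp_nr_pages(page);      /* was hpage_nr_pages(page) */

        mod_node_page_state(page_pgdat(page),
                            NR_ISOLATED_ANON + page_is_file_lru(page),
                            nr_pages);
}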
13 changes: 9 additions & 4 deletions include/linux/huge_mm.h
@@ -271,9 +271,14 @@ static inline unsigned int thp_order(struct page *page)
         return 0;
 }
 
-static inline int hpage_nr_pages(struct page *page)
+/**
+ * thp_nr_pages - The number of regular pages in this huge page.
+ * @page: The head page of a huge page.
+ */
+static inline int thp_nr_pages(struct page *page)
 {
-        if (unlikely(PageTransHuge(page)))
+        VM_BUG_ON_PGFLAGS(PageTail(page), page);
+        if (PageHead(page))
                 return HPAGE_PMD_NR;
         return 1;
 }
@@ -336,9 +341,9 @@ static inline unsigned int thp_order(struct page *page)
         return 0;
 }
 
-static inline int hpage_nr_pages(struct page *page)
+static inline int thp_nr_pages(struct page *page)
 {
-        VM_BUG_ON_PAGE(PageTail(page), page);
+        VM_BUG_ON_PGFLAGS(PageTail(page), page);
         return 1;
 }
 
6 changes: 3 additions & 3 deletions include/linux/mm_inline.h
@@ -48,22 +48,22 @@ static __always_inline void update_lru_size(struct lruvec *lruvec,
 static __always_inline void add_page_to_lru_list(struct page *page,
                                 struct lruvec *lruvec, enum lru_list lru)
 {
-        update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
+        update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
         list_add(&page->lru, &lruvec->lists[lru]);
 }
 
 static __always_inline void add_page_to_lru_list_tail(struct page *page,
                                 struct lruvec *lruvec, enum lru_list lru)
 {
-        update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
+        update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
         list_add_tail(&page->lru, &lruvec->lists[lru]);
 }
 
 static __always_inline void del_page_from_lru_list(struct page *page,
                                 struct lruvec *lruvec, enum lru_list lru)
 {
         list_del(&page->lru);
-        update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page));
+        update_lru_size(lruvec, lru, page_zonenum(page), -thp_nr_pages(page));
 }
 
 /**
6 changes: 3 additions & 3 deletions include/linux/pagemap.h
@@ -381,7 +381,7 @@ static inline struct page *find_subpage(struct page *head, pgoff_t index)
         if (PageHuge(head))
                 return head;
 
-        return head + (index & (hpage_nr_pages(head) - 1));
+        return head + (index & (thp_nr_pages(head) - 1));
 }
 
 struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
@@ -773,7 +773,7 @@ static inline struct page *readahead_page(struct readahead_control *rac)
 
         page = xa_load(&rac->mapping->i_pages, rac->_index);
         VM_BUG_ON_PAGE(!PageLocked(page), page);
-        rac->_batch_count = hpage_nr_pages(page);
+        rac->_batch_count = thp_nr_pages(page);
 
         return page;
 }
@@ -796,7 +796,7 @@ static inline unsigned int __readahead_batch(struct readahead_control *rac,
                 VM_BUG_ON_PAGE(!PageLocked(page), page);
                 VM_BUG_ON_PAGE(PageTail(page), page);
                 array[i++] = page;
-                rac->_batch_count += hpage_nr_pages(page);
+                rac->_batch_count += thp_nr_pages(page);
 
                 /*
                  * The page cache isn't using multi-index entries yet,
2 changes: 1 addition & 1 deletion mm/compaction.c
@@ -1009,7 +1009,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                 del_page_from_lru_list(page, lruvec, page_lru(page));
                 mod_node_page_state(page_pgdat(page),
                                 NR_ISOLATED_ANON + page_is_file_lru(page),
-                                hpage_nr_pages(page));
+                                thp_nr_pages(page));
 
 isolate_success:
                 list_add(&page->lru, &cc->migratepages);
2 changes: 1 addition & 1 deletion mm/filemap.c
@@ -198,7 +198,7 @@ static void unaccount_page_cache_page(struct address_space *mapping,
         if (PageHuge(page))
                 return;
 
-        nr = hpage_nr_pages(page);
+        nr = thp_nr_pages(page);
 
         __mod_lruvec_page_state(page, NR_FILE_PAGES, -nr);
         if (PageSwapBacked(page)) {
2 changes: 1 addition & 1 deletion mm/gup.c
@@ -1637,7 +1637,7 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
                                         mod_node_page_state(page_pgdat(head),
                                                             NR_ISOLATED_ANON +
                                                             page_is_file_lru(head),
-                                                            hpage_nr_pages(head));
+                                                            thp_nr_pages(head));
                                 }
                         }
                 }
2 changes: 1 addition & 1 deletion mm/internal.h
@@ -369,7 +369,7 @@ extern void clear_page_mlock(struct page *page);
 static inline void mlock_migrate_page(struct page *newpage, struct page *page)
 {
         if (TestClearPageMlocked(page)) {
-                int nr_pages = hpage_nr_pages(page);
+                int nr_pages = thp_nr_pages(page);
 
                 /* Holding pmd lock, no change in irq context: __mod is safe */
                 __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
10 changes: 5 additions & 5 deletions mm/memcontrol.c
@@ -5589,7 +5589,7 @@ static int mem_cgroup_move_account(struct page *page,
 {
         struct lruvec *from_vec, *to_vec;
         struct pglist_data *pgdat;
-        unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
+        unsigned int nr_pages = compound ? thp_nr_pages(page) : 1;
         int ret;
 
         VM_BUG_ON(from == to);
@@ -6682,7 +6682,7 @@ void mem_cgroup_calculate_protection(struct mem_cgroup *root,
  */
 int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
 {
-        unsigned int nr_pages = hpage_nr_pages(page);
+        unsigned int nr_pages = thp_nr_pages(page);
         struct mem_cgroup *memcg = NULL;
         int ret = 0;
 
@@ -6912,7 +6912,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
                 return;
 
         /* Force-charge the new page. The old one will be freed soon */
-        nr_pages = hpage_nr_pages(newpage);
+        nr_pages = thp_nr_pages(newpage);
 
         page_counter_charge(&memcg->memory, nr_pages);
         if (do_memsw_account())
@@ -7114,7 +7114,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
          * ancestor for the swap instead and transfer the memory+swap charge.
          */
         swap_memcg = mem_cgroup_id_get_online(memcg);
-        nr_entries = hpage_nr_pages(page);
+        nr_entries = thp_nr_pages(page);
         /* Get references for the tail pages, too */
         if (nr_entries > 1)
                 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
@@ -7158,7 +7158,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
  */
 int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
 {
-        unsigned int nr_pages = hpage_nr_pages(page);
+        unsigned int nr_pages = thp_nr_pages(page);
         struct page_counter *counter;
         struct mem_cgroup *memcg;
         unsigned short oldid;
7 changes: 3 additions & 4 deletions mm/memory_hotplug.c
@@ -1299,23 +1299,22 @@ static int
 do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 {
         unsigned long pfn;
-        struct page *page;
+        struct page *page, *head;
         int ret = 0;
         LIST_HEAD(source);
 
         for (pfn = start_pfn; pfn < end_pfn; pfn++) {
                 if (!pfn_valid(pfn))
                         continue;
                 page = pfn_to_page(pfn);
+                head = compound_head(page);
 
                 if (PageHuge(page)) {
-                        struct page *head = compound_head(page);
                         pfn = page_to_pfn(head) + compound_nr(head) - 1;
                         isolate_huge_page(head, &source);
                         continue;
                 } else if (PageTransHuge(page))
-                        pfn = page_to_pfn(compound_head(page))
-                                + hpage_nr_pages(page) - 1;
+                        pfn = page_to_pfn(head) + thp_nr_pages(page) - 1;
 
                 /*
                  * HWPoison pages have elevated reference counts so the migration would
2 changes: 1 addition & 1 deletion mm/mempolicy.c
@@ -1049,7 +1049,7 @@ static int migrate_page_add(struct page *page, struct list_head *pagelist,
                         list_add_tail(&head->lru, pagelist);
                         mod_node_page_state(page_pgdat(head),
                                 NR_ISOLATED_ANON + page_is_file_lru(head),
-                                hpage_nr_pages(head));
+                                thp_nr_pages(head));
         } else if (flags & MPOL_MF_STRICT) {
                 /*
                  * Non-movable page may reach here. And, there may be
18 changes: 9 additions & 9 deletions mm/migrate.c
@@ -193,7 +193,7 @@ void putback_movable_pages(struct list_head *l)
                         put_page(page);
                 } else {
                         mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
-                                        page_is_file_lru(page), -hpage_nr_pages(page));
+                                        page_is_file_lru(page), -thp_nr_pages(page));
                         putback_lru_page(page);
                 }
         }
@@ -386,7 +386,7 @@ static int expected_page_refs(struct address_space *mapping, struct page *page)
          */
         expected_count += is_device_private_page(page);
         if (mapping)
-                expected_count += hpage_nr_pages(page) + page_has_private(page);
+                expected_count += thp_nr_pages(page) + page_has_private(page);
 
         return expected_count;
 }
@@ -441,7 +441,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
          */
         newpage->index = page->index;
         newpage->mapping = page->mapping;
-        page_ref_add(newpage, hpage_nr_pages(page)); /* add cache reference */
+        page_ref_add(newpage, thp_nr_pages(page)); /* add cache reference */
         if (PageSwapBacked(page)) {
                 __SetPageSwapBacked(newpage);
                 if (PageSwapCache(page)) {
@@ -474,7 +474,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
          * to one less reference.
          * We know this isn't the last reference.
          */
-        page_ref_unfreeze(page, expected_count - hpage_nr_pages(page));
+        page_ref_unfreeze(page, expected_count - thp_nr_pages(page));
 
         xas_unlock(&xas);
         /* Leave irq disabled to prevent preemption while updating stats */
@@ -591,7 +591,7 @@ static void copy_huge_page(struct page *dst, struct page *src)
         } else {
                 /* thp page */
                 BUG_ON(!PageTransHuge(src));
-                nr_pages = hpage_nr_pages(src);
+                nr_pages = thp_nr_pages(src);
         }
 
         for (i = 0; i < nr_pages; i++) {
@@ -1213,7 +1213,7 @@ static int unmap_and_move(new_page_t get_new_page,
                  */
                 if (likely(!__PageMovable(page)))
                         mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
-                                        page_is_file_lru(page), -hpage_nr_pages(page));
+                                        page_is_file_lru(page), -thp_nr_pages(page));
         }
 
         /*
@@ -1446,7 +1446,7 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
                  * during migration.
                  */
                 is_thp = PageTransHuge(page);
-                nr_subpages = hpage_nr_pages(page);
+                nr_subpages = thp_nr_pages(page);
                 cond_resched();
 
                 if (PageHuge(page))
@@ -1670,7 +1670,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
                 list_add_tail(&head->lru, pagelist);
                 mod_node_page_state(page_pgdat(head),
                         NR_ISOLATED_ANON + page_is_file_lru(head),
-                        hpage_nr_pages(head));
+                        thp_nr_pages(head));
         }
 out_putpage:
         /*
@@ -2034,7 +2034,7 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
 
         page_lru = page_is_file_lru(page);
         mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
-                                hpage_nr_pages(page));
+                                thp_nr_pages(page));
 
         /*
          * Isolating the page has taken another reference, so the
9 changes: 4 additions & 5 deletions mm/mlock.c
@@ -61,8 +61,7 @@ void clear_page_mlock(struct page *page)
         if (!TestClearPageMlocked(page))
                 return;
 
-        mod_zone_page_state(page_zone(page), NR_MLOCK,
-                            -hpage_nr_pages(page));
+        mod_zone_page_state(page_zone(page), NR_MLOCK, -thp_nr_pages(page));
         count_vm_event(UNEVICTABLE_PGCLEARED);
         /*
          * The previous TestClearPageMlocked() corresponds to the smp_mb()
@@ -95,7 +94,7 @@ void mlock_vma_page(struct page *page)
 
         if (!TestSetPageMlocked(page)) {
                 mod_zone_page_state(page_zone(page), NR_MLOCK,
-                                    hpage_nr_pages(page));
+                                    thp_nr_pages(page));
                 count_vm_event(UNEVICTABLE_PGMLOCKED);
                 if (!isolate_lru_page(page))
                         putback_lru_page(page);
@@ -192,7 +191,7 @@ unsigned int munlock_vma_page(struct page *page)
         /*
          * Serialize with any parallel __split_huge_page_refcount() which
          * might otherwise copy PageMlocked to part of the tail pages before
-         * we clear it in the head page. It also stabilizes hpage_nr_pages().
+         * we clear it in the head page. It also stabilizes thp_nr_pages().
          */
         spin_lock_irq(&pgdat->lru_lock);
 
@@ -202,7 +201,7 @@ unsigned int munlock_vma_page(struct page *page)
                 goto unlock_out;
         }
 
-        nr_pages = hpage_nr_pages(page);
+        nr_pages = thp_nr_pages(page);
         __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
 
         if (__munlock_isolate_lru_page(page, true)) {
2 changes: 1 addition & 1 deletion mm/page_io.c
@@ -274,7 +274,7 @@ static inline void count_swpout_vm_event(struct page *page)
         if (unlikely(PageTransHuge(page)))
                 count_vm_event(THP_SWPOUT);
 #endif
-        count_vm_events(PSWPOUT, hpage_nr_pages(page));
+        count_vm_events(PSWPOUT, thp_nr_pages(page));
 }
 
 #if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
2 changes: 1 addition & 1 deletion mm/page_vma_mapped.c
@@ -61,7 +61,7 @@ static inline bool pfn_is_match(struct page *page, unsigned long pfn)
                 return page_pfn == pfn;
 
         /* THP can be referenced by any subpage */
-        return pfn >= page_pfn && pfn - page_pfn < hpage_nr_pages(page);
+        return pfn >= page_pfn && pfn - page_pfn < thp_nr_pages(page);
 }
 
 /**
8 changes: 4 additions & 4 deletions mm/rmap.c
@@ -1130,7 +1130,7 @@ void do_page_add_anon_rmap(struct page *page,
         }
 
         if (first) {
-                int nr = compound ? hpage_nr_pages(page) : 1;
+                int nr = compound ? thp_nr_pages(page) : 1;
                 /*
                  * We use the irq-unsafe __{inc|mod}_zone_page_stat because
                  * these counters are not modified in interrupt context, and
@@ -1169,7 +1169,7 @@ void do_page_add_anon_rmap(struct page *page,
 void page_add_new_anon_rmap(struct page *page,
         struct vm_area_struct *vma, unsigned long address, bool compound)
 {
-        int nr = compound ? hpage_nr_pages(page) : 1;
+        int nr = compound ? thp_nr_pages(page) : 1;
 
         VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
         __SetPageSwapBacked(page);
@@ -1860,7 +1860,7 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
                 return;
 
         pgoff_start = page_to_pgoff(page);
-        pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
+        pgoff_end = pgoff_start + thp_nr_pages(page) - 1;
         anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
                         pgoff_start, pgoff_end) {
                 struct vm_area_struct *vma = avc->vma;
@@ -1913,7 +1913,7 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
                 return;
 
         pgoff_start = page_to_pgoff(page);
-        pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
+        pgoff_end = pgoff_start + thp_nr_pages(page) - 1;
         if (!locked)
                 i_mmap_lock_read(mapping);
         vma_interval_tree_foreach(vma, &mapping->i_mmap,
[diffs for the remaining 5 changed files were not loaded on this page]
