mm: memcontrol: drop @compound parameter from memcg charging API
The memcg charging API carries a boolean @compound parameter that tells
whether the page we're dealing with is a hugepage.
mem_cgroup_commit_charge() has another boolean @lrucare that indicates
whether the page needs LRU locking or not while charging.  The majority of
callsites know those parameters at compile time, which results in a lot of
naked "false, false" argument lists.  This makes for cryptic code and is a
breeding ground for subtle mistakes.
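
As an illustration, a typical commit callsite goes from the first form
to the second (this pair just mirrors the actual hunks in the diff
below):

	/* before: what do the two trailing booleans mean? */
	mem_cgroup_commit_charge(page, memcg, false, false);

	/* after: only @lrucare is left; the huge page state is
	 * derived from the page itself */
	mem_cgroup_commit_charge(page, memcg, false);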

Thankfully, the huge page state can be inferred from the page itself and
doesn't need to be passed along.  This is safe because charging completes
before the page is published, and thus before anybody could split it.

Simplify the callsites by removing @compound, and let memcg infer the
state by using hpage_nr_pages() unconditionally.  That function does
PageTransHuge() to identify huge pages, which also helpfully asserts that
nobody passes in tail pages by accident.
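
For reference, the two helpers involved behave roughly as follows; this
is a paraphrased sketch of the v5.7-era definitions (huge_mm.h and
page-flags.h), not the verbatim source:

	/* paraphrased sketch, not verbatim kernel code */
	static inline int PageTransHuge(struct page *page)
	{
		/* catches callers passing a tail page by accident */
		VM_BUG_ON_PAGE(PageTail(page), page);
		return PageHead(page);
	}

	static inline int hpage_nr_pages(struct page *page)
	{
		if (unlikely(PageTransHuge(page)))
			return HPAGE_PMD_NR;	/* 512 for 2MB THP on x86-64 */
		return 1;
	}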

The following patches will introduce a new charging API, best not to carry
over unnecessary weight.

Signed-off-by: Johannes Weiner <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Reviewed-by: Alex Shi <[email protected]>
Reviewed-by: Joonsoo Kim <[email protected]>
Reviewed-by: Shakeel Butt <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: "Kirill A. Shutemov" <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Roman Gushchin <[email protected]>
Cc: Balbir Singh <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Linus Torvalds <[email protected]>
hnaz authored and torvalds committed Jun 4, 2020
1 parent abb242f commit 3fba69a
Showing 11 changed files with 77 additions and 98 deletions.
22 changes: 8 additions & 14 deletions include/linux/memcontrol.h
@@ -359,15 +359,12 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
 					     struct mem_cgroup *memcg);
 
 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
-			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
-			  bool compound);
+			  gfp_t gfp_mask, struct mem_cgroup **memcgp);
 int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
-			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
-			  bool compound);
+			  gfp_t gfp_mask, struct mem_cgroup **memcgp);
 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
-			      bool lrucare, bool compound);
-void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
-			      bool compound);
+			      bool lrucare);
+void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg);
 void mem_cgroup_uncharge(struct page *page);
 void mem_cgroup_uncharge_list(struct list_head *page_list);
 
@@ -849,8 +846,7 @@ static inline enum mem_cgroup_protection mem_cgroup_protected(
 
 static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
 					gfp_t gfp_mask,
-					struct mem_cgroup **memcgp,
-					bool compound)
+					struct mem_cgroup **memcgp)
 {
 	*memcgp = NULL;
 	return 0;
@@ -859,22 +855,20 @@ static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
 static inline int mem_cgroup_try_charge_delay(struct page *page,
 					      struct mm_struct *mm,
 					      gfp_t gfp_mask,
-					      struct mem_cgroup **memcgp,
-					      bool compound)
+					      struct mem_cgroup **memcgp)
 {
 	*memcgp = NULL;
 	return 0;
 }
 
 static inline void mem_cgroup_commit_charge(struct page *page,
 					    struct mem_cgroup *memcg,
-					    bool lrucare, bool compound)
+					    bool lrucare)
 {
 }
 
 static inline void mem_cgroup_cancel_charge(struct page *page,
-					    struct mem_cgroup *memcg,
-					    bool compound)
+					    struct mem_cgroup *memcg)
 {
 }
 
6 changes: 3 additions & 3 deletions kernel/events/uprobes.c
@@ -169,7 +169,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 
 	if (new_page) {
 		err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL,
-					    &memcg, false);
+					    &memcg);
 		if (err)
 			return err;
 	}
@@ -181,15 +181,15 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 	err = -EAGAIN;
 	if (!page_vma_mapped_walk(&pvmw)) {
 		if (new_page)
-			mem_cgroup_cancel_charge(new_page, memcg, false);
+			mem_cgroup_cancel_charge(new_page, memcg);
 		goto unlock;
 	}
 	VM_BUG_ON_PAGE(addr != pvmw.address, old_page);
 
 	if (new_page) {
 		get_page(new_page);
 		page_add_new_anon_rmap(new_page, vma, addr, false);
-		mem_cgroup_commit_charge(new_page, memcg, false, false);
+		mem_cgroup_commit_charge(new_page, memcg, false);
 		lru_cache_add_active_or_unevictable(new_page, vma);
 	} else
 		/* no new page, just dec_mm_counter for old_page */
6 changes: 3 additions & 3 deletions mm/filemap.c
@@ -842,7 +842,7 @@ static int __add_to_page_cache_locked(struct page *page,
 
 	if (!huge) {
 		error = mem_cgroup_try_charge(page, current->mm,
-					      gfp_mask, &memcg, false);
+					      gfp_mask, &memcg);
 		if (error)
 			return error;
 	}
@@ -878,14 +878,14 @@ static int __add_to_page_cache_locked(struct page *page,
 		goto error;
 
 	if (!huge)
-		mem_cgroup_commit_charge(page, memcg, false, false);
+		mem_cgroup_commit_charge(page, memcg, false);
 	trace_mm_filemap_add_to_page_cache(page);
 	return 0;
 error:
 	page->mapping = NULL;
 	/* Leave page->index set: truncation relies upon it */
 	if (!huge)
-		mem_cgroup_cancel_charge(page, memcg, false);
+		mem_cgroup_cancel_charge(page, memcg);
 	put_page(page);
 	return xas_error(&xas);
 }
8 changes: 4 additions & 4 deletions mm/huge_memory.c
@@ -594,7 +594,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 
 	VM_BUG_ON_PAGE(!PageCompound(page), page);
 
-	if (mem_cgroup_try_charge_delay(page, vma->vm_mm, gfp, &memcg, true)) {
+	if (mem_cgroup_try_charge_delay(page, vma->vm_mm, gfp, &memcg)) {
 		put_page(page);
 		count_vm_event(THP_FAULT_FALLBACK);
 		count_vm_event(THP_FAULT_FALLBACK_CHARGE);
@@ -630,7 +630,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 		vm_fault_t ret2;
 
 		spin_unlock(vmf->ptl);
-		mem_cgroup_cancel_charge(page, memcg, true);
+		mem_cgroup_cancel_charge(page, memcg);
 		put_page(page);
 		pte_free(vma->vm_mm, pgtable);
 		ret2 = handle_userfault(vmf, VM_UFFD_MISSING);
@@ -641,7 +641,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 	entry = mk_huge_pmd(page, vma->vm_page_prot);
 	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 	page_add_new_anon_rmap(page, vma, haddr, true);
-	mem_cgroup_commit_charge(page, memcg, false, true);
+	mem_cgroup_commit_charge(page, memcg, false);
 	lru_cache_add_active_or_unevictable(page, vma);
 	pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
 	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
@@ -658,7 +658,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 release:
 	if (pgtable)
 		pte_free(vma->vm_mm, pgtable);
-	mem_cgroup_cancel_charge(page, memcg, true);
+	mem_cgroup_cancel_charge(page, memcg);
 	put_page(page);
 	return ret;
 
20 changes: 10 additions & 10 deletions mm/khugepaged.c
@@ -1060,23 +1060,23 @@ static void collapse_huge_page(struct mm_struct *mm,
 		goto out_nolock;
 	}
 
-	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
+	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg))) {
 		result = SCAN_CGROUP_CHARGE_FAIL;
 		goto out_nolock;
 	}
 
 	down_read(&mm->mmap_sem);
 	result = hugepage_vma_revalidate(mm, address, &vma);
 	if (result) {
-		mem_cgroup_cancel_charge(new_page, memcg, true);
+		mem_cgroup_cancel_charge(new_page, memcg);
 		up_read(&mm->mmap_sem);
 		goto out_nolock;
 	}
 
 	pmd = mm_find_pmd(mm, address);
 	if (!pmd) {
 		result = SCAN_PMD_NULL;
-		mem_cgroup_cancel_charge(new_page, memcg, true);
+		mem_cgroup_cancel_charge(new_page, memcg);
 		up_read(&mm->mmap_sem);
 		goto out_nolock;
 	}
@@ -1088,7 +1088,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	 */
 	if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
 						     pmd, referenced)) {
-		mem_cgroup_cancel_charge(new_page, memcg, true);
+		mem_cgroup_cancel_charge(new_page, memcg);
 		up_read(&mm->mmap_sem);
 		goto out_nolock;
 	}
@@ -1176,7 +1176,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	spin_lock(pmd_ptl);
 	BUG_ON(!pmd_none(*pmd));
 	page_add_new_anon_rmap(new_page, vma, address, true);
-	mem_cgroup_commit_charge(new_page, memcg, false, true);
+	mem_cgroup_commit_charge(new_page, memcg, false);
 	count_memcg_events(memcg, THP_COLLAPSE_ALLOC, 1);
 	lru_cache_add_active_or_unevictable(new_page, vma);
 	pgtable_trans_huge_deposit(mm, pmd, pgtable);
@@ -1194,7 +1194,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	trace_mm_collapse_huge_page(mm, isolated, result);
 	return;
 out:
-	mem_cgroup_cancel_charge(new_page, memcg, true);
+	mem_cgroup_cancel_charge(new_page, memcg);
 	goto out_up_write;
 }
 
@@ -1637,7 +1637,7 @@ static void collapse_file(struct mm_struct *mm,
 		goto out;
 	}
 
-	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
+	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg))) {
 		result = SCAN_CGROUP_CHARGE_FAIL;
 		goto out;
 	}
@@ -1650,7 +1650,7 @@ static void collapse_file(struct mm_struct *mm,
 			break;
 		xas_unlock_irq(&xas);
 		if (!xas_nomem(&xas, GFP_KERNEL)) {
-			mem_cgroup_cancel_charge(new_page, memcg, true);
+			mem_cgroup_cancel_charge(new_page, memcg);
 			result = SCAN_FAIL;
 			goto out;
 		}
@@ -1887,7 +1887,7 @@ static void collapse_file(struct mm_struct *mm,
 
 	SetPageUptodate(new_page);
 	page_ref_add(new_page, HPAGE_PMD_NR - 1);
-	mem_cgroup_commit_charge(new_page, memcg, false, true);
+	mem_cgroup_commit_charge(new_page, memcg, false);
 
 	if (is_shmem) {
 		set_page_dirty(new_page);
@@ -1942,7 +1942,7 @@ static void collapse_file(struct mm_struct *mm,
 		VM_BUG_ON(nr_none);
 		xas_unlock_irq(&xas);
 
-		mem_cgroup_cancel_charge(new_page, memcg, true);
+		mem_cgroup_cancel_charge(new_page, memcg);
 		new_page->mapping = NULL;
 	}
 
38 changes: 15 additions & 23 deletions mm/memcontrol.c
@@ -834,7 +834,7 @@ static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
 
 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
 					 struct page *page,
-					 bool compound, int nr_pages)
+					 int nr_pages)
 {
 	/*
 	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
@@ -848,7 +848,7 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
 		__mod_memcg_state(memcg, NR_SHMEM, nr_pages);
 	}
 
-	if (compound) {
+	if (abs(nr_pages) > 1) {
 		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
 		__mod_memcg_state(memcg, MEMCG_RSS_HUGE, nr_pages);
 	}
@@ -5501,9 +5501,9 @@ static int mem_cgroup_move_account(struct page *page,
 	ret = 0;
 
 	local_irq_disable();
-	mem_cgroup_charge_statistics(to, page, compound, nr_pages);
+	mem_cgroup_charge_statistics(to, page, nr_pages);
 	memcg_check_events(to, page);
-	mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
+	mem_cgroup_charge_statistics(from, page, -nr_pages);
 	memcg_check_events(from, page);
 	local_irq_enable();
 out_unlock:
@@ -6494,7 +6494,6 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
  * @mm: mm context of the victim
  * @gfp_mask: reclaim mode
  * @memcgp: charged memcg return
- * @compound: charge the page as compound or small page
  *
  * Try to charge @page to the memcg that @mm belongs to, reclaiming
  * pages according to @gfp_mask if necessary.
@@ -6507,11 +6506,10 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
  * with mem_cgroup_cancel_charge() in case page instantiation fails.
  */
 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
-			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
-			  bool compound)
+			  gfp_t gfp_mask, struct mem_cgroup **memcgp)
 {
+	unsigned int nr_pages = hpage_nr_pages(page);
 	struct mem_cgroup *memcg = NULL;
-	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
 	int ret = 0;
 
 	if (mem_cgroup_disabled())
@@ -6553,13 +6551,12 @@ int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
 }
 
 int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
-			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
-			  bool compound)
+			  gfp_t gfp_mask, struct mem_cgroup **memcgp)
 {
 	struct mem_cgroup *memcg;
 	int ret;
 
-	ret = mem_cgroup_try_charge(page, mm, gfp_mask, memcgp, compound);
+	ret = mem_cgroup_try_charge(page, mm, gfp_mask, memcgp);
 	memcg = *memcgp;
 	mem_cgroup_throttle_swaprate(memcg, page_to_nid(page), gfp_mask);
 	return ret;
@@ -6570,7 +6567,6 @@ int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
  * @page: page to charge
 * @memcg: memcg to charge the page to
 * @lrucare: page might be on LRU already
- * @compound: charge the page as compound or small page
 *
 * Finalize a charge transaction started by mem_cgroup_try_charge(),
 * after page->mapping has been set up. This must happen atomically
@@ -6583,9 +6579,9 @@ int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
 * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
 */
 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
-			      bool lrucare, bool compound)
+			      bool lrucare)
 {
-	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
+	unsigned int nr_pages = hpage_nr_pages(page);
 
 	VM_BUG_ON_PAGE(!page->mapping, page);
 	VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
@@ -6603,7 +6599,7 @@ void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
 	commit_charge(page, memcg, lrucare);
 
 	local_irq_disable();
-	mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
+	mem_cgroup_charge_statistics(memcg, page, nr_pages);
 	memcg_check_events(memcg, page);
 	local_irq_enable();
 
@@ -6622,14 +6618,12 @@ void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
 * mem_cgroup_cancel_charge - cancel a page charge
 * @page: page to charge
 * @memcg: memcg to charge the page to
- * @compound: charge the page as compound or small page
 *
 * Cancel a charge transaction started by mem_cgroup_try_charge().
 */
-void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
-			      bool compound)
+void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg)
 {
-	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
+	unsigned int nr_pages = hpage_nr_pages(page);
 
 	if (mem_cgroup_disabled())
 		return;
@@ -6844,8 +6838,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
 	commit_charge(newpage, memcg, false);
 
 	local_irq_save(flags);
-	mem_cgroup_charge_statistics(memcg, newpage, PageTransHuge(newpage),
-				     nr_pages);
+	mem_cgroup_charge_statistics(memcg, newpage, nr_pages);
 	memcg_check_events(memcg, newpage);
 	local_irq_restore(flags);
 }
@@ -7075,8 +7068,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
 	 * only synchronisation we have for updating the per-CPU variables.
 	 */
 	VM_BUG_ON(!irqs_disabled());
-	mem_cgroup_charge_statistics(memcg, page, PageTransHuge(page),
-				     -nr_entries);
+	mem_cgroup_charge_statistics(memcg, page, -nr_entries);
 	memcg_check_events(memcg, page);
 
 	if (!mem_cgroup_is_root(memcg))