Skip to content

Commit

Permalink
mm/memcg: Convert uncharge_page() to uncharge_folio()
Browse files Browse the repository at this point in the history
Use a folio rather than a page to ensure that we're only operating on
base or head pages, and not tail pages.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
  • Loading branch information
Matthew Wilcox (Oracle) committed Sep 27, 2021
1 parent 8f425e4 commit c4ed6eb
Showing 1 changed file with 15 additions and 16 deletions.
31 changes: 15 additions & 16 deletions mm/memcontrol.c
Original file line number Diff line number Diff line change
Expand Up @@ -6794,24 +6794,23 @@ static void uncharge_batch(const struct uncharge_gather *ug)
memcg_check_events(ug->memcg, ug->nid);
local_irq_restore(flags);

/* drop reference from uncharge_page */
/* drop reference from uncharge_folio */
css_put(&ug->memcg->css);
}

static void uncharge_page(struct page *page, struct uncharge_gather *ug)
static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
{
struct folio *folio = page_folio(page);
unsigned long nr_pages;
long nr_pages;
struct mem_cgroup *memcg;
struct obj_cgroup *objcg;
bool use_objcg = PageMemcgKmem(page);
bool use_objcg = folio_memcg_kmem(folio);

VM_BUG_ON_PAGE(PageLRU(page), page);
VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);

/*
* Nobody should be changing or seriously looking at
* page memcg or objcg at this point, we have fully
* exclusive access to the page.
* folio memcg or objcg at this point, we have fully
* exclusive access to the folio.
*/
if (use_objcg) {
objcg = __folio_objcg(folio);
Expand All @@ -6833,27 +6832,27 @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug)
uncharge_gather_clear(ug);
}
ug->memcg = memcg;
ug->nid = page_to_nid(page);
ug->nid = folio_nid(folio);

/* pairs with css_put in uncharge_batch */
css_get(&memcg->css);
}

nr_pages = compound_nr(page);
nr_pages = folio_nr_pages(folio);

if (use_objcg) {
ug->nr_memory += nr_pages;
ug->nr_kmem += nr_pages;

page->memcg_data = 0;
folio->memcg_data = 0;
obj_cgroup_put(objcg);
} else {
/* LRU pages aren't accounted at the root level */
if (!mem_cgroup_is_root(memcg))
ug->nr_memory += nr_pages;
ug->pgpgout++;

page->memcg_data = 0;
folio->memcg_data = 0;
}

css_put(&memcg->css);
Expand All @@ -6874,7 +6873,7 @@ void __mem_cgroup_uncharge(struct page *page)
return;

uncharge_gather_clear(&ug);
uncharge_page(page, &ug);
uncharge_folio(page_folio(page), &ug);
uncharge_batch(&ug);
}

Expand All @@ -6888,11 +6887,11 @@ void __mem_cgroup_uncharge(struct page *page)
void __mem_cgroup_uncharge_list(struct list_head *page_list)
{
struct uncharge_gather ug;
struct page *page;
struct folio *folio;

uncharge_gather_clear(&ug);
list_for_each_entry(page, page_list, lru)
uncharge_page(page, &ug);
list_for_each_entry(folio, page_list, lru)
uncharge_folio(folio, &ug);
if (ug.memcg)
uncharge_batch(&ug);
}
Expand Down

0 comments on commit c4ed6eb

Please sign in to comment.