Skip to content

Commit

Permalink
mm: memcontrol: account slab stats per lruvec
Browse files Browse the repository at this point in the history
Josef's redesign of the balancing between slab caches and the page cache
requires slab cache statistics at the lruvec level.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Johannes Weiner <[email protected]>
Acked-by: Vladimir Davydov <[email protected]>
Cc: Josef Bacik <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Rik van Riel <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
  • Loading branch information
hnaz authored and torvalds committed Jul 6, 2017
1 parent 00f3ca2 commit 7779f21
Show file tree
Hide file tree
Showing 3 changed files with 7 additions and 27 deletions.
12 changes: 4 additions & 8 deletions mm/slab.c
Original file line number Diff line number Diff line change
Expand Up @@ -1425,11 +1425,9 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,

nr_pages = (1 << cachep->gfporder);
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
-		add_node_page_state(page_pgdat(page),
-				    NR_SLAB_RECLAIMABLE, nr_pages);
+		mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, nr_pages);
 	else
-		add_node_page_state(page_pgdat(page),
-				    NR_SLAB_UNRECLAIMABLE, nr_pages);
+		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, nr_pages);

__SetPageSlab(page);
/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
Expand Down Expand Up @@ -1459,11 +1457,9 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
kmemcheck_free_shadow(page, order);

 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
-		sub_node_page_state(page_pgdat(page),
-				    NR_SLAB_RECLAIMABLE, nr_freed);
+		mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, -nr_freed);
 	else
-		sub_node_page_state(page_pgdat(page),
-				    NR_SLAB_UNRECLAIMABLE, nr_freed);
+		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, -nr_freed);

BUG_ON(!PageSlab(page));
__ClearPageSlabPfmemalloc(page);
Expand Down
18 changes: 1 addition & 17 deletions mm/slab.h
Original file line number Diff line number Diff line change
Expand Up @@ -274,34 +274,18 @@ static __always_inline int memcg_charge_slab(struct page *page,
gfp_t gfp, int order,
struct kmem_cache *s)
{
-	int ret;
-
 	if (!memcg_kmem_enabled())
 		return 0;
 	if (is_root_cache(s))
 		return 0;
-
-	ret = memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
-	if (ret)
-		return ret;
-
-	mod_memcg_page_state(page,
-			     (s->flags & SLAB_RECLAIM_ACCOUNT) ?
-			     NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-			     1 << order);
-	return 0;
+	return memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
}

static __always_inline void memcg_uncharge_slab(struct page *page, int order,
struct kmem_cache *s)
{
 	if (!memcg_kmem_enabled())
 		return;
-
-	mod_memcg_page_state(page,
-			     (s->flags & SLAB_RECLAIM_ACCOUNT) ?
-			     NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-			     -(1 << order));
 	memcg_kmem_uncharge(page, order);
}

Expand Down
4 changes: 2 additions & 2 deletions mm/slub.c
Original file line number Diff line number Diff line change
Expand Up @@ -1615,7 +1615,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
if (!page)
return NULL;

-	mod_node_page_state(page_pgdat(page),
+	mod_lruvec_page_state(page,
 			    (s->flags & SLAB_RECLAIM_ACCOUNT) ?
 			    NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
 			    1 << oo_order(oo));
Expand Down Expand Up @@ -1655,7 +1655,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)

kmemcheck_free_shadow(page, compound_order(page));

-	mod_node_page_state(page_pgdat(page),
+	mod_lruvec_page_state(page,
 			    (s->flags & SLAB_RECLAIM_ACCOUNT) ?
 			    NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
 			    -pages);
Expand Down

0 comments on commit 7779f21

Please sign in to comment.