mm: memcontrol: report slab usage in cgroup2 memory.stat
Show how much memory is used for storing reclaimable and unreclaimable
in-kernel data structures allocated from slab caches.

Signed-off-by: Vladimir Davydov <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Michal Hocko <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Vladimir Davydov authored and torvalds committed Mar 17, 2016
1 parent 72b54e7 commit 27ee57c
Showing 6 changed files with 79 additions and 6 deletions.
15 changes: 15 additions & 0 deletions Documentation/cgroup-v2.txt
@@ -843,6 +843,11 @@ PAGE_SIZE multiple when read back.
Amount of memory used to cache filesystem data,
including tmpfs and shared memory.

slab

Amount of memory used for storing in-kernel data
structures.

sock

Amount of memory used in network transmission buffers
@@ -871,6 +876,16 @@ PAGE_SIZE multiple when read back.
on the internal memory management lists used by the
page reclaim algorithm

slab_reclaimable

Part of "slab" that might be reclaimed, such as
dentries and inodes.

slab_unreclaimable

Part of "slab" that cannot be reclaimed on memory
pressure.

pgfault

Total number of page faults incurred
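
The three fields added to the documentation above are reported in bytes (PAGE_SIZE multiples) in memory.stat. Below is a minimal userspace sketch of reading them from a cgroup2 group; the mount point and group name are assumptions for illustration, not part of this commit.

#include <stdio.h>
#include <string.h>

int main(void)
{
        /* Hypothetical cgroup2 mount point and group name. */
        const char *path = "/sys/fs/cgroup/test/memory.stat";
        char key[64];
        unsigned long long val;
        FILE *f = fopen(path, "r");

        if (!f) {
                perror("fopen");
                return 1;
        }
        /* memory.stat is a flat keyed file: one "<name> <value>" pair per line. */
        while (fscanf(f, "%63s %llu", key, &val) == 2) {
                if (!strcmp(key, "slab") ||
                    !strcmp(key, "slab_reclaimable") ||
                    !strcmp(key, "slab_unreclaimable"))
                        printf("%s: %llu bytes\n", key, val);
        }
        fclose(f);
        return 0;
}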
21 changes: 21 additions & 0 deletions include/linux/memcontrol.h
@@ -53,6 +53,8 @@ enum mem_cgroup_stat_index {
MEM_CGROUP_STAT_NSTATS,
/* default hierarchy stats */
MEMCG_SOCK = MEM_CGROUP_STAT_NSTATS,
MEMCG_SLAB_RECLAIMABLE,
MEMCG_SLAB_UNRECLAIMABLE,
MEMCG_NR_STAT,
};

@@ -883,6 +885,20 @@ static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
if (memcg_kmem_enabled())
__memcg_kmem_put_cache(cachep);
}

/**
* memcg_kmem_update_page_stat - update kmem page state statistics
* @page: the page
* @idx: page state item to account
* @val: number of pages (positive or negative)
*/
static inline void memcg_kmem_update_page_stat(struct page *page,
enum mem_cgroup_stat_index idx, int val)
{
if (memcg_kmem_enabled() && page->mem_cgroup)
this_cpu_add(page->mem_cgroup->stat->count[idx], val);
}

#else
#define for_each_memcg_cache_index(_idx) \
for (; NULL; )
@@ -928,6 +944,11 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
static inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
{
}

static inline void memcg_kmem_update_page_stat(struct page *page,
enum mem_cgroup_stat_index idx, int val)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

#endif /* _LINUX_MEMCONTROL_H */
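
memcg_kmem_update_page_stat() above adjusts a per-cgroup, per-CPU page counter by a signed number of pages. The toy userspace model below (hypothetical names, not kernel code) illustrates the bookkeeping the slab charge/uncharge paths further down rely on: every charge is balanced by an uncharge of the same magnitude, and the "slab" line reported by memory_stat_show() is the sum of the two counters, scaled to bytes.

#include <stdio.h>

#define TOY_PAGE_SIZE 4096UL    /* assumed page size for the example */

enum { TOY_SLAB_RECLAIMABLE, TOY_SLAB_UNRECLAIMABLE, TOY_NR_STAT };

static long toy_stat[TOY_NR_STAT];

/* Mirrors the idea of memcg_kmem_update_page_stat(): add a signed page count. */
static void toy_update_stat(int idx, int pages)
{
        toy_stat[idx] += pages;
}

int main(void)
{
        toy_update_stat(TOY_SLAB_RECLAIMABLE, 1 << 1);      /* charge an order-1 reclaimable allocation */
        toy_update_stat(TOY_SLAB_UNRECLAIMABLE, 1 << 0);    /* charge an order-0 unreclaimable allocation */
        toy_update_stat(TOY_SLAB_RECLAIMABLE, -(1 << 1));   /* uncharge: same magnitude, negative sign */

        /* What the memory.stat output would report, in bytes. */
        printf("slab %lu\n", (toy_stat[TOY_SLAB_RECLAIMABLE] +
                              toy_stat[TOY_SLAB_UNRECLAIMABLE]) * TOY_PAGE_SIZE);
        printf("slab_reclaimable %lu\n", toy_stat[TOY_SLAB_RECLAIMABLE] * TOY_PAGE_SIZE);
        printf("slab_unreclaimable %lu\n", toy_stat[TOY_SLAB_UNRECLAIMABLE] * TOY_PAGE_SIZE);
        return 0;
}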
8 changes: 8 additions & 0 deletions mm/memcontrol.c
@@ -5106,6 +5106,9 @@ static int memory_stat_show(struct seq_file *m, void *v)
(u64)stat[MEM_CGROUP_STAT_RSS] * PAGE_SIZE);
seq_printf(m, "file %llu\n",
(u64)stat[MEM_CGROUP_STAT_CACHE] * PAGE_SIZE);
seq_printf(m, "slab %llu\n",
(u64)(stat[MEMCG_SLAB_RECLAIMABLE] +
stat[MEMCG_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);
seq_printf(m, "sock %llu\n",
(u64)stat[MEMCG_SOCK] * PAGE_SIZE);

@@ -5126,6 +5129,11 @@ static int memory_stat_show(struct seq_file *m, void *v)
mem_cgroup_lru_names[i], (u64)val * PAGE_SIZE);
}

seq_printf(m, "slab_reclaimable %llu\n",
(u64)stat[MEMCG_SLAB_RECLAIMABLE] * PAGE_SIZE);
seq_printf(m, "slab_unreclaimable %llu\n",
(u64)stat[MEMCG_SLAB_UNRECLAIMABLE] * PAGE_SIZE);

/* Accumulated memory events */

seq_printf(m, "pgfault %lu\n",
8 changes: 5 additions & 3 deletions mm/slab.c
@@ -1442,9 +1442,10 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
*/
static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
{
const unsigned long nr_freed = (1 << cachep->gfporder);
int order = cachep->gfporder;
unsigned long nr_freed = (1 << order);

kmemcheck_free_shadow(page, cachep->gfporder);
kmemcheck_free_shadow(page, order);

if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
sub_zone_page_state(page_zone(page),
@@ -1461,7 +1462,8 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)

if (current->reclaim_state)
current->reclaim_state->reclaimed_slab += nr_freed;
__free_kmem_pages(page, cachep->gfporder);
memcg_uncharge_slab(page, order, cachep);
__free_pages(page, order);
}

static void kmem_rcu_free(struct rcu_head *head)
30 changes: 28 additions & 2 deletions mm/slab.h
@@ -246,12 +246,33 @@ static __always_inline int memcg_charge_slab(struct page *page,
gfp_t gfp, int order,
struct kmem_cache *s)
{
int ret;

if (!memcg_kmem_enabled())
return 0;
if (is_root_cache(s))
return 0;
return __memcg_kmem_charge_memcg(page, gfp, order,
s->memcg_params.memcg);

ret = __memcg_kmem_charge_memcg(page, gfp, order,
s->memcg_params.memcg);
if (ret)
return ret;

memcg_kmem_update_page_stat(page,
(s->flags & SLAB_RECLAIM_ACCOUNT) ?
MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
1 << order);
return 0;
}

static __always_inline void memcg_uncharge_slab(struct page *page, int order,
struct kmem_cache *s)
{
memcg_kmem_update_page_stat(page,
(s->flags & SLAB_RECLAIM_ACCOUNT) ?
MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
-(1 << order));
memcg_kmem_uncharge(page, order);
}

extern void slab_init_memcg_params(struct kmem_cache *);
@@ -294,6 +315,11 @@ static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
return 0;
}

static inline void memcg_uncharge_slab(struct page *page, int order,
struct kmem_cache *s)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}
3 changes: 2 additions & 1 deletion mm/slub.c
@@ -1540,7 +1540,8 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
page_mapcount_reset(page);
if (current->reclaim_state)
current->reclaim_state->reclaimed_slab += pages;
__free_kmem_pages(page, order);
memcg_uncharge_slab(page, order, s);
__free_pages(page, order);
}

#define need_reserve_slab_rcu \
