Skip to content

Commit

Permalink
memcg: aggregate memcg cache values in slabinfo
Browse files Browse the repository at this point in the history
When we create caches in memcgs, we need to display their usage
information somewhere.  We'll adopt a scheme similar to /proc/meminfo,
with aggregate totals shown in the global file, and per-group information
stored in the group itself.

For the time being, only reads are allowed in the per-group cache.

Signed-off-by: Glauber Costa <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
Cc: Greg Thelen <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: JoonSoo Kim <[email protected]>
Cc: KAMEZAWA Hiroyuki <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Pekka Enberg <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Suleiman Souhlal <[email protected]>
Cc: Tejun Heo <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
  • Loading branch information
Glauber Costa authored and torvalds committed Dec 18, 2012
1 parent 2293315 commit 749c541
Show file tree
Hide file tree
Showing 5 changed files with 108 additions and 5 deletions.
8 changes: 8 additions & 0 deletions include/linux/memcontrol.h
Original file line number Diff line number Diff line change
Expand Up @@ -420,6 +420,11 @@ static inline void sock_release_memcg(struct sock *sk)

#ifdef CONFIG_MEMCG_KMEM
extern struct static_key memcg_kmem_enabled_key;

extern int memcg_limited_groups_array_size;
/*
 * Iterate _idx over every possible per-memcg cache index of a root
 * cache.  Slots for memcgs that have not yet created their copy read
 * back as NULL, so callers must check the cache they fetch at _idx.
 *
 * The condition must use the macro argument: referencing a bare "i"
 * here only compiles when the caller's iterator happens to be named i.
 */
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++)

static inline bool memcg_kmem_enabled(void)
{
return static_key_false(&memcg_kmem_enabled_key);
Expand Down Expand Up @@ -557,6 +562,9 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
return __memcg_kmem_get_cache(cachep, gfp);
}
#else
/*
 * No kmem accounting: there are no per-memcg caches, so the loop body
 * must never execute.  Use a constant-false integer condition rather
 * than the pointer constant NULL; _idx is deliberately left untouched.
 */
#define for_each_memcg_cache_index(_idx)	\
	for (; 0; )

static inline bool memcg_kmem_enabled(void)
{
return false;
Expand Down
4 changes: 4 additions & 0 deletions include/linux/slab.h
Original file line number Diff line number Diff line change
Expand Up @@ -220,6 +220,10 @@ struct memcg_cache_params {

int memcg_update_all_caches(int num_memcgs);

struct seq_file;
int cache_show(struct kmem_cache *s, struct seq_file *m);
void print_slabinfo_header(struct seq_file *m);

/*
* Common kmalloc functions provided by all allocators
*/
Expand Down
30 changes: 29 additions & 1 deletion mm/memcontrol.c
Original file line number Diff line number Diff line change
Expand Up @@ -572,7 +572,8 @@ static void disarm_sock_keys(struct mem_cgroup *memcg)
* increase it.
*/
static DEFINE_IDA(kmem_limited_groups);
static int memcg_limited_groups_array_size;
int memcg_limited_groups_array_size;

/*
* MIN_SIZE is different than 1, because we would like to avoid going through
* the alloc/free process all the time. In a small machine, 4 kmem-limited
Expand Down Expand Up @@ -2794,6 +2795,27 @@ static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
return cachep->memcg_params->memcg_caches[memcg_cache_id(p->memcg)];
}

#ifdef CONFIG_SLABINFO
/*
 * Read handler for the per-group memory.kmem.slabinfo file: emit a
 * /proc/slabinfo-style listing of this memcg's own caches.
 *
 * Returns 0 on success, or -EIO when the group cannot account kernel
 * memory, so the file reads as an error rather than an empty list.
 */
static int mem_cgroup_slabinfo_read(struct cgroup *cont, struct cftype *cft,
struct seq_file *m)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
struct memcg_cache_params *params;

if (!memcg_can_account_kmem(memcg))
return -EIO;

/* Same column header as the global /proc/slabinfo. */
print_slabinfo_header(m);

/* Hold slab_caches_mutex so the cache list cannot change mid-walk. */
mutex_lock(&memcg->slab_caches_mutex);
list_for_each_entry(params, &memcg->memcg_slab_caches, list)
cache_show(memcg_params_to_cache(params), m);
mutex_unlock(&memcg->slab_caches_mutex);

return 0;
}
#endif

static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
{
struct res_counter *fail_res;
Expand Down Expand Up @@ -5822,6 +5844,12 @@ static struct cftype mem_cgroup_files[] = {
.trigger = mem_cgroup_reset,
.read = mem_cgroup_read,
},
#ifdef CONFIG_SLABINFO
{
.name = "kmem.slabinfo",
.read_seq_string = mem_cgroup_slabinfo_read,
},
#endif
#endif
{ }, /* terminate */
};
Expand Down
27 changes: 27 additions & 0 deletions mm/slab.h
Original file line number Diff line number Diff line change
Expand Up @@ -138,6 +138,23 @@ static inline bool slab_equal_or_root(struct kmem_cache *s,
return (p == s) ||
(s->memcg_params && (p == s->memcg_params->root_cache));
}

/*
 * Memcg copies of a cache carry a suffix in their global name, because
 * two caches in the system cannot be registered under the same name.
 * When printing locally (per group), report the root cache's base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s->name;

	return s->memcg_params->root_cache->name;
}

/*
 * Return root cache @s's per-memcg copy at index @idx; may be NULL when
 * that memcg has not created its copy yet (callers must check).
 */
static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx)
{
return s->memcg_params->memcg_caches[idx];
}
#else
static inline bool is_root_cache(struct kmem_cache *s)
{
Expand All @@ -163,6 +180,16 @@ static inline bool slab_equal_or_root(struct kmem_cache *s,
{
return true;
}

/* !CONFIG_MEMCG_KMEM: every cache is its own root; use its name directly. */
static inline const char *cache_name(struct kmem_cache *s)
{
return s->name;
}

/* !CONFIG_MEMCG_KMEM: there are no per-memcg copies to look up. */
static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx)
{
return NULL;
}
#endif

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
Expand Down
44 changes: 40 additions & 4 deletions mm/slab_common.c
Original file line number Diff line number Diff line change
Expand Up @@ -322,7 +322,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,


#ifdef CONFIG_SLABINFO
static void print_slabinfo_header(struct seq_file *m)
void print_slabinfo_header(struct seq_file *m)
{
/*
* Output format version, so at least we can change it
Expand Down Expand Up @@ -366,16 +366,43 @@ static void s_stop(struct seq_file *m, void *p)
mutex_unlock(&slab_mutex);
}

static int s_show(struct seq_file *m, void *p)
/*
 * Fold the slabinfo statistics of every memcg copy of root cache @s
 * into @info, so the global /proc/slabinfo shows aggregate totals per
 * root cache.  A no-op for non-root caches, whose numbers are only
 * shown in their own group's file.
 *
 * NOTE(review): for_each_memcg_cache_index() as defined above
 * references a bare "i" in its loop condition, so this iterator must
 * stay named "i" until the macro is fixed to use its argument.
 */
static void
memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
{
struct kmem_cache *c;
struct slabinfo sinfo;
int i;

if (!is_root_cache(s))
return;

for_each_memcg_cache_index(i) {
c = cache_from_memcg(s, i);
/* Slot is NULL when this memcg never created its copy. */
if (!c)
continue;

memset(&sinfo, 0, sizeof(sinfo));
get_slabinfo(c, &sinfo);

info->active_slabs += sinfo.active_slabs;
info->num_slabs += sinfo.num_slabs;
info->shared_avail += sinfo.shared_avail;
info->active_objs += sinfo.active_objs;
info->num_objs += sinfo.num_objs;
}
}

int cache_show(struct kmem_cache *s, struct seq_file *m)
{
struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
struct slabinfo sinfo;

memset(&sinfo, 0, sizeof(sinfo));
get_slabinfo(s, &sinfo);

memcg_accumulate_slabinfo(s, &sinfo);

seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
s->name, sinfo.active_objs, sinfo.num_objs, s->size,
cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
sinfo.objects_per_slab, (1 << sinfo.cache_order));

seq_printf(m, " : tunables %4u %4u %4u",
Expand All @@ -387,6 +414,15 @@ static int s_show(struct seq_file *m, void *p)
return 0;
}

/*
 * seq_file show callback for /proc/slabinfo: print one line per root
 * cache via cache_show() (which folds memcg children into the totals);
 * the per-memcg child caches themselves are skipped.
 */
static int s_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);

	if (is_root_cache(s))
		return cache_show(s, m);

	return 0;
}

/*
* slabinfo_op - iterator that generates /proc/slabinfo
*
Expand Down

0 comments on commit 749c541

Please sign in to comment.