slab: embed memcg_cache_params to kmem_cache
Currently, kmem_cache stores a pointer to struct memcg_cache_params
instead of embedding it.  The rationale is to save memory when kmem
accounting is disabled.  However, struct memcg_cache_params has shrivelled
drastically since it was first introduced:

* Initially:

struct memcg_cache_params {
	bool is_root_cache;
	union {
		struct kmem_cache *memcg_caches[0];
		struct {
			struct mem_cgroup *memcg;
			struct list_head list;
			struct kmem_cache *root_cache;
			bool dead;
			atomic_t nr_pages;
			struct work_struct destroy;
		};
	};
};

* Now:

struct memcg_cache_params {
	bool is_root_cache;
	union {
		struct {
			struct rcu_head rcu_head;
			struct kmem_cache *memcg_caches[0];
		};
		struct {
			struct mem_cgroup *memcg;
			struct kmem_cache *root_cache;
		};
	};
};

So the memory saving does not seem to be a clear win anymore.
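
For a rough sense of scale (my back-of-the-envelope numbers, not part of
the patch): on a 64-bit build the pointer costs one word, while the
shrunken struct packs into about three, so embedding adds roughly 16
bytes per kmem_cache while kmem accounting is disabled, i.e. a few KB
system-wide across a few hundred caches.  A standalone check with
stand-in types:

#include <stdio.h>

/* Stand-ins sized like the 64-bit kernel types; illustrative only. */
struct rcu_head { void *next; void (*func)(void *); };

struct memcg_cache_params {
	_Bool is_root_cache;
	union {
		struct {
			struct rcu_head rcu_head;
			void *memcg_caches[0];	/* GNU zero-length array */
		};
		struct {
			void *memcg;		/* struct mem_cgroup * */
			void *root_cache;	/* struct kmem_cache * */
		};
	};
};

int main(void)
{
	/* Typically prints 24: 8 (bool + padding) + 16 (union). */
	printf("%zu\n", sizeof(struct memcg_cache_params));
	return 0;
}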

OTOH, keeping a pointer to the memcg_cache_params struct instead of
embedding it means touching one more cache line on kmem alloc/free hot
paths.  Besides, the extra level of indirection makes it really painful
to link kmem caches into a list chained by a field of struct
memcg_cache_params, which I want to do in the following patch.  So let
us embed it.
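
To make the hot-path argument concrete, here is a minimal standalone
sketch (hypothetical stand-in types, not the kernel structs) of the
dependent load that embedding removes:

/* Illustrative only: 'params' stands in for memcg_cache_params. */
struct params { void *memcg; };

struct cache_ptr { struct params *memcg_params; };	/* before */
struct cache_emb { struct params memcg_params; };	/* after */

static void *memcg_before(struct cache_ptr *s)
{
	/* Two dependent loads: fetch s->memcg_params, then ->memcg;
	 * the second typically misses into another cache line. */
	return s->memcg_params->memcg;
}

static void *memcg_after(struct cache_emb *s)
{
	/* One load: ->memcg lives in the same allocation as *s. */
	return s->memcg_params.memcg;
}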

Signed-off-by: Vladimir Davydov <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: Pekka Enberg <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: Dave Chinner <[email protected]>
Cc: Dan Carpenter <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Vladimir Davydov authored and torvalds committed Feb 13, 2015
1 parent 49e7e7f commit f7ce319
Showing 7 changed files with 111 additions and 103 deletions.
17 changes: 7 additions & 10 deletions include/linux/slab.h
@@ -473,14 +473,14 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 #ifndef ARCH_SLAB_MINALIGN
 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
 #endif
 
+struct memcg_cache_array {
+	struct rcu_head rcu;
+	struct kmem_cache *entries[0];
+};
+
 /*
  * This is the main placeholder for memcg-related information in kmem caches.
- * struct kmem_cache will hold a pointer to it, so the memory cost while
- * disabled is 1 pointer. The runtime cost while enabled, gets bigger than it
- * would otherwise be if that would be bundled in kmem_cache: we'll need an
- * extra pointer chase. But the trade off clearly lays in favor of not
- * penalizing non-users.
  *
  * Both the root cache and the child caches will have it. For the root cache,
  * this will hold a dynamically allocated array large enough to hold
  * information about the currently limited memcgs in the system. To allow the
@@ -495,10 +495,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 struct memcg_cache_params {
 	bool is_root_cache;
 	union {
-		struct {
-			struct rcu_head rcu_head;
-			struct kmem_cache *memcg_caches[0];
-		};
+		struct memcg_cache_array __rcu *memcg_caches;
 		struct {
 			struct mem_cgroup *memcg;
 			struct kmem_cache *root_cache;
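The union above is discriminated by is_root_cache: a root cache uses the
memcg_caches arm (indexed by memcg->kmemcg_id, cf. the mm/memcontrol.c
hunks below), while a per-memcg child cache uses the memcg/root_cache
arm.  A standalone sketch of that relationship, with stand-in types and
RCU omitted (my illustration, not kernel code):

#include <stddef.h>

struct kmem_cache;				/* opaque here */
struct mem_cgroup { int kmemcg_id; };		/* index into entries[] */

struct memcg_cache_array {
	void *rcu;			/* placeholder for struct rcu_head */
	struct kmem_cache *entries[];
};

struct memcg_cache_params {
	_Bool is_root_cache;
	union {
		/* root arm: per-memcg children, indexed by kmemcg_id */
		struct memcg_cache_array *memcg_caches;
		/* child arm: owner memcg plus a link back to the root */
		struct {
			struct mem_cgroup *memcg;
			struct kmem_cache *root_cache;
		};
	};
};

/* Roughly what __memcg_kmem_get_cache() does, minus RCU and refcounts. */
static struct kmem_cache *child_cache(struct memcg_cache_params *root,
				      struct mem_cgroup *memcg)
{
	if (!root->is_root_cache)
		return NULL;
	return root->memcg_caches->entries[memcg->kmemcg_id];
}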
2 changes: 1 addition & 1 deletion include/linux/slab_def.h
@@ -70,7 +70,7 @@ struct kmem_cache {
 	int obj_offset;
 #endif /* CONFIG_DEBUG_SLAB */
 #ifdef CONFIG_MEMCG_KMEM
-	struct memcg_cache_params *memcg_params;
+	struct memcg_cache_params memcg_params;
 #endif
 
 	struct kmem_cache_node *node[MAX_NUMNODES];
2 changes: 1 addition & 1 deletion include/linux/slub_def.h
@@ -85,7 +85,7 @@ struct kmem_cache {
 	struct kobject kobj;	/* For sysfs */
 #endif
 #ifdef CONFIG_MEMCG_KMEM
-	struct memcg_cache_params *memcg_params;
+	struct memcg_cache_params memcg_params;
 	int max_attr_size; /* for propagation, maximum size of a stored attr */
 #ifdef CONFIG_SYSFS
 	struct kset *memcg_kset;
11 changes: 5 additions & 6 deletions mm/memcontrol.c
@@ -332,7 +332,7 @@ struct mem_cgroup {
 	struct cg_proto tcp_mem;
 #endif
 #if defined(CONFIG_MEMCG_KMEM)
-	/* Index in the kmem_cache->memcg_params->memcg_caches array */
+	/* Index in the kmem_cache->memcg_params.memcg_caches array */
 	int kmemcg_id;
 #endif
 
@@ -531,7 +531,7 @@ static void disarm_sock_keys(struct mem_cgroup *memcg)

 #ifdef CONFIG_MEMCG_KMEM
 /*
- * This will be the memcg's index in each cache's ->memcg_params->memcg_caches.
+ * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
  * The main reason for not using cgroup id for this:
  * this works better in sparse environments, where we have a lot of memcgs,
  * but only a few kmem-limited. Or also, if we have, for instance, 200
@@ -2667,8 +2667,7 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
 	struct mem_cgroup *memcg;
 	struct kmem_cache *memcg_cachep;
 
-	VM_BUG_ON(!cachep->memcg_params);
-	VM_BUG_ON(!cachep->memcg_params->is_root_cache);
+	VM_BUG_ON(!is_root_cache(cachep));
 
 	if (current->memcg_kmem_skip_account)
 		return cachep;
@@ -2702,7 +2701,7 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
 void __memcg_kmem_put_cache(struct kmem_cache *cachep)
 {
 	if (!is_root_cache(cachep))
-		css_put(&cachep->memcg_params->memcg->css);
+		css_put(&cachep->memcg_params.memcg->css);
 }
 
 /*
@@ -2778,7 +2777,7 @@ struct mem_cgroup *__mem_cgroup_from_kmem(void *ptr)
 	if (PageSlab(page)) {
 		cachep = page->slab_cache;
 		if (!is_root_cache(cachep))
-			memcg = cachep->memcg_params->memcg;
+			memcg = cachep->memcg_params.memcg;
 	} else
 		/* page allocated by alloc_kmem_pages */
 		memcg = page->mem_cgroup;
48 changes: 23 additions & 25 deletions mm/slab.h
@@ -86,8 +86,6 @@ extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
 extern void create_boot_cache(struct kmem_cache *, const char *name,
 			size_t size, unsigned long flags);
 
-struct mem_cgroup;
-
 int slab_unmergeable(struct kmem_cache *s);
 struct kmem_cache *find_mergeable(size_t size, size_t align,
 	unsigned long flags, const char *name, void (*ctor)(void *));
@@ -167,14 +165,13 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 #ifdef CONFIG_MEMCG_KMEM
 static inline bool is_root_cache(struct kmem_cache *s)
 {
-	return !s->memcg_params || s->memcg_params->is_root_cache;
+	return s->memcg_params.is_root_cache;
 }
 
 static inline bool slab_equal_or_root(struct kmem_cache *s,
-				struct kmem_cache *p)
+				      struct kmem_cache *p)
 {
-	return (p == s) ||
-		(s->memcg_params && (p == s->memcg_params->root_cache));
+	return p == s || p == s->memcg_params.root_cache;
 }
 
 /*
@@ -185,37 +182,30 @@ static inline bool slab_equal_or_root(struct kmem_cache *s,
 static inline const char *cache_name(struct kmem_cache *s)
 {
 	if (!is_root_cache(s))
-		return s->memcg_params->root_cache->name;
+		s = s->memcg_params.root_cache;
 	return s->name;
 }
 
 /*
  * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
- * That said the caller must assure the memcg's cache won't go away. Since once
- * created a memcg's cache is destroyed only along with the root cache, it is
- * true if we are going to allocate from the cache or hold a reference to the
- * root cache by other means. Otherwise, we should hold either the slab_mutex
- * or the memcg's slab_caches_mutex while calling this function and accessing
- * the returned value.
+ * That said the caller must assure the memcg's cache won't go away by either
+ * taking a css reference to the owner cgroup, or holding the slab_mutex.
  */
 static inline struct kmem_cache *
 cache_from_memcg_idx(struct kmem_cache *s, int idx)
 {
 	struct kmem_cache *cachep;
-	struct memcg_cache_params *params;
-
-	if (!s->memcg_params)
-		return NULL;
+	struct memcg_cache_array *arr;
 
 	rcu_read_lock();
-	params = rcu_dereference(s->memcg_params);
+	arr = rcu_dereference(s->memcg_params.memcg_caches);
 
 	/*
 	 * Make sure we will access the up-to-date value. The code updating
 	 * memcg_caches issues a write barrier to match this (see
-	 * memcg_register_cache()).
+	 * memcg_create_kmem_cache()).
 	 */
-	cachep = lockless_dereference(params->memcg_caches[idx]);
+	cachep = lockless_dereference(arr->entries[idx]);
 	rcu_read_unlock();
 
 	return cachep;
@@ -225,7 +215,7 @@ static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
 {
 	if (is_root_cache(s))
 		return s;
-	return s->memcg_params->root_cache;
+	return s->memcg_params.root_cache;
 }
 
 static __always_inline int memcg_charge_slab(struct kmem_cache *s,
@@ -235,7 +225,7 @@ static __always_inline int memcg_charge_slab(struct kmem_cache *s,
 		return 0;
 	if (is_root_cache(s))
 		return 0;
-	return memcg_charge_kmem(s->memcg_params->memcg, gfp, 1 << order);
+	return memcg_charge_kmem(s->memcg_params.memcg, gfp, 1 << order);
 }
 
 static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
@@ -244,9 +234,13 @@ static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
 		return;
 	if (is_root_cache(s))
 		return;
-	memcg_uncharge_kmem(s->memcg_params->memcg, 1 << order);
+	memcg_uncharge_kmem(s->memcg_params.memcg, 1 << order);
 }
-#else
+
+extern void slab_init_memcg_params(struct kmem_cache *);
+
+#else /* !CONFIG_MEMCG_KMEM */
+
 static inline bool is_root_cache(struct kmem_cache *s)
 {
 	return true;
@@ -282,7 +276,11 @@ static inline int memcg_charge_slab(struct kmem_cache *s, gfp_t gfp, int order)
 static inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
 {
 }
-#endif
+
+static inline void slab_init_memcg_params(struct kmem_cache *s)
+{
+}
+#endif /* CONFIG_MEMCG_KMEM */
 
 static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
 {
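The cache_from_memcg_idx() hunk above is the read side of the standard
RCU replace-and-reclaim pattern: the memcg_caches array is reallocated
when it grows, and the old copy is freed only after a grace period, as
the slab.h comment describes.  A hedged user-space analogue using
liburcu (default flavour, where reader threads must first call
rcu_register_thread(); the grow() helper and all names here are my own,
not from this patch):

#include <stdlib.h>
#include <string.h>
#include <urcu.h>		/* liburcu: link with -lurcu */

struct arr {
	size_t size;
	void *entries[];	/* stands in for kmem_cache pointers */
};

static struct arr *caches;	/* published via rcu_assign_pointer() */

/* Read side, as in cache_from_memcg_idx(): no locks, just RCU. */
static void *cache_at(int idx)
{
	struct arr *a;
	void *p;

	rcu_read_lock();
	a = rcu_dereference(caches);
	p = a ? a->entries[idx] : NULL;
	rcu_read_unlock();
	return p;
}

/* Update side: relocate to a bigger array, publish it, then free the
 * old one only after every pre-existing reader is done. */
static void grow(size_t new_size)
{
	struct arr *old = caches;
	struct arr *new = calloc(1, sizeof(*new) +
				 new_size * sizeof(void *));

	if (!new)
		return;		/* error handling elided */
	if (old)
		memcpy(new->entries, old->entries,
		       old->size * sizeof(void *));
	new->size = new_size;
	rcu_assign_pointer(caches, new);
	synchronize_rcu();	/* wait out the grace period */
	free(old);
}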