mm: memcg/slab: unify SLAB and SLUB page accounting
Currently the page accounting code is duplicated in the SLAB and SLUB
internals.  Let's move it into new (un)charge_slab_page helpers in
mm/slab.h.  These helpers are responsible for statistics (global and
memcg-aware) and for memcg charging, so they replace the direct
memcg_(un)charge_slab() calls.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Roman Gushchin <[email protected]>
Reviewed-by: Shakeel Butt <[email protected]>
Acked-by: Christoph Lameter <[email protected]>
Acked-by: Vladimir Davydov <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Waiman Long <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: Pekka Enberg <[email protected]>
Cc: Andrei Vagin <[email protected]>
Cc: Qian Cai <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
rgushchin authored and torvalds committed Jul 12, 2019
1 parent 49a18ea commit 6cea1d5
Showing 3 changed files with 30 additions and 28 deletions.
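
For readers skimming the diff below, here is a small self-contained userspace C sketch of the contract the new helpers encode. It is not kernel code: struct fake_cache, fake_memcg_charge(), fake_memcg_uncharge() and the fake_vmstat[] array are hypothetical stand-ins for the kmem_cache flags, the memcg charge path and the lruvec counters. Only the ordering mirrors the patch: charge first, bump the per-cache counter by 1 << order only if charging succeeded, and undo both steps in reverse at free time.

/*
 * Userspace sketch of the charge_slab_page()/uncharge_slab_page()
 * pairing introduced by this commit.  All fake_* names are stand-ins,
 * not kernel APIs.
 */
#include <stdio.h>
#include <stdbool.h>

enum vmstat_idx { NR_SLAB_RECLAIMABLE, NR_SLAB_UNRECLAIMABLE, NR_VMSTAT_ITEMS };

static long fake_vmstat[NR_VMSTAT_ITEMS];   /* stand-in for the lruvec counters */
static long fake_memcg_charged_pages;       /* stand-in for the memcg charge */

struct fake_cache {
        bool reclaim_account;               /* models SLAB_RECLAIM_ACCOUNT */
};

static int cache_vmstat_idx(const struct fake_cache *s)
{
        return s->reclaim_account ? NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE;
}

/* Pretend memcg charging; returns 0 on success, like the kernel path. */
static int fake_memcg_charge(int order)
{
        fake_memcg_charged_pages += 1L << order;
        return 0;
}

static void fake_memcg_uncharge(int order)
{
        fake_memcg_charged_pages -= 1L << order;
}

static int charge_slab_page(const struct fake_cache *s, int order)
{
        int ret = fake_memcg_charge(order);

        /* account the pages only when the charge succeeded */
        if (!ret)
                fake_vmstat[cache_vmstat_idx(s)] += 1L << order;
        return ret;
}

static void uncharge_slab_page(const struct fake_cache *s, int order)
{
        /* reverse order of the charge path: drop stats, then the charge */
        fake_vmstat[cache_vmstat_idx(s)] -= 1L << order;
        fake_memcg_uncharge(order);
}

int main(void)
{
        struct fake_cache cache = { .reclaim_account = true };
        int order = 2;                      /* a 4-page slab */

        if (charge_slab_page(&cache, order) == 0) {
                printf("reclaimable slab pages accounted: %ld\n",
                       fake_vmstat[NR_SLAB_RECLAIMABLE]);
                uncharge_slab_page(&cache, order);
        }
        printf("after free: %ld pages accounted, %ld memcg pages charged\n",
               fake_vmstat[NR_SLAB_RECLAIMABLE], fake_memcg_charged_pages);
        return 0;
}

The benefit of the unification is visible here as well: the SLAB_RECLAIM_ACCOUNT decision lives in one place (cache_vmstat_idx()) instead of being repeated at every SLAB and SLUB call site.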
mm/slab.c: 19 changes (3 additions, 16 deletions)
@@ -1360,7 +1360,6 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
                                                                int nodeid)
 {
        struct page *page;
-       int nr_pages;

        flags |= cachep->allocflags;

@@ -1370,17 +1369,11 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
                return NULL;
        }

-       if (memcg_charge_slab(page, flags, cachep->gfporder, cachep)) {
+       if (charge_slab_page(page, flags, cachep->gfporder, cachep)) {
                __free_pages(page, cachep->gfporder);
                return NULL;
        }

-       nr_pages = (1 << cachep->gfporder);
-       if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
-               mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, nr_pages);
-       else
-               mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, nr_pages);
-
        __SetPageSlab(page);
        /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
        if (sk_memalloc_socks() && page_is_pfmemalloc(page))
@@ -1395,12 +1388,6 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
 {
        int order = cachep->gfporder;
-       unsigned long nr_freed = (1 << order);
-
-       if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
-               mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, -nr_freed);
-       else
-               mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, -nr_freed);

        BUG_ON(!PageSlab(page));
        __ClearPageSlabPfmemalloc(page);
@@ -1409,8 +1396,8 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
        page->mapping = NULL;

        if (current->reclaim_state)
-               current->reclaim_state->reclaimed_slab += nr_freed;
-       memcg_uncharge_slab(page, order, cachep);
+               current->reclaim_state->reclaimed_slab += 1 << order;
+       uncharge_slab_page(page, order, cachep);
        __free_pages(page, order);
 }

mm/slab.h: 25 changes (25 additions, 0 deletions)
@@ -205,6 +205,12 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
 int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

+static inline int cache_vmstat_idx(struct kmem_cache *s)
+{
+       return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
+               NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE;
+}
+
 #ifdef CONFIG_MEMCG_KMEM

 /* List of all root caches. */
@@ -361,6 +367,25 @@ static inline struct kmem_cache *virt_to_cache(const void *obj)
        return page->slab_cache;
 }

+static __always_inline int charge_slab_page(struct page *page,
+                                           gfp_t gfp, int order,
+                                           struct kmem_cache *s)
+{
+       int ret = memcg_charge_slab(page, gfp, order, s);
+
+       if (!ret)
+               mod_lruvec_page_state(page, cache_vmstat_idx(s), 1 << order);
+
+       return ret;
+}
+
+static __always_inline void uncharge_slab_page(struct page *page, int order,
+                                              struct kmem_cache *s)
+{
+       mod_lruvec_page_state(page, cache_vmstat_idx(s), -(1 << order));
+       memcg_uncharge_slab(page, order, s);
+}
+
 static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
 {
        struct kmem_cache *cachep;
mm/slub.c: 14 changes (2 additions, 12 deletions)
@@ -1488,7 +1488,7 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s,
        else
                page = __alloc_pages_node(node, flags, order);

-       if (page && memcg_charge_slab(page, flags, order, s)) {
+       if (page && charge_slab_page(page, flags, order, s)) {
                __free_pages(page, order);
                page = NULL;
        }
@@ -1681,11 +1676,6 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
        if (!page)
                return NULL;

-       mod_lruvec_page_state(page,
-               (s->flags & SLAB_RECLAIM_ACCOUNT) ?
-               NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-               1 << oo_order(oo));
-
        inc_slabs_node(s, page_to_nid(page), page->objects);

        return page;
@@ -1719,18 +1714,13 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
                        check_object(s, page, p, SLUB_RED_INACTIVE);
        }

-       mod_lruvec_page_state(page,
-               (s->flags & SLAB_RECLAIM_ACCOUNT) ?
-               NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-               -pages);
-
        __ClearPageSlabPfmemalloc(page);
        __ClearPageSlab(page);

        page->mapping = NULL;
        if (current->reclaim_state)
                current->reclaim_state->reclaimed_slab += pages;
-       memcg_uncharge_slab(page, order, s);
+       uncharge_slab_page(page, order, s);
        __free_pages(page, order);
 }

