Skip to content

Commit

Permalink
mm: remove CONFIG_MEMCG_KMEM
Browse files Browse the repository at this point in the history
CONFIG_MEMCG_KMEM used to be a user-visible option for whether slab
tracking is enabled.  It has been default-enabled and equivalent to
CONFIG_MEMCG for almost a decade.  We've only grown more kernel memory
accounting sites since, and there is no imaginable cgroup use case going
forward that wants to track user pages but not the multitude of
user-drivable kernel allocations.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Johannes Weiner <[email protected]>
Acked-by: Roman Gushchin <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Acked-by: Shakeel Butt <[email protected]>
Acked-by: David Hildenbrand <[email protected]>
Cc: Muchun Song <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
  • Loading branch information
hnaz authored and akpm00 committed Jul 10, 2024
1 parent 6df1323 commit 3a3b7fe
Show file tree
Hide file tree
Showing 20 changed files with 59 additions and 131 deletions.
4 changes: 2 additions & 2 deletions include/linux/bpf.h
Original file line number Diff line number Diff line change
Expand Up @@ -275,7 +275,7 @@ struct bpf_map {
u32 btf_value_type_id;
u32 btf_vmlinux_value_type_id;
struct btf *btf;
#ifdef CONFIG_MEMCG_KMEM
#ifdef CONFIG_MEMCG
struct obj_cgroup *objcg;
#endif
char name[BPF_OBJ_NAME_LEN];
Expand Down Expand Up @@ -2252,7 +2252,7 @@ struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);

int bpf_map_alloc_pages(const struct bpf_map *map, gfp_t gfp, int nid,
unsigned long nr_pages, struct page **page_array);
#ifdef CONFIG_MEMCG_KMEM
#ifdef CONFIG_MEMCG
void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
int node);
void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
Expand Down
2 changes: 1 addition & 1 deletion include/linux/list_lru.h
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ struct list_lru_node {

struct list_lru {
struct list_lru_node *node;
#ifdef CONFIG_MEMCG_KMEM
#ifdef CONFIG_MEMCG
struct list_head list;
int shrinker_id;
bool memcg_aware;
Expand Down
22 changes: 5 additions & 17 deletions include/linux/memcontrol.h
Original file line number Diff line number Diff line change
Expand Up @@ -195,7 +195,7 @@ struct mem_cgroup {
/* Range enforcement for interrupt charges */
struct work_struct high_work;

#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
#ifdef CONFIG_ZSWAP
unsigned long zswap_max;

/*
Expand Down Expand Up @@ -236,7 +236,6 @@ struct mem_cgroup {
*/
unsigned long socket_pressure;

#ifdef CONFIG_MEMCG_KMEM
int kmemcg_id;
/*
* memcg->objcg is wiped out as a part of the objcg repaprenting
Expand All @@ -247,7 +246,6 @@ struct mem_cgroup {
struct obj_cgroup *orig_objcg;
/* list of inherited objcgs, protected by objcg_lock */
struct list_head objcg_list;
#endif

struct memcg_vmstats_percpu __percpu *vmstats_percpu;

Expand Down Expand Up @@ -532,7 +530,6 @@ static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *ob
return memcg;
}

#ifdef CONFIG_MEMCG_KMEM
/*
* folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
* @folio: Pointer to the folio.
Expand All @@ -548,15 +545,6 @@ static inline bool folio_memcg_kmem(struct folio *folio)
return folio->memcg_data & MEMCG_DATA_KMEM;
}


#else
static inline bool folio_memcg_kmem(struct folio *folio)
{
return false;
}

#endif

static inline bool PageMemcgKmem(struct page *page)
{
return folio_memcg_kmem(page_folio(page));
Expand Down Expand Up @@ -1488,7 +1476,7 @@ static inline void split_page_memcg(struct page *head, int old_order, int new_or
* if MEMCG_DATA_OBJEXTS is set.
*/
struct slabobj_ext {
#ifdef CONFIG_MEMCG_KMEM
#ifdef CONFIG_MEMCG
struct obj_cgroup *objcg;
#endif
#ifdef CONFIG_MEM_ALLOC_PROFILING
Expand Down Expand Up @@ -1663,7 +1651,7 @@ static inline void set_shrinker_bit(struct mem_cgroup *memcg,
}
#endif

#ifdef CONFIG_MEMCG_KMEM
#ifdef CONFIG_MEMCG
bool mem_cgroup_kmem_disabled(void);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);
Expand Down Expand Up @@ -1806,9 +1794,9 @@ static inline void count_objcg_event(struct obj_cgroup *objcg,
{
}

#endif /* CONFIG_MEMCG_KMEM */
#endif /* CONFIG_MEMCG */

#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
#if defined(CONFIG_MEMCG) && defined(CONFIG_ZSWAP)
bool obj_cgroup_may_zswap(struct obj_cgroup *objcg);
void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size);
void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size);
Expand Down
3 changes: 1 addition & 2 deletions include/linux/sched.h
Original file line number Diff line number Diff line change
Expand Up @@ -1457,9 +1457,8 @@ struct task_struct {

/* Used by memcontrol for targeted memcg charge: */
struct mem_cgroup *active_memcg;
#endif

#ifdef CONFIG_MEMCG_KMEM
/* Cache for current->cgroups->memcg->objcg lookups: */
struct obj_cgroup *objcg;
#endif

Expand Down
12 changes: 6 additions & 6 deletions include/linux/slab.h
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ enum _slab_flag_bits {
#ifdef CONFIG_FAILSLAB
_SLAB_FAILSLAB,
#endif
#ifdef CONFIG_MEMCG_KMEM
#ifdef CONFIG_MEMCG
_SLAB_ACCOUNT,
#endif
#ifdef CONFIG_KASAN_GENERIC
Expand Down Expand Up @@ -171,7 +171,7 @@ enum _slab_flag_bits {
# define SLAB_FAILSLAB __SLAB_FLAG_UNUSED
#endif
/* Account to memcg */
#ifdef CONFIG_MEMCG_KMEM
#ifdef CONFIG_MEMCG
# define SLAB_ACCOUNT __SLAB_FLAG_BIT(_SLAB_ACCOUNT)
#else
# define SLAB_ACCOUNT __SLAB_FLAG_UNUSED
Expand Down Expand Up @@ -407,7 +407,7 @@ enum kmalloc_cache_type {
#ifndef CONFIG_ZONE_DMA
KMALLOC_DMA = KMALLOC_NORMAL,
#endif
#ifndef CONFIG_MEMCG_KMEM
#ifndef CONFIG_MEMCG
KMALLOC_CGROUP = KMALLOC_NORMAL,
#endif
KMALLOC_RANDOM_START = KMALLOC_NORMAL,
Expand All @@ -420,7 +420,7 @@ enum kmalloc_cache_type {
#ifdef CONFIG_ZONE_DMA
KMALLOC_DMA,
#endif
#ifdef CONFIG_MEMCG_KMEM
#ifdef CONFIG_MEMCG
KMALLOC_CGROUP,
#endif
NR_KMALLOC_TYPES
Expand All @@ -435,7 +435,7 @@ kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];
#define KMALLOC_NOT_NORMAL_BITS \
(__GFP_RECLAIMABLE | \
(IS_ENABLED(CONFIG_ZONE_DMA) ? __GFP_DMA : 0) | \
(IS_ENABLED(CONFIG_MEMCG_KMEM) ? __GFP_ACCOUNT : 0))
(IS_ENABLED(CONFIG_MEMCG) ? __GFP_ACCOUNT : 0))

extern unsigned long random_kmalloc_seed;

Expand Down Expand Up @@ -463,7 +463,7 @@ static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags, unsigne
*/
if (IS_ENABLED(CONFIG_ZONE_DMA) && (flags & __GFP_DMA))
return KMALLOC_DMA;
if (!IS_ENABLED(CONFIG_MEMCG_KMEM) || (flags & __GFP_RECLAIMABLE))
if (!IS_ENABLED(CONFIG_MEMCG) || (flags & __GFP_RECLAIMABLE))
return KMALLOC_RECLAIM;
else
return KMALLOC_CGROUP;
Expand Down
4 changes: 2 additions & 2 deletions include/trace/events/kmem.h
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ TRACE_EVENT(kmem_cache_alloc,
__entry->bytes_alloc = s->size;
__entry->gfp_flags = (__force unsigned long)gfp_flags;
__entry->node = node;
__entry->accounted = IS_ENABLED(CONFIG_MEMCG_KMEM) ?
__entry->accounted = IS_ENABLED(CONFIG_MEMCG) ?
((gfp_flags & __GFP_ACCOUNT) ||
(s->flags & SLAB_ACCOUNT)) : false;
),
Expand Down Expand Up @@ -87,7 +87,7 @@ TRACE_EVENT(kmalloc,
__entry->bytes_alloc,
show_gfp_flags(__entry->gfp_flags),
__entry->node,
(IS_ENABLED(CONFIG_MEMCG_KMEM) &&
(IS_ENABLED(CONFIG_MEMCG) &&
(__entry->gfp_flags & (__force unsigned long)__GFP_ACCOUNT)) ? "true" : "false")
);

Expand Down
5 changes: 0 additions & 5 deletions init/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -986,11 +986,6 @@ config MEMCG_V1

Say N if unsure.

config MEMCG_KMEM
bool
depends on MEMCG
default y

config BLK_CGROUP
bool "IO controller"
depends on BLOCK
Expand Down
9 changes: 3 additions & 6 deletions kernel/bpf/memalloc.c
Original file line number Diff line number Diff line change
Expand Up @@ -155,12 +155,9 @@ static void *__alloc(struct bpf_mem_cache *c, int node, gfp_t flags)

static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
{
#ifdef CONFIG_MEMCG_KMEM
#ifdef CONFIG_MEMCG
if (c->objcg)
return get_mem_cgroup_from_objcg(c->objcg);
#endif

#ifdef CONFIG_MEMCG
return root_mem_cgroup;
#else
return NULL;
Expand Down Expand Up @@ -534,7 +531,7 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
size += LLIST_NODE_SZ; /* room for llist_node */
unit_size = size;

#ifdef CONFIG_MEMCG_KMEM
#ifdef CONFIG_MEMCG
if (memcg_bpf_enabled())
objcg = get_obj_cgroup_from_current();
#endif
Expand All @@ -556,7 +553,7 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
pcc = __alloc_percpu_gfp(sizeof(*cc), 8, GFP_KERNEL);
if (!pcc)
return -ENOMEM;
#ifdef CONFIG_MEMCG_KMEM
#ifdef CONFIG_MEMCG
objcg = get_obj_cgroup_from_current();
#endif
ma->objcg = objcg;
Expand Down
6 changes: 3 additions & 3 deletions kernel/bpf/syscall.c
Original file line number Diff line number Diff line change
Expand Up @@ -385,7 +385,7 @@ void bpf_map_free_id(struct bpf_map *map)
spin_unlock_irqrestore(&map_idr_lock, flags);
}

#ifdef CONFIG_MEMCG_KMEM
#ifdef CONFIG_MEMCG
static void bpf_map_save_memcg(struct bpf_map *map)
{
/* Currently if a map is created by a process belonging to the root
Expand Down Expand Up @@ -486,7 +486,7 @@ int bpf_map_alloc_pages(const struct bpf_map *map, gfp_t gfp, int nid,
unsigned long i, j;
struct page *pg;
int ret = 0;
#ifdef CONFIG_MEMCG_KMEM
#ifdef CONFIG_MEMCG
struct mem_cgroup *memcg, *old_memcg;

memcg = bpf_map_get_memcg(map);
Expand All @@ -505,7 +505,7 @@ int bpf_map_alloc_pages(const struct bpf_map *map, gfp_t gfp, int nid,
break;
}

#ifdef CONFIG_MEMCG_KMEM
#ifdef CONFIG_MEMCG
set_active_memcg(old_memcg);
mem_cgroup_put(memcg);
#endif
Expand Down
6 changes: 3 additions & 3 deletions mm/kfence/core.c
Original file line number Diff line number Diff line change
Expand Up @@ -602,7 +602,7 @@ static unsigned long kfence_init_pool(void)
continue;

__folio_set_slab(slab_folio(slab));
#ifdef CONFIG_MEMCG_KMEM
#ifdef CONFIG_MEMCG
slab->obj_exts = (unsigned long)&kfence_metadata_init[i / 2 - 1].obj_exts |
MEMCG_DATA_OBJEXTS;
#endif
Expand Down Expand Up @@ -652,7 +652,7 @@ static unsigned long kfence_init_pool(void)

if (!i || (i % 2))
continue;
#ifdef CONFIG_MEMCG_KMEM
#ifdef CONFIG_MEMCG
slab->obj_exts = 0;
#endif
__folio_clear_slab(slab_folio(slab));
Expand Down Expand Up @@ -1146,7 +1146,7 @@ void __kfence_free(void *addr)
{
struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

#ifdef CONFIG_MEMCG_KMEM
#ifdef CONFIG_MEMCG
KFENCE_WARN_ON(meta->obj_exts.objcg);
#endif
/*
Expand Down
2 changes: 1 addition & 1 deletion mm/kfence/kfence.h
Original file line number Diff line number Diff line change
Expand Up @@ -97,7 +97,7 @@ struct kfence_metadata {
struct kfence_track free_track;
/* For updating alloc_covered on frees. */
u32 alloc_stack_hash;
#ifdef CONFIG_MEMCG_KMEM
#ifdef CONFIG_MEMCG
struct slabobj_ext obj_exts;
#endif
};
Expand Down
14 changes: 7 additions & 7 deletions mm/list_lru.c
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@
#include "slab.h"
#include "internal.h"

#ifdef CONFIG_MEMCG_KMEM
#ifdef CONFIG_MEMCG
static LIST_HEAD(memcg_list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

Expand Down Expand Up @@ -83,7 +83,7 @@ list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
{
return &lru->node[nid].lru;
}
#endif /* CONFIG_MEMCG_KMEM */
#endif /* CONFIG_MEMCG */

bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
struct mem_cgroup *memcg)
Expand Down Expand Up @@ -294,7 +294,7 @@ unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
nr_to_walk);

#ifdef CONFIG_MEMCG_KMEM
#ifdef CONFIG_MEMCG
if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
struct list_lru_memcg *mlru;
unsigned long index;
Expand Down Expand Up @@ -324,7 +324,7 @@ static void init_one_lru(struct list_lru_one *l)
l->nr_items = 0;
}

#ifdef CONFIG_MEMCG_KMEM
#ifdef CONFIG_MEMCG
static struct list_lru_memcg *memcg_init_list_lru_one(gfp_t gfp)
{
int nid;
Expand Down Expand Up @@ -544,14 +544,14 @@ static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */
#endif /* CONFIG_MEMCG */

int __list_lru_init(struct list_lru *lru, bool memcg_aware,
struct lock_class_key *key, struct shrinker *shrinker)
{
int i;

#ifdef CONFIG_MEMCG_KMEM
#ifdef CONFIG_MEMCG
if (shrinker)
lru->shrinker_id = shrinker->id;
else
Expand Down Expand Up @@ -591,7 +591,7 @@ void list_lru_destroy(struct list_lru *lru)
kfree(lru->node);
lru->node = NULL;

#ifdef CONFIG_MEMCG_KMEM
#ifdef CONFIG_MEMCG
lru->shrinker_id = -1;
#endif
}
Expand Down
6 changes: 2 additions & 4 deletions mm/memcontrol-v1.c
Original file line number Diff line number Diff line change
Expand Up @@ -2756,7 +2756,7 @@ static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
return 0;
}

#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_SLUB_DEBUG)
#ifdef CONFIG_SLUB_DEBUG
static int mem_cgroup_slab_show(struct seq_file *m, void *p)
{
/*
Expand Down Expand Up @@ -2863,7 +2863,7 @@ struct cftype mem_cgroup_legacy_files[] = {
.write = mem_cgroup_reset,
.read_u64 = mem_cgroup_read_u64,
},
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_SLUB_DEBUG)
#ifdef CONFIG_SLUB_DEBUG
{
.name = "kmem.slabinfo",
.seq_show = mem_cgroup_slab_show,
Expand Down Expand Up @@ -2922,7 +2922,6 @@ struct cftype memsw_files[] = {
{ }, /* terminate */
};

#ifdef CONFIG_MEMCG_KMEM
void memcg1_account_kmem(struct mem_cgroup *memcg, int nr_pages)
{
if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
Expand All @@ -2932,7 +2931,6 @@ void memcg1_account_kmem(struct mem_cgroup *memcg, int nr_pages)
page_counter_uncharge(&memcg->kmem, -nr_pages);
}
}
#endif /* CONFIG_MEMCG_KMEM */

bool memcg1_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
gfp_t gfp_mask)
Expand Down
Loading

0 comments on commit 3a3b7fe

Please sign in to comment.