Skip to content

Commit

Permalink
mm: list_lru: rename list_lru_per_memcg to list_lru_memcg
Browse files Browse the repository at this point in the history
The name list_lru_memcg was previously taken and became available as of the
last commit.  Rename list_lru_per_memcg to list_lru_memcg, since the shorter
name is more concise.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Muchun Song <[email protected]>
Cc: Alex Shi <[email protected]>
Cc: Anna Schumaker <[email protected]>
Cc: Chao Yu <[email protected]>
Cc: Dave Chinner <[email protected]>
Cc: Fam Zheng <[email protected]>
Cc: Jaegeuk Kim <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Kari Argillander <[email protected]>
Cc: Matthew Wilcox (Oracle) <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Qi Zheng <[email protected]>
Cc: Roman Gushchin <[email protected]>
Cc: Shakeel Butt <[email protected]>
Cc: Theodore Ts'o <[email protected]>
Cc: Trond Myklebust <[email protected]>
Cc: Vladimir Davydov <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Wei Yang <[email protected]>
Cc: Xiongchun Duan <[email protected]>
Cc: Yang Shi <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
  • Loading branch information
Muchun Song authored and torvalds committed Mar 22, 2022
1 parent be74050 commit d701107
Show file tree
Hide file tree
Showing 2 changed files with 10 additions and 10 deletions.
2 changes: 1 addition & 1 deletion include/linux/list_lru.h
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ struct list_lru_one {
long nr_items;
};

struct list_lru_per_memcg {
struct list_lru_memcg {
struct rcu_head rcu;
/* array of per cgroup per node lists, indexed by node id */
struct list_lru_one node[];
Expand Down
18 changes: 9 additions & 9 deletions mm/list_lru.c
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
{
if (list_lru_memcg_aware(lru) && idx >= 0) {
struct list_lru_per_memcg *mlru = xa_load(&lru->xa, idx);
struct list_lru_memcg *mlru = xa_load(&lru->xa, idx);

return mlru ? &mlru->node[nid] : NULL;
}
Expand Down Expand Up @@ -306,7 +306,7 @@ unsigned long list_lru_walk_node(struct list_lru *lru, int nid,

#ifdef CONFIG_MEMCG_KMEM
if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
struct list_lru_per_memcg *mlru;
struct list_lru_memcg *mlru;
unsigned long index;

xa_for_each(&lru->xa, index, mlru) {
Expand Down Expand Up @@ -335,10 +335,10 @@ static void init_one_lru(struct list_lru_one *l)
}

#ifdef CONFIG_MEMCG_KMEM
static struct list_lru_per_memcg *memcg_init_list_lru_one(gfp_t gfp)
static struct list_lru_memcg *memcg_init_list_lru_one(gfp_t gfp)
{
int nid;
struct list_lru_per_memcg *mlru;
struct list_lru_memcg *mlru;

mlru = kmalloc(struct_size(mlru, node, nr_node_ids), gfp);
if (!mlru)
Expand All @@ -352,7 +352,7 @@ static struct list_lru_per_memcg *memcg_init_list_lru_one(gfp_t gfp)

static void memcg_list_lru_free(struct list_lru *lru, int src_idx)
{
struct list_lru_per_memcg *mlru = xa_erase_irq(&lru->xa, src_idx);
struct list_lru_memcg *mlru = xa_erase_irq(&lru->xa, src_idx);

/*
* The __list_lru_walk_one() can walk the list of this node.
Expand All @@ -374,7 +374,7 @@ static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
static void memcg_destroy_list_lru(struct list_lru *lru)
{
XA_STATE(xas, &lru->xa, 0);
struct list_lru_per_memcg *mlru;
struct list_lru_memcg *mlru;

if (!list_lru_memcg_aware(lru))
return;
Expand Down Expand Up @@ -475,7 +475,7 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
int i;
unsigned long flags;
struct list_lru_memcg_table {
struct list_lru_per_memcg *mlru;
struct list_lru_memcg *mlru;
struct mem_cgroup *memcg;
} *table;
XA_STATE(xas, &lru->xa, 0);
Expand All @@ -491,7 +491,7 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
/*
* Because the list_lru can be reparented to the parent cgroup's
* list_lru, we should make sure that this cgroup and all its
* ancestors have allocated list_lru_per_memcg.
* ancestors have allocated list_lru_memcg.
*/
for (i = 0; memcg; memcg = parent_mem_cgroup(memcg), i++) {
if (memcg_list_lru_allocated(memcg, lru))
Expand All @@ -510,7 +510,7 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
xas_lock_irqsave(&xas, flags);
while (i--) {
int index = READ_ONCE(table[i].memcg->kmemcg_id);
struct list_lru_per_memcg *mlru = table[i].mlru;
struct list_lru_memcg *mlru = table[i].mlru;

xas_set(&xas, index);
retry:
Expand Down

0 comments on commit d701107

Please sign in to comment.