mm/memcg: apply add/del_page to lruvec
Take lruvec further: pass it instead of zone to add_page_to_lru_list() and
del_page_from_lru_list(), and have pagevec_lru_move_fn() pass lruvec down
to its target functions.

This cleanup eliminates a swathe of cruft in memcontrol.c, including
mem_cgroup_lru_add_list(), mem_cgroup_lru_del_list() and
mem_cgroup_lru_move_lists() - which never actually touched the lists.

In their place, add mem_cgroup_page_lruvec() to decide the lruvec (which
was previously a side-effect of the add) and mem_cgroup_update_lru_size()
to maintain the lru_size stats.

Whilst these are simplifications in their own right, the goal is to bring
the evaluation of lruvec next to the spin_locking of the lrus, in
preparation for a future patch.
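
To illustrate the new convention (a sketch, not part of the patch): a
caller that used to hand the zone to the helpers now resolves the lruvec
itself, right next to the zone->lru_lock that protects the lists, and
passes that instead:

	/* before: zone-based API, memcg lookup buried inside the helper */
	spin_lock_irq(&zone->lru_lock);
	del_page_from_lru_list(zone, page, page_lru(page));
	spin_unlock_irq(&zone->lru_lock);

	/* after: evaluate the lruvec once, beside the lock, then reuse it */
	spin_lock_irq(&zone->lru_lock);
	lruvec = mem_cgroup_page_lruvec(page, zone);
	del_page_from_lru_list(page, lruvec, page_lru(page));
	spin_unlock_irq(&zone->lru_lock);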

Signed-off-by: Hugh Dickins <[email protected]>
Cc: KOSAKI Motohiro <[email protected]>
Acked-by: KAMEZAWA Hiroyuki <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Acked-by: Konstantin Khlebnikov <[email protected]>
Cc: Johannes Weiner <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Hugh Dickins authored and torvalds committed May 29, 2012
1 parent 75b00af commit fa9add6
Showing 8 changed files with 122 additions and 181 deletions.
32 changes: 7 additions & 25 deletions include/linux/memcontrol.h
@@ -63,11 +63,7 @@ extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 					gfp_t gfp_mask);
 
 struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
-struct lruvec *mem_cgroup_lru_add_list(struct zone *, struct page *,
-				       enum lru_list);
-void mem_cgroup_lru_del_list(struct page *, enum lru_list);
-struct lruvec *mem_cgroup_lru_move_lists(struct zone *, struct page *,
-					 enum lru_list, enum lru_list);
+struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);
 
 /* For coalescing uncharge for reducing memcg' overhead*/
 extern void mem_cgroup_uncharge_start(void);
@@ -122,8 +118,7 @@ int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
 int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec);
 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
 unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
-struct zone_reclaim_stat*
-mem_cgroup_get_reclaim_stat_from_page(struct page *page);
+void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
 extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
 					struct task_struct *p);
 extern void mem_cgroup_replace_page_cache(struct page *oldpage,
@@ -250,21 +245,8 @@ static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
 	return &zone->lruvec;
 }
 
-static inline struct lruvec *mem_cgroup_lru_add_list(struct zone *zone,
-						     struct page *page,
-						     enum lru_list lru)
-{
-	return &zone->lruvec;
-}
-
-static inline void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
-{
-}
-
-static inline struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
-						       struct page *page,
-						       enum lru_list from,
-						       enum lru_list to)
+static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
+						    struct zone *zone)
 {
 	return &zone->lruvec;
 }
@@ -345,10 +327,10 @@ mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
 	return 0;
 }
 
-static inline struct zone_reclaim_stat*
-mem_cgroup_get_reclaim_stat_from_page(struct page *page)
+static inline void
+mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
+			   int increment)
 {
-	return NULL;
 }
 
 static inline void
20 changes: 10 additions & 10 deletions include/linux/mm_inline.h
@@ -21,22 +21,22 @@ static inline int page_is_file_cache(struct page *page)
 	return !PageSwapBacked(page);
 }
 
-static __always_inline void
-add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list lru)
+static __always_inline void add_page_to_lru_list(struct page *page,
+				struct lruvec *lruvec, enum lru_list lru)
 {
-	struct lruvec *lruvec;
-
-	lruvec = mem_cgroup_lru_add_list(zone, page, lru);
+	int nr_pages = hpage_nr_pages(page);
+	mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
 	list_add(&page->lru, &lruvec->lists[lru]);
-	__mod_zone_page_state(zone, NR_LRU_BASE + lru, hpage_nr_pages(page));
+	__mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, nr_pages);
 }
 
-static __always_inline void
-del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list lru)
+static __always_inline void del_page_from_lru_list(struct page *page,
+				struct lruvec *lruvec, enum lru_list lru)
 {
-	mem_cgroup_lru_del_list(page, lru);
+	int nr_pages = hpage_nr_pages(page);
+	mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
 	list_del(&page->lru);
-	__mod_zone_page_state(zone, NR_LRU_BASE + lru, -hpage_nr_pages(page));
+	__mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, -nr_pages);
 }
 
 /**
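
Illustration only, not code from this patch: because both inlines now take
the same lruvec, the work that mem_cgroup_lru_move_lists() used to do
reduces to a plain del/add pair; a hypothetical move helper would be just:

	/* hypothetical helper, for illustration: move a page between lrus */
	static __always_inline void
	move_page_between_lru_lists(struct page *page, struct lruvec *lruvec,
				    enum lru_list from, enum lru_list to)
	{
		del_page_from_lru_list(page, lruvec, from); /* list_del + lru_size -= */
		add_page_to_lru_list(page, lruvec, to);     /* list_add + lru_size += */
	}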
4 changes: 2 additions & 2 deletions include/linux/swap.h
@@ -221,8 +221,8 @@ extern unsigned int nr_free_pagecache_pages(void);
 /* linux/mm/swap.c */
 extern void __lru_cache_add(struct page *, enum lru_list lru);
 extern void lru_cache_add_lru(struct page *, enum lru_list lru);
-extern void lru_add_page_tail(struct zone* zone,
-			      struct page *page, struct page *page_tail);
+extern void lru_add_page_tail(struct page *page, struct page *page_tail,
+			       struct lruvec *lruvec);
 extern void activate_page(struct page *);
 extern void mark_page_accessed(struct page *);
 extern void lru_add_drain(void);
5 changes: 4 additions & 1 deletion mm/compaction.c
@@ -227,6 +227,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 	unsigned long nr_scanned = 0, nr_isolated = 0;
 	struct list_head *migratelist = &cc->migratepages;
 	isolate_mode_t mode = 0;
+	struct lruvec *lruvec;
 
 	/*
 	 * Ensure that there are not too many pages isolated from the LRU
@@ -328,14 +329,16 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 		if (cc->mode != COMPACT_SYNC)
 			mode |= ISOLATE_ASYNC_MIGRATE;
 
+		lruvec = mem_cgroup_page_lruvec(page, zone);
+
 		/* Try isolate the page */
 		if (__isolate_lru_page(page, mode) != 0)
 			continue;
 
 		VM_BUG_ON(PageTransCompound(page));
 
 		/* Successfully isolated */
-		del_page_from_lru_list(zone, page, page_lru(page));
+		del_page_from_lru_list(page, lruvec, page_lru(page));
 		list_add(&page->lru, migratelist);
 		cc->nr_migratepages++;
 		nr_isolated++;
8 changes: 5 additions & 3 deletions mm/huge_memory.c
@@ -1231,10 +1231,13 @@ static void __split_huge_page_refcount(struct page *page)
 {
 	int i;
 	struct zone *zone = page_zone(page);
+	struct lruvec *lruvec;
 	int tail_count = 0;
 
 	/* prevent PageLRU to go away from under us, and freeze lru stats */
 	spin_lock_irq(&zone->lru_lock);
+	lruvec = mem_cgroup_page_lruvec(page, zone);
+
 	compound_lock(page);
 	/* complete memcg works before add pages to LRU */
 	mem_cgroup_split_huge_fixup(page);
@@ -1309,13 +1312,12 @@ static void __split_huge_page_refcount(struct page *page)
 		BUG_ON(!PageDirty(page_tail));
 		BUG_ON(!PageSwapBacked(page_tail));
 
-
-		lru_add_page_tail(zone, page, page_tail);
+		lru_add_page_tail(page, page_tail, lruvec);
 	}
 	atomic_sub(tail_count, &page->_count);
 	BUG_ON(atomic_read(&page->_count) <= 0);
 
-	__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
+	__mod_zone_page_state(zone, NR_ANON_TRANSPARENT_HUGEPAGES, -1);
 	__mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
 
 	ClearPageCompound(page);
101 changes: 24 additions & 77 deletions mm/memcontrol.c
@@ -1035,7 +1035,7 @@ EXPORT_SYMBOL(mem_cgroup_count_vm_event);
 /**
  * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
  * @zone: zone of the wanted lruvec
- * @mem: memcg of the wanted lruvec
+ * @memcg: memcg of the wanted lruvec
  *
  * Returns the lru list vector holding pages for the given @zone and
  * @mem. This can be the global zone lruvec, if the memory controller
@@ -1068,19 +1068,11 @@ struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
  */
 
 /**
- * mem_cgroup_lru_add_list - account for adding an lru page and return lruvec
- * @zone: zone of the page
+ * mem_cgroup_page_lruvec - return lruvec for adding an lru page
  * @page: the page
- * @lru: current lru
- *
- * This function accounts for @page being added to @lru, and returns
- * the lruvec for the given @zone and the memcg @page is charged to.
- *
- * The callsite is then responsible for physically linking the page to
- * the returned lruvec->lists[@lru].
+ * @zone: zone of the page
  */
-struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
-				       enum lru_list lru)
+struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
 {
 	struct mem_cgroup_per_zone *mz;
 	struct mem_cgroup *memcg;
@@ -1093,73 +1085,43 @@ struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
 	memcg = pc->mem_cgroup;
 
 	/*
-	 * Surreptitiously switch any uncharged page to root:
+	 * Surreptitiously switch any uncharged offlist page to root:
 	 * an uncharged page off lru does nothing to secure
 	 * its former mem_cgroup from sudden removal.
 	 *
 	 * Our caller holds lru_lock, and PageCgroupUsed is updated
 	 * under page_cgroup lock: between them, they make all uses
	 * of pc->mem_cgroup safe.
 	 */
-	if (!PageCgroupUsed(pc) && memcg != root_mem_cgroup)
+	if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup)
 		pc->mem_cgroup = memcg = root_mem_cgroup;
 
 	mz = page_cgroup_zoneinfo(memcg, page);
-	/* compound_order() is stabilized through lru_lock */
-	mz->lru_size[lru] += 1 << compound_order(page);
 	return &mz->lruvec;
 }
 
 /**
- * mem_cgroup_lru_del_list - account for removing an lru page
- * @page: the page
- * @lru: target lru
+ * mem_cgroup_update_lru_size - account for adding or removing an lru page
+ * @lruvec: mem_cgroup per zone lru vector
+ * @lru: index of lru list the page is sitting on
+ * @nr_pages: positive when adding or negative when removing
 *
- * This function accounts for @page being removed from @lru.
- *
- * The callsite is then responsible for physically unlinking
- * @page->lru.
+ * This function must be called when a page is added to or removed from an
+ * lru list.
 */
-void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
+void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
+				int nr_pages)
 {
 	struct mem_cgroup_per_zone *mz;
-	struct mem_cgroup *memcg;
-	struct page_cgroup *pc;
+	unsigned long *lru_size;
 
 	if (mem_cgroup_disabled())
 		return;
 
-	pc = lookup_page_cgroup(page);
-	memcg = pc->mem_cgroup;
-	VM_BUG_ON(!memcg);
-	mz = page_cgroup_zoneinfo(memcg, page);
-	/* huge page split is done under lru_lock. so, we have no races. */
-	VM_BUG_ON(mz->lru_size[lru] < (1 << compound_order(page)));
-	mz->lru_size[lru] -= 1 << compound_order(page);
-}
-
-/**
- * mem_cgroup_lru_move_lists - account for moving a page between lrus
- * @zone: zone of the page
- * @page: the page
- * @from: current lru
- * @to: target lru
- *
- * This function accounts for @page being moved between the lrus @from
- * and @to, and returns the lruvec for the given @zone and the memcg
- * @page is charged to.
- *
- * The callsite is then responsible for physically relinking
- * @page->lru to the returned lruvec->lists[@to].
- */
-struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
-					 struct page *page,
-					 enum lru_list from,
-					 enum lru_list to)
-{
-	/* XXX: Optimize this, especially for @from == @to */
-	mem_cgroup_lru_del_list(page, from);
-	return mem_cgroup_lru_add_list(zone, page, to);
+	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
+	lru_size = mz->lru_size + lru;
+	*lru_size += nr_pages;
+	VM_BUG_ON((long)(*lru_size) < 0);
 }
 
 /*
@@ -1252,24 +1214,6 @@ int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec)
 	return (active > inactive);
 }
 
-struct zone_reclaim_stat *
-mem_cgroup_get_reclaim_stat_from_page(struct page *page)
-{
-	struct page_cgroup *pc;
-	struct mem_cgroup_per_zone *mz;
-
-	if (mem_cgroup_disabled())
-		return NULL;
-
-	pc = lookup_page_cgroup(page);
-	if (!PageCgroupUsed(pc))
-		return NULL;
-	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
-	smp_rmb();
-	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
-	return &mz->lruvec.reclaim_stat;
-}
-
 #define mem_cgroup_from_res_counter(counter, member)	\
 	container_of(counter, struct mem_cgroup, member)
 
@@ -2509,6 +2453,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 {
 	struct page_cgroup *pc = lookup_page_cgroup(page);
 	struct zone *uninitialized_var(zone);
+	struct lruvec *lruvec;
 	bool was_on_lru = false;
 	bool anon;
 
@@ -2531,8 +2476,9 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 		zone = page_zone(page);
 		spin_lock_irq(&zone->lru_lock);
 		if (PageLRU(page)) {
+			lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
 			ClearPageLRU(page);
-			del_page_from_lru_list(zone, page, page_lru(page));
+			del_page_from_lru_list(page, lruvec, page_lru(page));
 			was_on_lru = true;
 		}
 	}
@@ -2550,9 +2496,10 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 
 	if (lrucare) {
 		if (was_on_lru) {
+			lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
 			VM_BUG_ON(PageLRU(page));
 			SetPageLRU(page);
-			add_page_to_lru_list(zone, page, page_lru(page));
+			add_page_to_lru_list(page, lruvec, page_lru(page));
 		}
 		spin_unlock_irq(&zone->lru_lock);
 	}
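
For illustration, not part of the patch: the signed nr_pages argument folds
the old compound_order() accounting into one helper, so regular pages and
transparent huge pages share a call site; HPAGE_PMD_NR is 512 here assuming
x86 with 4K pages:

	/* adding a THP head to an lru list, under zone->lru_lock */
	int nr_pages = hpage_nr_pages(page);          /* 1, or HPAGE_PMD_NR (512) */
	mem_cgroup_update_lru_size(lruvec, lru, nr_pages);  /* lru_size += 512 */

	/* removing it again: same helper, negated count */
	mem_cgroup_update_lru_size(lruvec, lru, -nr_pages); /* lru_size -= 512 */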