mm: memcontrol: track move_lock state internally
The complexity of memcg page stat synchronization is currently leaking
into the callsites, forcing them to keep track of the move_lock state
and the IRQ flags.  Simplify the API by tracking this state in the
memcg itself.
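
The caller-visible simplification, distilled from the callsite
conversions below (page_add_file_rmap() and friends):

	/* Before: every caller carried the lock state and IRQ flags. */
	struct mem_cgroup *memcg;
	unsigned long flags;
	bool locked;

	memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
	/* ... update page state counters ... */
	mem_cgroup_end_page_stat(memcg, &locked, &flags);

	/* After: the memcg records the lock-holding task and its flags. */
	struct mem_cgroup *memcg;

	memcg = mem_cgroup_begin_page_stat(page);
	/* ... update page state counters ... */
	mem_cgroup_end_page_stat(memcg);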

Signed-off-by: Johannes Weiner <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Reviewed-by: Vladimir Davydov <[email protected]>
Cc: Wu Fengguang <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Authored by hnaz, committed by torvalds on Feb 12, 2015
Parent: 93aa7d9, commit: 6de2261
Showing 4 changed files with 51 additions and 53 deletions.
12 changes: 4 additions & 8 deletions include/linux/memcontrol.h
@@ -138,12 +138,10 @@ static inline bool mem_cgroup_disabled(void)
 	return false;
 }
 
-struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, bool *locked,
-					      unsigned long *flags);
-void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool *locked,
-			      unsigned long *flags);
+struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page);
 void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
 				 enum mem_cgroup_stat_index idx, int val);
+void mem_cgroup_end_page_stat(struct mem_cgroup *memcg);
 
 static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
 					    enum mem_cgroup_stat_index idx)
@@ -285,14 +283,12 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 {
 }
 
-static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page,
-					bool *locked, unsigned long *flags)
+static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page)
 {
 	return NULL;
 }
 
-static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg,
-					bool *locked, unsigned long *flags)
+static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg)
 {
 }
 
68 changes: 39 additions & 29 deletions mm/memcontrol.c
@@ -325,9 +325,11 @@ struct mem_cgroup {
 	/*
 	 * set > 0 if pages under this cgroup are moving to other cgroup.
 	 */
-	atomic_t	moving_account;
+	atomic_t		moving_account;
 	/* taken only while moving_account > 0 */
-	spinlock_t	move_lock;
+	spinlock_t		move_lock;
+	struct task_struct	*move_lock_task;
+	unsigned long		move_lock_flags;
 	/*
 	 * percpu counter.
 	 */
@@ -1977,34 +1979,33 @@ bool mem_cgroup_oom_synchronize(bool handle)
 /**
  * mem_cgroup_begin_page_stat - begin a page state statistics transaction
  * @page: page that is going to change accounted state
- * @locked: &memcg->move_lock slowpath was taken
- * @flags: IRQ-state flags for &memcg->move_lock
  *
  * This function must mark the beginning of an accounted page state
  * change to prevent double accounting when the page is concurrently
  * being moved to another memcg:
  *
- *   memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
+ *   memcg = mem_cgroup_begin_page_stat(page);
  *   if (TestClearPageState(page))
  *     mem_cgroup_update_page_stat(memcg, state, -1);
- *   mem_cgroup_end_page_stat(memcg, locked, flags);
- *
- * The RCU lock is held throughout the transaction. The fast path can
- * get away without acquiring the memcg->move_lock (@locked is false)
- * because page moving starts with an RCU grace period.
- *
- * The RCU lock also protects the memcg from being freed when the page
- * state that is going to change is the only thing preventing the page
- * from being uncharged. E.g. end-writeback clearing PageWriteback(),
- * which allows migration to go ahead and uncharge the page before the
- * account transaction might be complete.
+ *   mem_cgroup_end_page_stat(memcg);
  */
-struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page,
-					      bool *locked,
-					      unsigned long *flags)
+struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page)
 {
 	struct mem_cgroup *memcg;
+	unsigned long flags;
 
+	/*
+	 * The RCU lock is held throughout the transaction. The fast
+	 * path can get away without acquiring the memcg->move_lock
+	 * because page moving starts with an RCU grace period.
+	 *
+	 * The RCU lock also protects the memcg from being freed when
+	 * the page state that is going to change is the only thing
+	 * preventing the page from being uncharged.
+	 * E.g. end-writeback clearing PageWriteback(), which allows
+	 * migration to go ahead and uncharge the page before the
+	 * account transaction might be complete.
+	 */
 	rcu_read_lock();
 
 	if (mem_cgroup_disabled())
@@ -2014,31 +2015,40 @@ struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page,
 	if (unlikely(!memcg))
 		return NULL;
 
-	*locked = false;
 	if (atomic_read(&memcg->moving_account) <= 0)
 		return memcg;
 
-	spin_lock_irqsave(&memcg->move_lock, *flags);
+	spin_lock_irqsave(&memcg->move_lock, flags);
 	if (memcg != page->mem_cgroup) {
-		spin_unlock_irqrestore(&memcg->move_lock, *flags);
+		spin_unlock_irqrestore(&memcg->move_lock, flags);
 		goto again;
 	}
-	*locked = true;
+
+	/*
+	 * When charge migration first begins, we can have locked and
+	 * unlocked page stat updates happening concurrently. Track
+	 * the task who has the lock for mem_cgroup_end_page_stat().
+	 */
+	memcg->move_lock_task = current;
+	memcg->move_lock_flags = flags;
 
 	return memcg;
 }
 
 /**
  * mem_cgroup_end_page_stat - finish a page state statistics transaction
  * @memcg: the memcg that was accounted against
- * @locked: value received from mem_cgroup_begin_page_stat()
- * @flags: value received from mem_cgroup_begin_page_stat()
  */
-void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool *locked,
-			      unsigned long *flags)
+void mem_cgroup_end_page_stat(struct mem_cgroup *memcg)
 {
-	if (memcg && *locked)
-		spin_unlock_irqrestore(&memcg->move_lock, *flags);
+	if (memcg && memcg->move_lock_task == current) {
+		unsigned long flags = memcg->move_lock_flags;
+
+		memcg->move_lock_task = NULL;
+		memcg->move_lock_flags = 0;
+
+		spin_unlock_irqrestore(&memcg->move_lock, flags);
+	}
 
 	rcu_read_unlock();
 }
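
In the new mem_cgroup_end_page_stat() above, the lock itself remembers
its owner and the saved IRQ state, so the unlock side needs no
caller-supplied bookkeeping. A minimal user-space sketch of the same
idea, assuming pthreads (all names here are invented for illustration;
this is not kernel code, and the unlocked owner check relies on the
same reasoning as move_lock_task: only the owning thread can observe
its own identity in the owner field):

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct stat_lock {
		pthread_mutex_t lock;
		pthread_t owner;	/* analogue of memcg->move_lock_task */
		bool owned;
	};

	/* Slow path: take the lock and record ourselves as the owner. */
	static void stat_lock_begin(struct stat_lock *sl)
	{
		pthread_mutex_lock(&sl->lock);
		sl->owner = pthread_self();
		sl->owned = true;
	}

	/*
	 * End of transaction: unlock only if we are the recorded owner.
	 * Fast-path callers that never locked can call this
	 * unconditionally, like mem_cgroup_end_page_stat() checking
	 * move_lock_task == current.
	 */
	static void stat_lock_end(struct stat_lock *sl)
	{
		if (sl->owned && pthread_equal(sl->owner, pthread_self())) {
			sl->owned = false;
			pthread_mutex_unlock(&sl->lock);
		}
	}

	int main(void)
	{
		struct stat_lock sl = { .lock = PTHREAD_MUTEX_INITIALIZER };

		stat_lock_begin(&sl);	/* slow path: owner recorded */
		stat_lock_end(&sl);	/* owner matches: unlocks */
		stat_lock_end(&sl);	/* fast-path caller: no-op */
		puts("done");
		return 0;
	}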
12 changes: 4 additions & 8 deletions mm/page-writeback.c
@@ -2308,12 +2308,10 @@ EXPORT_SYMBOL(clear_page_dirty_for_io);
 int test_clear_page_writeback(struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);
-	unsigned long memcg_flags;
 	struct mem_cgroup *memcg;
-	bool locked;
 	int ret;
 
-	memcg = mem_cgroup_begin_page_stat(page, &locked, &memcg_flags);
+	memcg = mem_cgroup_begin_page_stat(page);
 	if (mapping) {
 		struct backing_dev_info *bdi = mapping->backing_dev_info;
 		unsigned long flags;
@@ -2338,19 +2336,17 @@ int test_clear_page_writeback(struct page *page)
 		dec_zone_page_state(page, NR_WRITEBACK);
 		inc_zone_page_state(page, NR_WRITTEN);
 	}
-	mem_cgroup_end_page_stat(memcg, &locked, &memcg_flags);
+	mem_cgroup_end_page_stat(memcg);
 	return ret;
 }
 
 int __test_set_page_writeback(struct page *page, bool keep_write)
 {
 	struct address_space *mapping = page_mapping(page);
-	unsigned long memcg_flags;
 	struct mem_cgroup *memcg;
-	bool locked;
 	int ret;
 
-	memcg = mem_cgroup_begin_page_stat(page, &locked, &memcg_flags);
+	memcg = mem_cgroup_begin_page_stat(page);
 	if (mapping) {
 		struct backing_dev_info *bdi = mapping->backing_dev_info;
 		unsigned long flags;
@@ -2380,7 +2376,7 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
 		mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
 		inc_zone_page_state(page, NR_WRITEBACK);
 	}
-	mem_cgroup_end_page_stat(memcg, &locked, &memcg_flags);
+	mem_cgroup_end_page_stat(memcg);
 	return ret;
 
 }
12 changes: 4 additions & 8 deletions mm/rmap.c
@@ -1085,24 +1085,20 @@ void page_add_new_anon_rmap(struct page *page,
 void page_add_file_rmap(struct page *page)
 {
 	struct mem_cgroup *memcg;
-	unsigned long flags;
-	bool locked;
 
-	memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
+	memcg = mem_cgroup_begin_page_stat(page);
 	if (atomic_inc_and_test(&page->_mapcount)) {
 		__inc_zone_page_state(page, NR_FILE_MAPPED);
 		mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
 	}
-	mem_cgroup_end_page_stat(memcg, &locked, &flags);
+	mem_cgroup_end_page_stat(memcg);
 }
 
 static void page_remove_file_rmap(struct page *page)
 {
 	struct mem_cgroup *memcg;
-	unsigned long flags;
-	bool locked;
 
-	memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
+	memcg = mem_cgroup_begin_page_stat(page);
 
 	/* page still mapped by someone else? */
 	if (!atomic_add_negative(-1, &page->_mapcount))
@@ -1123,7 +1119,7 @@ static void page_remove_file_rmap(struct page *page)
 	if (unlikely(PageMlocked(page)))
 		clear_page_mlock(page);
 out:
-	mem_cgroup_end_page_stat(memcg, &locked, &flags);
+	mem_cgroup_end_page_stat(memcg);
 }
 
 /**
