mm: simplify lock_page_memcg()
Now that migration no longer clears page->mem_cgroup of live pages, it's
safe to make lock_page_memcg() and the memcg stat functions take pages,
and spare the callers from having to deal with memcg objects.
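
As an illustrative sketch (not part of the patch itself, and mirroring the
example in the memcontrol.h doc comment below; TestClearPageState and idx
stand in for a real page flag test and stat index), the caller-side pattern
changes from

	memcg = lock_page_memcg(page);
	if (TestClearPageState(page))
		mem_cgroup_update_page_stat(memcg, idx, -1);
	unlock_page_memcg(memcg);

to

	lock_page_memcg(page);
	if (TestClearPageState(page))
		mem_cgroup_update_page_stat(page, idx, -1);
	unlock_page_memcg(page);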

[[email protected]: fix warnings]
Signed-off-by: Johannes Weiner <[email protected]>
Suggested-by: Vladimir Davydov <[email protected]>
Acked-by: Vladimir Davydov <[email protected]>
Cc: Michal Hocko <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
hnaz authored and torvalds committed Mar 15, 2016
1 parent 6a93ca8 commit 62cccb8
Showing 12 changed files with 88 additions and 117 deletions.
18 changes: 8 additions & 10 deletions fs/buffer.c
@@ -624,14 +624,14 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
* The caller must hold lock_page_memcg().
*/
static void __set_page_dirty(struct page *page, struct address_space *mapping,
struct mem_cgroup *memcg, int warn)
int warn)
{
unsigned long flags;

spin_lock_irqsave(&mapping->tree_lock, flags);
if (page->mapping) { /* Race with truncate? */
WARN_ON_ONCE(warn && !PageUptodate(page));
account_page_dirtied(page, mapping, memcg);
account_page_dirtied(page, mapping);
radix_tree_tag_set(&mapping->page_tree,
page_index(page), PAGECACHE_TAG_DIRTY);
}
@@ -666,7 +666,6 @@ static void __set_page_dirty(struct page *page, struct address_space *mapping,
int __set_page_dirty_buffers(struct page *page)
{
int newly_dirty;
struct mem_cgroup *memcg;
struct address_space *mapping = page_mapping(page);

if (unlikely(!mapping))
@@ -686,14 +685,14 @@ int __set_page_dirty_buffers(struct page *page)
* Lock out page->mem_cgroup migration to keep PageDirty
* synchronized with per-memcg dirty page counters.
*/
memcg = lock_page_memcg(page);
lock_page_memcg(page);
newly_dirty = !TestSetPageDirty(page);
spin_unlock(&mapping->private_lock);

if (newly_dirty)
__set_page_dirty(page, mapping, memcg, 1);
__set_page_dirty(page, mapping, 1);

unlock_page_memcg(memcg);
unlock_page_memcg(page);

if (newly_dirty)
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
@@ -1167,15 +1166,14 @@ void mark_buffer_dirty(struct buffer_head *bh)
if (!test_set_buffer_dirty(bh)) {
struct page *page = bh->b_page;
struct address_space *mapping = NULL;
struct mem_cgroup *memcg;

memcg = lock_page_memcg(page);
lock_page_memcg(page);
if (!TestSetPageDirty(page)) {
mapping = page_mapping(page);
if (mapping)
__set_page_dirty(page, mapping, memcg, 0);
__set_page_dirty(page, mapping, 0);
}
unlock_page_memcg(memcg);
unlock_page_memcg(page);
if (mapping)
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
}
7 changes: 3 additions & 4 deletions fs/xfs/xfs_aops.c
@@ -1957,7 +1957,6 @@ xfs_vm_set_page_dirty(
loff_t end_offset;
loff_t offset;
int newly_dirty;
struct mem_cgroup *memcg;

if (unlikely(!mapping))
return !TestSetPageDirty(page);
@@ -1981,7 +1980,7 @@
* Lock out page->mem_cgroup migration to keep PageDirty
* synchronized with per-memcg dirty page counters.
*/
memcg = lock_page_memcg(page);
lock_page_memcg(page);
newly_dirty = !TestSetPageDirty(page);
spin_unlock(&mapping->private_lock);

@@ -1992,13 +1991,13 @@
spin_lock_irqsave(&mapping->tree_lock, flags);
if (page->mapping) { /* Race with truncate? */
WARN_ON_ONCE(!PageUptodate(page));
account_page_dirtied(page, mapping, memcg);
account_page_dirtied(page, mapping);
radix_tree_tag_set(&mapping->page_tree,
page_index(page), PAGECACHE_TAG_DIRTY);
}
spin_unlock_irqrestore(&mapping->tree_lock, flags);
}
unlock_page_memcg(memcg);
unlock_page_memcg(page);
if (newly_dirty)
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
return newly_dirty;
35 changes: 17 additions & 18 deletions include/linux/memcontrol.h
@@ -455,42 +455,42 @@ bool mem_cgroup_oom_synchronize(bool wait);
extern int do_swap_account;
#endif

struct mem_cgroup *lock_page_memcg(struct page *page);
void unlock_page_memcg(struct mem_cgroup *memcg);
void lock_page_memcg(struct page *page);
void unlock_page_memcg(struct page *page);

/**
* mem_cgroup_update_page_stat - update page state statistics
* @memcg: memcg to account against
* @page: the page
* @idx: page state item to account
* @val: number of pages (positive or negative)
*
* Callers must use lock_page_memcg() to prevent double accounting
* when the page is concurrently being moved to another memcg:
*
* memcg = lock_page_memcg(page);
* lock_page_memcg(page);
* if (TestClearPageState(page))
* mem_cgroup_update_page_stat(memcg, state, -1);
* unlock_page_memcg(memcg);
* mem_cgroup_update_page_stat(page, state, -1);
* unlock_page_memcg(page);
*/
static inline void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
static inline void mem_cgroup_update_page_stat(struct page *page,
enum mem_cgroup_stat_index idx, int val)
{
VM_BUG_ON(!rcu_read_lock_held());

if (memcg)
this_cpu_add(memcg->stat->count[idx], val);
if (page->mem_cgroup)
this_cpu_add(page->mem_cgroup->stat->count[idx], val);
}

static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
static inline void mem_cgroup_inc_page_stat(struct page *page,
enum mem_cgroup_stat_index idx)
{
mem_cgroup_update_page_stat(memcg, idx, 1);
mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
static inline void mem_cgroup_dec_page_stat(struct page *page,
enum mem_cgroup_stat_index idx)
{
mem_cgroup_update_page_stat(memcg, idx, -1);
mem_cgroup_update_page_stat(page, idx, -1);
}

unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
@@ -661,12 +661,11 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline struct mem_cgroup *lock_page_memcg(struct page *page)
static inline void lock_page_memcg(struct page *page)
{
return NULL;
}

static inline void unlock_page_memcg(struct mem_cgroup *memcg)
static inline void unlock_page_memcg(struct page *page)
{
}

@@ -692,12 +691,12 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
return false;
}

static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
static inline void mem_cgroup_inc_page_stat(struct page *page,
enum mem_cgroup_stat_index idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
static inline void mem_cgroup_dec_page_stat(struct page *page,
enum mem_cgroup_stat_index idx)
{
}
5 changes: 2 additions & 3 deletions include/linux/mm.h
@@ -1291,10 +1291,9 @@ int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
struct page *page);
void account_page_dirtied(struct page *page, struct address_space *mapping,
struct mem_cgroup *memcg);
void account_page_dirtied(struct page *page, struct address_space *mapping);
void account_page_cleaned(struct page *page, struct address_space *mapping,
struct mem_cgroup *memcg, struct bdi_writeback *wb);
struct bdi_writeback *wb);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
void cancel_dirty_page(struct page *page);
3 changes: 1 addition & 2 deletions include/linux/pagemap.h
@@ -663,8 +663,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow,
struct mem_cgroup *memcg);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);

/*
20 changes: 8 additions & 12 deletions mm/filemap.c
@@ -179,8 +179,7 @@ static void page_cache_tree_delete(struct address_space *mapping,
* is safe. The caller must hold the mapping's tree_lock and
* lock_page_memcg().
*/
void __delete_from_page_cache(struct page *page, void *shadow,
struct mem_cgroup *memcg)
void __delete_from_page_cache(struct page *page, void *shadow)
{
struct address_space *mapping = page->mapping;

@@ -239,8 +238,7 @@ void __delete_from_page_cache(struct page *page, void *shadow,
* anyway will be cleared before returning page into buddy allocator.
*/
if (WARN_ON_ONCE(PageDirty(page)))
account_page_cleaned(page, mapping, memcg,
inode_to_wb(mapping->host));
account_page_cleaned(page, mapping, inode_to_wb(mapping->host));
}

/**
@@ -254,7 +252,6 @@ void __delete_from_page_cache(struct page *page, void *shadow,
void delete_from_page_cache(struct page *page)
{
struct address_space *mapping = page->mapping;
struct mem_cgroup *memcg;
unsigned long flags;

void (*freepage)(struct page *);
@@ -263,11 +260,11 @@ void delete_from_page_cache(struct page *page)

freepage = mapping->a_ops->freepage;

memcg = lock_page_memcg(page);
lock_page_memcg(page);
spin_lock_irqsave(&mapping->tree_lock, flags);
__delete_from_page_cache(page, NULL, memcg);
__delete_from_page_cache(page, NULL);
spin_unlock_irqrestore(&mapping->tree_lock, flags);
unlock_page_memcg(memcg);
unlock_page_memcg(page);

if (freepage)
freepage(page);
@@ -551,7 +548,6 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
if (!error) {
struct address_space *mapping = old->mapping;
void (*freepage)(struct page *);
struct mem_cgroup *memcg;
unsigned long flags;

pgoff_t offset = old->index;
@@ -561,9 +557,9 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
new->mapping = mapping;
new->index = offset;

memcg = lock_page_memcg(old);
lock_page_memcg(old);
spin_lock_irqsave(&mapping->tree_lock, flags);
__delete_from_page_cache(old, NULL, memcg);
__delete_from_page_cache(old, NULL);
error = radix_tree_insert(&mapping->page_tree, offset, new);
BUG_ON(error);
mapping->nrpages++;
@@ -576,7 +572,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
if (PageSwapBacked(new))
__inc_zone_page_state(new, NR_SHMEM);
spin_unlock_irqrestore(&mapping->tree_lock, flags);
unlock_page_memcg(memcg);
unlock_page_memcg(old);
mem_cgroup_migrate(old, new);
radix_tree_preload_end();
if (freepage)
23 changes: 9 additions & 14 deletions mm/memcontrol.c
@@ -1690,7 +1690,7 @@ bool mem_cgroup_oom_synchronize(bool handle)
* This function protects unlocked LRU pages from being moved to
* another cgroup and stabilizes their page->mem_cgroup binding.
*/
struct mem_cgroup *lock_page_memcg(struct page *page)
void lock_page_memcg(struct page *page)
{
struct mem_cgroup *memcg;
unsigned long flags;
@@ -1699,25 +1699,18 @@ struct mem_cgroup *lock_page_memcg(struct page *page)
* The RCU lock is held throughout the transaction. The fast
* path can get away without acquiring the memcg->move_lock
* because page moving starts with an RCU grace period.
*
* The RCU lock also protects the memcg from being freed when
* the page state that is going to change is the only thing
* preventing the page from being uncharged.
* E.g. end-writeback clearing PageWriteback(), which allows
* migration to go ahead and uncharge the page before the
* account transaction might be complete.
*/
rcu_read_lock();

if (mem_cgroup_disabled())
return NULL;
return;
again:
memcg = page->mem_cgroup;
if (unlikely(!memcg))
return NULL;
return;

if (atomic_read(&memcg->moving_account) <= 0)
return memcg;
return;

spin_lock_irqsave(&memcg->move_lock, flags);
if (memcg != page->mem_cgroup) {
@@ -1733,16 +1726,18 @@ struct mem_cgroup *lock_page_memcg(struct page *page)
memcg->move_lock_task = current;
memcg->move_lock_flags = flags;

return memcg;
return;
}
EXPORT_SYMBOL(lock_page_memcg);

/**
* unlock_page_memcg - unlock a page->mem_cgroup binding
* @memcg: the memcg returned by lock_page_memcg()
* @page: the page
*/
void unlock_page_memcg(struct mem_cgroup *memcg)
void unlock_page_memcg(struct page *page)
{
struct mem_cgroup *memcg = page->mem_cgroup;

if (memcg && memcg->move_lock_task == current) {
unsigned long flags = memcg->move_lock_flags;

