Commit 6c77b60

Kefeng Wang authored and akpm00 committed
mm: kill lock|unlock_page_memcg()
Since commit c7c3dec ("mm: rmap: remove lock_page_memcg()") there are no
remaining users, so kill lock_page_memcg() and unlock_page_memcg().

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Kefeng Wang <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Reviewed-by: Matthew Wilcox (Oracle) <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
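As the mm/memcontrol.c hunk below shows, the removed functions were thin
wrappers around the folio API, so any remaining out-of-tree caller converts
mechanically. A minimal sketch of that conversion (the surrounding code is
hypothetical):

/* Before this commit: */
lock_page_memcg(page);
/* ... update memcg-tracked state for the page ... */
unlock_page_memcg(page);

/* After this commit, operate on the folio directly, which is
 * exactly what the old wrappers did via page_folio():
 */
folio_memcg_lock(page_folio(page));
/* ... update memcg-tracked state for the folio ... */
folio_memcg_unlock(page_folio(page));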
1 parent 399fd49 commit 6c77b60

5 files changed: +10 −30 lines

Documentation/admin-guide/cgroup-v1/memory.rst (+1 −1)

@@ -297,7 +297,7 @@ Lock order is as follows::
 
   Page lock (PG_locked bit of page->flags)
     mm->page_table_lock or split pte_lock
-      lock_page_memcg (memcg->move_lock)
+      folio_memcg_lock (memcg->move_lock)
         mapping->i_pages lock
           lruvec->lru_lock.
 
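As a reading aid for the ordering above, here is a minimal, illustrative-only
C sketch that nests three of these locks in the documented order. It is not a
real kernel path, and it deliberately skips the page-table and LRU locks:

/*
 * Illustrative sketch only: folio lock, then memcg->move_lock via
 * folio_memcg_lock(), then the mapping's i_pages lock. Assumes a
 * struct folio *folio and struct address_space *mapping in scope.
 */
folio_lock(folio);                  /* Page lock (PG_locked)  */
folio_memcg_lock(folio);            /* memcg->move_lock       */
xa_lock_irq(&mapping->i_pages);     /* mapping->i_pages lock  */
/* ... modify page-cache and memcg-tracked state ... */
xa_unlock_irq(&mapping->i_pages);
folio_memcg_unlock(folio);
folio_unlock(folio);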
include/linux/memcontrol.h (+1 −11)

@@ -419,7 +419,7 @@ static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
  *
  * - the folio lock
  * - LRU isolation
- * - lock_page_memcg()
+ * - folio_memcg_lock()
  * - exclusive reference
  * - mem_cgroup_trylock_pages()
  *
@@ -949,8 +949,6 @@ void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
 
 void folio_memcg_lock(struct folio *folio);
 void folio_memcg_unlock(struct folio *folio);
-void lock_page_memcg(struct page *page);
-void unlock_page_memcg(struct page *page);
 
 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);
 
@@ -1438,14 +1436,6 @@ mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
 {
 }
 
-static inline void lock_page_memcg(struct page *page)
-{
-}
-
-static inline void unlock_page_memcg(struct page *page)
-{
-}
-
 static inline void folio_memcg_lock(struct folio *folio)
 {
 }

mm/filemap.c (+1 −1)

@@ -117,7 +117,7 @@
  *    ->i_pages lock            (page_remove_rmap->set_page_dirty)
  *    bdi.wb->list_lock         (page_remove_rmap->set_page_dirty)
  *    ->inode->i_lock           (page_remove_rmap->set_page_dirty)
- *    ->memcg->move_lock        (page_remove_rmap->lock_page_memcg)
+ *    ->memcg->move_lock        (page_remove_rmap->folio_memcg_lock)
  *    bdi.wb->list_lock         (zap_pte_range->set_page_dirty)
  *    ->inode->i_lock           (zap_pte_range->set_page_dirty)
  *    ->private_lock            (zap_pte_range->block_dirty_folio)

mm/memcontrol.c (+4 −14)

@@ -2148,17 +2148,12 @@ void folio_memcg_lock(struct folio *folio)
          * When charge migration first begins, we can have multiple
          * critical sections holding the fast-path RCU lock and one
          * holding the slowpath move_lock. Track the task who has the
-         * move_lock for unlock_page_memcg().
+         * move_lock for folio_memcg_unlock().
          */
         memcg->move_lock_task = current;
         memcg->move_lock_flags = flags;
 }
 
-void lock_page_memcg(struct page *page)
-{
-        folio_memcg_lock(page_folio(page));
-}
-
 static void __folio_memcg_unlock(struct mem_cgroup *memcg)
 {
         if (memcg && memcg->move_lock_task == current) {
@@ -2186,11 +2181,6 @@ void folio_memcg_unlock(struct folio *folio)
         __folio_memcg_unlock(folio_memcg(folio));
 }
 
-void unlock_page_memcg(struct page *page)
-{
-        folio_memcg_unlock(page_folio(page));
-}
-
 struct memcg_stock_pcp {
         local_lock_t stock_lock;
         struct mem_cgroup *cached; /* this never be root cgroup */
@@ -2866,7 +2856,7 @@ static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
          *
          * - the page lock
          * - LRU isolation
-         * - lock_page_memcg()
+         * - folio_memcg_lock()
          * - exclusive reference
          * - mem_cgroup_trylock_pages()
          */
@@ -5829,7 +5819,7 @@ static int mem_cgroup_move_account(struct page *page,
          * with (un)charging, migration, LRU putback, or anything else
          * that would rely on a stable page's memory cgroup.
          *
-         * Note that lock_page_memcg is a memcg lock, not a page lock,
+         * Note that folio_memcg_lock is a memcg lock, not a page lock,
          * to save space. As soon as we switch page's memory cgroup to a
          * new memcg that isn't locked, the above state can change
          * concurrently again. Make sure we're truly done with it.
@@ -6320,7 +6310,7 @@ static void mem_cgroup_move_charge(void)
 {
         lru_add_drain_all();
         /*
-         * Signal lock_page_memcg() to take the memcg's move_lock
+         * Signal folio_memcg_lock() to take the memcg's move_lock
          * while we're moving its pages to another memcg. Then wait
          * for already started RCU-only updates to finish.
          */
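For completeness, the unlock side that consumes this owner tracking can be
sketched as follows; a simplified restatement consistent with the fragment
shown in the first hunk above, not a verbatim copy of the kernel source:

/*
 * Only the task that actually took memcg->move_lock releases it;
 * every other folio_memcg_lock() holder was an RCU-only fast path.
 */
static void __folio_memcg_unlock(struct mem_cgroup *memcg)
{
        if (memcg && memcg->move_lock_task == current) {
                unsigned long flags = memcg->move_lock_flags;

                memcg->move_lock_task = NULL;
                memcg->move_lock_flags = 0;
                spin_unlock_irqrestore(&memcg->move_lock, flags);
        }
        rcu_read_unlock();
}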

mm/page-writeback.c (+3 −3)

@@ -2597,7 +2597,7 @@ EXPORT_SYMBOL(noop_dirty_folio);
 /*
  * Helper function for set_page_dirty family.
  *
- * Caller must hold lock_page_memcg().
+ * Caller must hold folio_memcg_lock().
  *
  * NOTE: This relies on being atomic wrt interrupts.
  */
@@ -2631,7 +2631,7 @@ static void folio_account_dirtied(struct folio *folio,
 /*
  * Helper function for deaccounting dirty page without writeback.
  *
- * Caller must hold lock_page_memcg().
+ * Caller must hold folio_memcg_lock().
  */
 void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb)
 {
@@ -2650,7 +2650,7 @@ void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb)
  * If warn is true, then emit a warning if the folio is not uptodate and has
  * not been truncated.
  *
- * The caller must hold lock_page_memcg(). Most callers have the folio
+ * The caller must hold folio_memcg_lock(). Most callers have the folio
  * locked. A few have the folio blocked from truncation through other
  * means (eg zap_vma_pages() has it mapped and is holding the page table
  * lock). This can also be called from mark_buffer_dirty(), which I
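The contract these comments describe can be sketched as follows; this is a
hedged illustration of a hypothetical dirtying path, not a copy of the real
set_page_dirty family:

/*
 * Hedged sketch only: hold folio_memcg_lock() across the dirty
 * accounting so the folio's memcg binding stays stable while the
 * per-memcg dirty statistics are updated.
 */
folio_memcg_lock(folio);
if (!folio_test_set_dirty(folio)) {
        /* ... folio_account_dirtied()-style accounting runs here,
         * under the lock, as the comments above require ... */
}
folio_memcg_unlock(folio);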
