
Commit 96f97c4

lorenzo-stoakes authored and akpm00 committed
mm: mlock: update the interface to use folios
Update the mlock interface to accept folios rather than pages, bringing the interface in line with the internal implementation. munlock_vma_page() still requires a page_folio() conversion; however, this is consistent with the existing mlock_vma_page() implementation and is a product of rmap still dealing in pages rather than folios.

Link: https://lkml.kernel.org/r/cba12777c5544305014bc0cbec56bb4cc71477d8.1673526881.git.lstoakes@gmail.com
Signed-off-by: Lorenzo Stoakes <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
Cc: Christian Brauner <[email protected]>
Cc: Geert Uytterhoeven <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Joel Fernandes (Google) <[email protected]>
Cc: Jonathan Corbet <[email protected]>
Cc: Liam R. Howlett <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Mike Rapoport (IBM) <[email protected]>
Cc: William Kucharski <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent b213ef6 commit 96f97c4
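The whole commit follows one conversion pattern: the folio-taking function becomes the real implementation, and any page-taking entry point that must remain is reduced to a thin wrapper converting via page_folio(). A minimal standalone sketch of that shape only; the types and *_demo() helpers here are simplified stand-ins invented for illustration, not the kernel's actual definitions:

#include <stdio.h>

/* Simplified stand-in types: the real struct page/struct folio carry far
 * more state, and the kernel derives the folio from the page itself. */
struct folio { int nr_pages; };
struct page  { struct folio *folio; };

/* Stand-in for the kernel's page_folio() conversion helper. */
static struct folio *page_folio_demo(struct page *page)
{
        return page->folio;
}

/* The folio variant is the real implementation... */
static void munlock_folio_demo(struct folio *folio)
{
        printf("munlock folio spanning %d page(s)\n", folio->nr_pages);
}

/* ...and the page entry point is just a conversion wrapper, the same
 * shape as munlock_vma_page() -> munlock_vma_folio() in the diff below. */
static void munlock_page_demo(struct page *page)
{
        munlock_folio_demo(page_folio_demo(page));
}

int main(void)
{
        struct folio f = { .nr_pages = 1 };
        struct page p = { .folio = &f };

        munlock_page_demo(&p);
        return 0;
}

Keeping the wrapper this thin is what lets rmap continue passing pages while the mlock internals deal only in folios.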

6 files changed (+49 −45 lines)


mm/internal.h

+22 −16

@@ -533,10 +533,9 @@ extern int mlock_future_check(struct mm_struct *mm, unsigned long flags,
  * should be called with vma's mmap_lock held for read or write,
  * under page table lock for the pte/pmd being added or removed.
  *
- * mlock is usually called at the end of page_add_*_rmap(),
- * munlock at the end of page_remove_rmap(); but new anon
- * pages are managed by lru_cache_add_inactive_or_unevictable()
- * calling mlock_new_page().
+ * mlock is usually called at the end of page_add_*_rmap(), munlock at
+ * the end of page_remove_rmap(); but new anon folios are managed by
+ * folio_add_lru_vma() calling mlock_new_folio().
  *
  * @compound is used to include pmd mappings of THPs, but filter out
  * pte mappings of THPs, which cannot be consistently counted: a pte
@@ -565,18 +564,25 @@ static inline void mlock_vma_page(struct page *page,
        mlock_vma_folio(page_folio(page), vma, compound);
 }
 
-void munlock_page(struct page *page);
-static inline void munlock_vma_page(struct page *page,
+void munlock_folio(struct folio *folio);
+
+static inline void munlock_vma_folio(struct folio *folio,
                struct vm_area_struct *vma, bool compound)
 {
        if (unlikely(vma->vm_flags & VM_LOCKED) &&
-           (compound || !PageTransCompound(page)))
-               munlock_page(page);
+           (compound || !folio_test_large(folio)))
+               munlock_folio(folio);
+}
+
+static inline void munlock_vma_page(struct page *page,
+               struct vm_area_struct *vma, bool compound)
+{
+       munlock_vma_folio(page_folio(page), vma, compound);
 }
-void mlock_new_page(struct page *page);
-bool need_mlock_page_drain(int cpu);
-void mlock_page_drain_local(void);
-void mlock_page_drain_remote(int cpu);
+void mlock_new_folio(struct folio *folio);
+bool need_mlock_drain(int cpu);
+void mlock_drain_local(void);
+void mlock_drain_remote(int cpu);
 
 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
 
@@ -665,10 +671,10 @@ static inline void mlock_vma_page(struct page *page,
                struct vm_area_struct *vma, bool compound) { }
 static inline void munlock_vma_page(struct page *page,
                struct vm_area_struct *vma, bool compound) { }
-static inline void mlock_new_page(struct page *page) { }
-static inline bool need_mlock_page_drain(int cpu) { return false; }
-static inline void mlock_page_drain_local(void) { }
-static inline void mlock_page_drain_remote(int cpu) { }
+static inline void mlock_new_folio(struct folio *folio) { }
+static inline bool need_mlock_drain(int cpu) { return false; }
+static inline void mlock_drain_local(void) { }
+static inline void mlock_drain_remote(int cpu) { }
 static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
 {
 }

mm/migrate.c

+1 −1

@@ -265,7 +265,7 @@ static bool remove_migration_pte(struct folio *folio,
                set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
        }
        if (vma->vm_flags & VM_LOCKED)
-               mlock_page_drain_local();
+               mlock_drain_local();
 
        trace_remove_migration_pte(pvmw.address, pte_val(pte),
                                   compound_order(new));

mm/mlock.c

+18 −20

@@ -210,7 +210,7 @@ static void mlock_folio_batch(struct folio_batch *fbatch)
        folio_batch_reinit(fbatch);
 }
 
-void mlock_page_drain_local(void)
+void mlock_drain_local(void)
 {
        struct folio_batch *fbatch;
 
@@ -221,7 +221,7 @@ void mlock_page_drain_local(void)
        local_unlock(&mlock_fbatch.lock);
 }
 
-void mlock_page_drain_remote(int cpu)
+void mlock_drain_remote(int cpu)
 {
        struct folio_batch *fbatch;
 
@@ -231,7 +231,7 @@ void mlock_page_drain_remote(int cpu)
        mlock_folio_batch(fbatch);
 }
 
-bool need_mlock_page_drain(int cpu)
+bool need_mlock_drain(int cpu)
 {
        return folio_batch_count(&per_cpu(mlock_fbatch.fbatch, cpu));
 }
@@ -262,13 +262,12 @@ void mlock_folio(struct folio *folio)
 }
 
 /**
- * mlock_new_page - mlock a newly allocated page not yet on LRU
- * @page: page to be mlocked, either a normal page or a THP head.
+ * mlock_new_folio - mlock a newly allocated folio not yet on LRU
+ * @folio: folio to be mlocked, either normal or a THP head.
  */
-void mlock_new_page(struct page *page)
+void mlock_new_folio(struct folio *folio)
 {
        struct folio_batch *fbatch;
-       struct folio *folio = page_folio(page);
        int nr_pages = folio_nr_pages(folio);
 
        local_lock(&mlock_fbatch.lock);
@@ -286,13 +285,12 @@ void mlock_new_page(struct page *page)
 }
 
 /**
- * munlock_page - munlock a page
- * @page: page to be munlocked, either a normal page or a THP head.
+ * munlock_folio - munlock a folio
+ * @folio: folio to be munlocked, either normal or a THP head.
  */
-void munlock_page(struct page *page)
+void munlock_folio(struct folio *folio)
 {
        struct folio_batch *fbatch;
-       struct folio *folio = page_folio(page);
 
        local_lock(&mlock_fbatch.lock);
        fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
@@ -314,35 +312,35 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
        struct vm_area_struct *vma = walk->vma;
        spinlock_t *ptl;
        pte_t *start_pte, *pte;
-       struct page *page;
+       struct folio *folio;
 
        ptl = pmd_trans_huge_lock(pmd, vma);
        if (ptl) {
                if (!pmd_present(*pmd))
                        goto out;
                if (is_huge_zero_pmd(*pmd))
                        goto out;
-               page = pmd_page(*pmd);
+               folio = page_folio(pmd_page(*pmd));
                if (vma->vm_flags & VM_LOCKED)
-                       mlock_folio(page_folio(page));
+                       mlock_folio(folio);
                else
-                       munlock_page(page);
+                       munlock_folio(folio);
                goto out;
        }
 
        start_pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) {
                if (!pte_present(*pte))
                        continue;
-               page = vm_normal_page(vma, addr, *pte);
-               if (!page || is_zone_device_page(page))
+               folio = vm_normal_folio(vma, addr, *pte);
+               if (!folio || folio_is_zone_device(folio))
                        continue;
-               if (PageTransCompound(page))
+               if (folio_test_large(folio))
                        continue;
                if (vma->vm_flags & VM_LOCKED)
-                       mlock_folio(page_folio(page));
+                       mlock_folio(folio);
                else
-                       munlock_page(page);
+                       munlock_folio(folio);
        }
        pte_unmap(start_pte);
 out:

mm/page_alloc.c

+1 −1

@@ -8587,7 +8587,7 @@ static int page_alloc_cpu_dead(unsigned int cpu)
        struct zone *zone;
 
        lru_add_drain_cpu(cpu);
-       mlock_page_drain_remote(cpu);
+       mlock_drain_remote(cpu);
        drain_pages(cpu);
 
        /*

mm/rmap.c

+2 −2

@@ -1764,7 +1764,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
         */
        page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
        if (vma->vm_flags & VM_LOCKED)
-               mlock_page_drain_local();
+               mlock_drain_local();
        folio_put(folio);
 }
 
@@ -2105,7 +2105,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
         */
        page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
        if (vma->vm_flags & VM_LOCKED)
-               mlock_page_drain_local();
+               mlock_drain_local();
        folio_put(folio);
 }

mm/swap.c

+5 −5

@@ -562,7 +562,7 @@ void folio_add_lru_vma(struct folio *folio, struct vm_area_struct *vma)
        VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
 
        if (unlikely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED))
-               mlock_new_page(&folio->page);
+               mlock_new_folio(folio);
        else
                folio_add_lru(folio);
 }
@@ -781,7 +781,7 @@ void lru_add_drain(void)
        local_lock(&cpu_fbatches.lock);
        lru_add_drain_cpu(smp_processor_id());
        local_unlock(&cpu_fbatches.lock);
-       mlock_page_drain_local();
+       mlock_drain_local();
 }
 
 /*
@@ -796,7 +796,7 @@ static void lru_add_and_bh_lrus_drain(void)
        lru_add_drain_cpu(smp_processor_id());
        local_unlock(&cpu_fbatches.lock);
        invalidate_bh_lrus_cpu();
-       mlock_page_drain_local();
+       mlock_drain_local();
 }
 
 void lru_add_drain_cpu_zone(struct zone *zone)
@@ -805,7 +805,7 @@ void lru_add_drain_cpu_zone(struct zone *zone)
        lru_add_drain_cpu(smp_processor_id());
        drain_local_pages(zone);
        local_unlock(&cpu_fbatches.lock);
-       mlock_page_drain_local();
+       mlock_drain_local();
 }
 
 #ifdef CONFIG_SMP
@@ -828,7 +828,7 @@ static bool cpu_needs_drain(unsigned int cpu)
                folio_batch_count(&fbatches->lru_deactivate) ||
                folio_batch_count(&fbatches->lru_lazyfree) ||
                folio_batch_count(&fbatches->activate) ||
-               need_mlock_page_drain(cpu) ||
+               need_mlock_drain(cpu) ||
                has_bh_in_lru(cpu, NULL);
 }