proc: rewrite stable_page_flags()
Reduce the usage of PageFlag tests and the number of compound_head()
calls.

For multi-page folios, we now show all pages as having the flags that
apply to them, e.g. if the folio is dirty, every one of its pages has the
dirty flag set instead of just the head page. The mapped flag is still
per page, as is the hwpoison flag.

[[email protected]: fix up some bits vs masks]
  Link: https://lkml.kernel.org/r/[email protected]
[[email protected]: fix warnings]
  Link: https://lkml.kernel.org/r/[email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Reviewed-by: Svetly Todorov <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Matthew Wilcox (Oracle) authored and akpm00 committed Apr 26, 2024
1 parent 4dc7d37 commit dee3d0b
Showing 4 changed files with 42 additions and 38 deletions.
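The behaviour change described in the message is visible through /proc/kpageflags, which exports one u64 of KPF_* bits per page frame. Below is a minimal reader sketch, not part of this commit; it assumes the uapi KPF_* bit numbers from <linux/kernel-page-flags.h> and needs root to open the file.

/* kpf.c - print the stable page flags for one PFN (sketch, not from
 * this commit). After this change, every page of a dirty multi-page
 * folio reports KPF_DIRTY, not only the head page; KPF_MMAP and
 * KPF_HWPOISON remain per-page.
 */
#include <fcntl.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <linux/kernel-page-flags.h>	/* KPF_* bit numbers */

int main(int argc, char **argv)
{
	uint64_t flags;
	unsigned long pfn;
	int fd;

	if (argc < 2)
		return 1;
	pfn = strtoul(argv[1], NULL, 0);

	fd = open("/proc/kpageflags", O_RDONLY);	/* needs root */
	if (fd < 0)
		return 1;
	/* the file is an array of u64 flag words, indexed by PFN */
	if (pread(fd, &flags, sizeof(flags), pfn * sizeof(flags)) != sizeof(flags)) {
		close(fd);
		return 1;
	}
	printf("pfn %lu flags %#" PRIx64 " dirty=%d head=%d tail=%d\n",
	       pfn, flags,
	       !!(flags & (1ULL << KPF_DIRTY)),
	       !!(flags & (1ULL << KPF_COMPOUND_HEAD)),
	       !!(flags & (1ULL << KPF_COMPOUND_TAIL)));
	close(fd);
	return 0;
}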
69 changes: 37 additions & 32 deletions fs/proc/page.c
@@ -107,60 +107,61 @@ static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit)
 	return ((kflags >> kbit) & 1) << ubit;
 }
 
-u64 stable_page_flags(struct page *page)
+u64 stable_page_flags(const struct page *page)
 {
-	u64 k;
-	u64 u;
+	const struct folio *folio;
+	unsigned long k;
+	unsigned long mapping;
+	bool is_anon;
+	u64 u = 0;
 
 	/*
 	 * pseudo flag: KPF_NOPAGE
 	 * it differentiates a memory hole from a page with no flags
 	 */
 	if (!page)
 		return 1 << KPF_NOPAGE;
+	folio = page_folio(page);
 
-	k = page->flags;
-	u = 0;
+	k = folio->flags;
+	mapping = (unsigned long)folio->mapping;
+	is_anon = mapping & PAGE_MAPPING_ANON;
 
 	/*
 	 * pseudo flags for the well known (anonymous) memory mapped pages
 	 */
 	if (page_mapped(page))
 		u |= 1 << KPF_MMAP;
-	if (PageAnon(page))
+	if (is_anon) {
 		u |= 1 << KPF_ANON;
-	if (PageKsm(page))
-		u |= 1 << KPF_KSM;
+		if (mapping & PAGE_MAPPING_KSM)
+			u |= 1 << KPF_KSM;
+	}
 
 	/*
 	 * compound pages: export both head/tail info
 	 * they together define a compound page's start/end pos and order
 	 */
-	if (PageHead(page))
-		u |= 1 << KPF_COMPOUND_HEAD;
-	if (PageTail(page))
+	if (page == &folio->page)
+		u |= kpf_copy_bit(k, KPF_COMPOUND_HEAD, PG_head);
+	else
 		u |= 1 << KPF_COMPOUND_TAIL;
-	if (PageHuge(page))
+	if (folio_test_hugetlb(folio))
 		u |= 1 << KPF_HUGE;
 	/*
-	 * PageTransCompound can be true for non-huge compound pages (slab
-	 * pages or pages allocated by drivers with __GFP_COMP) because it
-	 * just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon
+	 * We need to check PageLRU/PageAnon
 	 * to make sure a given page is a thp, not a non-huge compound page.
 	 */
-	else if (PageTransCompound(page)) {
-		struct page *head = compound_head(page);
-
-		if (PageLRU(head) || PageAnon(head))
+	else if (folio_test_large(folio)) {
+		if ((k & (1 << PG_lru)) || is_anon)
 			u |= 1 << KPF_THP;
-		else if (is_huge_zero_page(head)) {
+		else if (is_huge_zero_page(&folio->page)) {
 			u |= 1 << KPF_ZERO_PAGE;
 			u |= 1 << KPF_THP;
 		}
 	} else if (is_zero_pfn(page_to_pfn(page)))
 		u |= 1 << KPF_ZERO_PAGE;
 
-
 	/*
 	 * Caveats on high order pages: PG_buddy and PG_slab will only be set
 	 * on the head page.
@@ -174,16 +175,17 @@ u64 stable_page_flags(struct page *page)
 		u |= 1 << KPF_OFFLINE;
 	if (PageTable(page))
 		u |= 1 << KPF_PGTABLE;
+	if (folio_test_slab(folio))
+		u |= 1 << KPF_SLAB;
 
-	if (page_is_idle(page))
+#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
+	u |= kpf_copy_bit(k, KPF_IDLE, PG_idle);
+#else
+	if (folio_test_idle(folio))
 		u |= 1 << KPF_IDLE;
+#endif
 
 	u |= kpf_copy_bit(k, KPF_LOCKED, PG_locked);
-
-	u |= kpf_copy_bit(k, KPF_SLAB, PG_slab);
-	if (PageTail(page) && PageSlab(page))
-		u |= 1 << KPF_SLAB;
-
 	u |= kpf_copy_bit(k, KPF_ERROR, PG_error);
 	u |= kpf_copy_bit(k, KPF_DIRTY, PG_dirty);
 	u |= kpf_copy_bit(k, KPF_UPTODATE, PG_uptodate);
@@ -194,15 +196,19 @@ u64 stable_page_flags(struct page *page)
 	u |= kpf_copy_bit(k, KPF_ACTIVE, PG_active);
 	u |= kpf_copy_bit(k, KPF_RECLAIM, PG_reclaim);
 
-	if (PageSwapCache(page))
+#define SWAPCACHE ((1 << PG_swapbacked) | (1 << PG_swapcache))
+	if ((k & SWAPCACHE) == SWAPCACHE)
 		u |= 1 << KPF_SWAPCACHE;
 	u |= kpf_copy_bit(k, KPF_SWAPBACKED, PG_swapbacked);
 
 	u |= kpf_copy_bit(k, KPF_UNEVICTABLE, PG_unevictable);
 	u |= kpf_copy_bit(k, KPF_MLOCKED, PG_mlocked);
 
 #ifdef CONFIG_MEMORY_FAILURE
-	u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison);
+	if (u & (1 << KPF_HUGE))
+		u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison);
+	else
+		u |= kpf_copy_bit(page->flags, KPF_HWPOISON, PG_hwpoison);
 #endif
 
 #ifdef CONFIG_ARCH_USES_PG_UNCACHED
@@ -228,7 +234,6 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
 {
 	const unsigned long max_dump_pfn = get_max_dump_pfn();
 	u64 __user *out = (u64 __user *)buf;
-	struct page *ppage;
 	unsigned long src = *ppos;
 	unsigned long pfn;
 	ssize_t ret = 0;
@@ -245,9 +250,9 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
 		 * TODO: ZONE_DEVICE support requires to identify
 		 * memmaps that were actually initialized.
 		 */
-		ppage = pfn_to_online_page(pfn);
+		struct page *page = pfn_to_online_page(pfn);
 
-		if (put_user(stable_page_flags(ppage), out)) {
+		if (put_user(stable_page_flags(page), out)) {
 			ret = -EFAULT;
 			break;
 		}
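Two hunks above repay a closer look, and the "[akpm: fix up some bits vs masks]" note in the changelog hints at the trap: kpf_copy_bit() takes bit numbers, while the new SWAPCACHE test builds a mask and requires both bits at once, matching folio_test_swapcache(), which only honours PG_swapcache when PG_swapbacked is also set. Likewise, the hwpoison branch reads the folio-wide flag word k for hugetlb pages but falls back to the per-page page->flags otherwise. A standalone sketch of the bits-versus-masks distinction (the PG_* values here are made up; KPF_SWAPCACHE is the uapi bit number):

/* Standalone sketch; not kernel code. PG_* bit numbers are invented
 * for illustration, KPF_SWAPCACHE is the uapi bit number.
 */
#include <assert.h>
#include <stdint.h>

enum { PG_swapbacked = 7, PG_swapcache = 10 };	/* hypothetical */
enum { KPF_SWAPCACHE = 13 };

/* same body as the kernel's kpf_copy_bit(): ubit and kbit are bit
 * numbers, not masks */
static uint64_t kpf_copy_bit(uint64_t kflags, int ubit, int kbit)
{
	return ((kflags >> kbit) & 1) << ubit;
}

int main(void)
{
	const uint64_t SWAPCACHE = (1 << PG_swapbacked) | (1 << PG_swapcache);
	uint64_t k = 1 << PG_swapcache;

	/* mask test: PG_swapcache alone must not count as swapcache */
	assert((k & SWAPCACHE) != SWAPCACHE);
	k |= 1 << PG_swapbacked;
	assert((k & SWAPCACHE) == SWAPCACHE);

	/* bit-number form: copy one flag bit to its KPF_* position */
	assert(kpf_copy_bit(k, KPF_SWAPCACHE, PG_swapcache) ==
	       1ULL << KPF_SWAPCACHE);
	return 0;
}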
4 changes: 2 additions & 2 deletions include/linux/huge_mm.h
@@ -351,7 +351,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);
 extern struct page *huge_zero_page;
 extern unsigned long huge_zero_pfn;
 
-static inline bool is_huge_zero_page(struct page *page)
+static inline bool is_huge_zero_page(const struct page *page)
 {
 	return READ_ONCE(huge_zero_page) == page;
 }
@@ -480,7 +480,7 @@ static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
 	return 0;
 }
 
-static inline bool is_huge_zero_page(struct page *page)
+static inline bool is_huge_zero_page(const struct page *page)
 {
 	return false;
 }
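The two huge_mm.h hunks, like the page-flags.h one below, exist because const ripples outward: once stable_page_flags() takes a const struct page *, every helper receiving that pointer must accept const as well. A minimal illustration of the compile problem being avoided (generic C, not kernel code):

/* Why the helpers gain const: passing a const-qualified pointer to a
 * non-const parameter discards the qualifier, which the kernel's
 * warning settings turn into a build failure.
 */
struct page { unsigned long flags; };

static int is_huge_zero_page(const struct page *page)	/* const added */
{
	(void)page;
	return 0;	/* stand-in body */
}

unsigned long long stable_page_flags(const struct page *page)
{
	/* without const on the helper, this call would trigger
	 * -Wdiscarded-qualifiers */
	return is_huge_zero_page(page);
}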
2 changes: 1 addition & 1 deletion include/linux/page-flags.h
@@ -734,7 +734,7 @@ static __always_inline bool PageKsm(const struct page *page)
 TESTPAGEFLAG_FALSE(Ksm, ksm)
 #endif
 
-u64 stable_page_flags(struct page *page);
+u64 stable_page_flags(const struct page *page);
 
 /**
  * folio_xor_flags_has_waiters - Change some folio flags.
5 changes: 2 additions & 3 deletions tools/cgroup/memcg_slabinfo.py
@@ -146,12 +146,11 @@ def detect_kernel_config():
 
 
 def for_each_slab(prog):
-    PGSlab = 1 << prog.constant('PG_slab')
-    PGHead = 1 << prog.constant('PG_head')
+    PGSlab = ~prog.constant('PG_slab')
 
     for page in for_each_page(prog):
         try:
-            if page.flags.value_() & PGSlab:
+            if page.page_type.value_() == PGSlab:
                 yield cast('struct slab *', page)
         except FaultError:
             pass
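The drgn script no longer looks for PG_slab in page->flags because this series moves the slab marker into page->page_type. Under the page_type encoding in use at the time, an untyped page's page_type is all ones and setting a type clears that type's bit, so a slab page's page_type compares equal to ~PG_slab, which is exactly what the script now tests. A sketch of that encoding (the PG_slab bit value below is hypothetical):

/* Sketch of the page_type encoding the memcg_slabinfo.py change
 * relies on: no type = all bits set; setting a type clears its bit,
 * so an exact compare against ~PG_slab identifies slab pages.
 */
#include <assert.h>
#include <stdint.h>

#define PG_slab (1u << 4)	/* hypothetical page_type bit */

int main(void)
{
	uint32_t page_type = ~0u;	/* untyped page */

	page_type &= ~PG_slab;		/* "SetPageSlab": clear the bit */
	assert(page_type == (uint32_t)~PG_slab);

	page_type |= PG_slab;		/* "ClearPageSlab": restore */
	assert(page_type == ~0u);
	return 0;
}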
