page_ext: introduce boot parameter 'early_page_ext'
Since commit 2f1ee09 ("Revert "mm: use early_pfn_to_nid in
page_ext_init""), page_ext_init() is called after page_alloc_init_late() to
avoid a boot-time panic.  As a result, the current kernel cannot track early
page allocations, even though the struct pages have already been initialized
by that point.

This patch introduces a new boot parameter, 'early_page_ext', to resolve
this problem.  When it is passed to the kernel, page_ext_init() is moved
earlier in boot and deferred initialization of struct pages is disabled, so
the page allocator is fully initialized early and the panic described above
is avoided.  This makes it possible to catch early page allocations, which
is especially useful when the amount of free memory reported right after
boot differs between otherwise identical kernels.
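
As an illustration (not part of this patch), the parameter is simply appended
to the kernel command line from the bootloader; the image path, root device,
and the companion 'page_owner=on' option below are assumptions chosen for the
example:

    linux /boot/vmlinuz root=/dev/sda1 ro early_page_ext page_owner=on

With CONFIG_PAGE_OWNER enabled, page_owner should then be able to record a
larger share of the early boot allocations as well.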

[[email protected]: fix section issue by removing __meminitdata]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Li Zhe <[email protected]>
Suggested-by: Michal Hocko <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
Cc: Jason A. Donenfeld <[email protected]>
Cc: Jonathan Corbet <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: Mark-PK Tsai <[email protected]>
Cc: Masami Hiramatsu (Google) <[email protected]>
Cc: Steven Rostedt <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Li Zhe authored and akpm00 committed Sep 12, 2022
1 parent 1a6baaa commit c4f20f1
Showing 5 changed files with 34 additions and 1 deletion.
8 changes: 8 additions & 0 deletions Documentation/admin-guide/kernel-parameters.txt
@@ -1471,6 +1471,14 @@
 			Permit 'security.evm' to be updated regardless of
 			current integrity status.
 
+	early_page_ext	[KNL] Enforces page_ext initialization to earlier
+			stages so as to cover more early boot allocations.
+			Please note that as a side effect some optimizations
+			might be disabled to achieve that (e.g. parallelized
+			memory initialization is disabled), so the boot process
+			might take longer, especially on systems with a lot of
+			memory. Available with CONFIG_PAGE_EXTENSION=y.
+
 	failslab=
 	fail_usercopy=
 	fail_page_alloc=
11 changes: 11 additions & 0 deletions include/linux/page_ext.h
@@ -36,9 +36,15 @@ struct page_ext {
 	unsigned long flags;
 };
 
+extern bool early_page_ext;
 extern unsigned long page_ext_size;
 extern void pgdat_page_ext_init(struct pglist_data *pgdat);
 
+static inline bool early_page_ext_enabled(void)
+{
+	return early_page_ext;
+}
+
 #ifdef CONFIG_SPARSEMEM
 static inline void page_ext_init_flatmem(void)
 {
@@ -68,6 +74,11 @@ static inline struct page_ext *page_ext_next(struct page_ext *curr)
 #else /* !CONFIG_PAGE_EXTENSION */
 struct page_ext;
 
+static inline bool early_page_ext_enabled(void)
+{
+	return false;
+}
+
 static inline void pgdat_page_ext_init(struct pglist_data *pgdat)
 {
 }
6 changes: 5 additions & 1 deletion init/main.c
@@ -849,6 +849,9 @@ static void __init mm_init(void)
 	pgtable_init();
 	debug_objects_mem_init();
 	vmalloc_init();
+	/* Should be run after vmap initialization */
+	if (early_page_ext_enabled())
+		page_ext_init();
 	/* Should be run before the first non-init thread is created */
 	init_espfix_bsp();
 	/* Should be run after espfix64 is set up. */
@@ -1618,7 +1621,8 @@ static noinline void __init kernel_init_freeable(void)
 	padata_init();
 	page_alloc_init_late();
 	/* Initialize page ext after all struct pages are initialized. */
-	page_ext_init();
+	if (!early_page_ext_enabled())
+		page_ext_init();
 
 	do_basic_setup();
 
2 changes: 2 additions & 0 deletions mm/page_alloc.c
@@ -482,6 +482,8 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
 {
 	static unsigned long prev_end_pfn, nr_initialised;
 
+	if (early_page_ext_enabled())
+		return false;
 	/*
 	 * prev_end_pfn static that contains the end of previous zone
 	 * No need to protect because called very early in boot before smp_init.
8 changes: 8 additions & 0 deletions mm/page_ext.c
@@ -91,6 +91,14 @@ unsigned long page_ext_size = sizeof(struct page_ext);
 static unsigned long total_usage;
 static struct page_ext *lookup_page_ext(const struct page *page);
 
+bool early_page_ext;
+static int __init setup_early_page_ext(char *str)
+{
+	early_page_ext = true;
+	return 0;
+}
+early_param("early_page_ext", setup_early_page_ext);
+
 static bool __init invoke_need_callbacks(void)
 {
 	int i;
