Skip to content

Commit

Permalink
mm, page_owner: make init_pages_in_zone() faster
Browse files Browse the repository at this point in the history
In init_pages_in_zone() we currently use the generic set_page_owner()
function to initialize page_owner info for early allocated pages.  This
means we needlessly do lookup_page_ext() twice for each page, and more
importantly save_stack(), which has to unwind the stack and find the
corresponding stack depot handle.  Because the stack is always the same
for the initialization, unwind it once in init_pages_in_zone() and reuse
the handle.  Also avoid the repeated lookup_page_ext().

This can significantly reduce boot times with page_owner=on on large
machines, especially for kernels built without frame pointer, where the
stack unwinding is noticeably slower.

[[email protected]: don't duplicate code of __set_page_owner(), per Michal Hocko]
[[email protected]: coding-style fixes]
[[email protected]: create statically allocated fake stack trace for early allocated pages, per Michal]
  Link: http://lkml.kernel.org/r/[email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Vlastimil Babka <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Yang Shi <[email protected]>
Cc: Laura Abbott <[email protected]>
Cc: Vinayak Menon <[email protected]>
Cc: zhong jiang <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
  • Loading branch information
tehcaster authored and torvalds committed Sep 7, 2017
1 parent b95046b commit dab4ead
Showing 1 changed file with 32 additions and 20 deletions.
52 changes: 32 additions & 20 deletions mm/page_owner.c
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@ DEFINE_STATIC_KEY_FALSE(page_owner_inited);

/*
 * Placeholder stack depot handles, each filled in once at init time by
 * the corresponding register_*_stack() helper below.
 */
static depot_stack_handle_t dummy_handle;
/* NOTE(review): presumably used when saving a real stack fails — confirm. */
static depot_stack_handle_t failure_handle;
/* Handle shared by all pages found already allocated during early boot. */
static depot_stack_handle_t early_handle;

static void init_early_allocated_pages(void);

Expand All @@ -53,7 +54,7 @@ static bool need_page_owner(void)
return true;
}

/*
 * Capture the current (init-time) call stack once and store it in the
 * stack depot, returning the depot handle.  Shared by all of the
 * register_*_stack() helpers so each placeholder handle refers to a
 * single saved trace.
 */
static __always_inline depot_stack_handle_t create_dummy_stack(void)
{
	unsigned long entries[4];
	struct stack_trace dummy;

	dummy.nr_entries = 0;
	dummy.max_entries = ARRAY_SIZE(entries);
	dummy.entries = &entries[0];
	dummy.skip = 0;

	save_stack_trace(&dummy);
	return depot_save_stack(&dummy, GFP_KERNEL);
}

static noinline void register_failure_stack(void)
/* Initialize dummy_handle with a placeholder stack trace. */
static noinline void register_dummy_stack(void)
{
	dummy_handle = create_dummy_stack();
}

/* Initialize failure_handle with a placeholder stack trace. */
static noinline void register_failure_stack(void)
{
	failure_handle = create_dummy_stack();
}

save_stack_trace(&failure);
failure_handle = depot_save_stack(&failure, GFP_KERNEL);
static noinline void register_early_stack(void)
{
early_handle = create_dummy_stack();
}

static void init_page_owner(void)
Expand All @@ -88,6 +90,7 @@ static void init_page_owner(void)

register_dummy_stack();
register_failure_stack();
register_early_stack();
static_branch_enable(&page_owner_inited);
init_early_allocated_pages();
}
Expand Down Expand Up @@ -165,24 +168,33 @@ static noinline depot_stack_handle_t save_stack(gfp_t flags)
return handle;
}

noinline void __set_page_owner(struct page *page, unsigned int order,
gfp_t gfp_mask)
static inline void __set_page_owner_handle(struct page_ext *page_ext,
depot_stack_handle_t handle, unsigned int order, gfp_t gfp_mask)
{
struct page_ext *page_ext = lookup_page_ext(page);
struct page_owner *page_owner;

if (unlikely(!page_ext))
return;

page_owner = get_page_owner(page_ext);
page_owner->handle = save_stack(gfp_mask);
page_owner->handle = handle;
page_owner->order = order;
page_owner->gfp_mask = gfp_mask;
page_owner->last_migrate_reason = -1;

__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
}

/*
 * Record the allocating call stack and allocation info for @page.
 * Silently does nothing when no page_ext is available for the page.
 */
noinline void __set_page_owner(struct page *page, unsigned int order,
	gfp_t gfp_mask)
{
	struct page_ext *page_ext = lookup_page_ext(page);

	if (unlikely(!page_ext))
		return;

	__set_page_owner_handle(page_ext, save_stack(gfp_mask), order, gfp_mask);
}

void __set_page_owner_migrate_reason(struct page *page, int reason)
{
struct page_ext *page_ext = lookup_page_ext(page);
Expand Down Expand Up @@ -565,12 +577,12 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
if (unlikely(!page_ext))
continue;

/* Maybe overlapping zone */
if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
continue;

/* Found early allocated page */
set_page_owner(page, 0, 0);
__set_page_owner_handle(page_ext, early_handle, 0, 0);
count++;
}
}
Expand Down

0 comments on commit dab4ead

Please sign in to comment.