Revert "BACKPORT: mm: fix pageblock heuristic"
This reverts commit 81da353e5d0c2f2a8f2cc2f6f2541d481836bb35.
psndna88 authored and 1petro committed Sep 24, 2020
1 parent 0eb678a commit bb579c4
Showing 3 changed files with 12 additions and 17 deletions.
2 changes: 1 addition & 1 deletion mm/compaction.c
@@ -1345,7 +1345,7 @@ static int __compact_finished(struct zone *zone, struct compact_control *cc,
 		 * other migratetype buddy lists.
 		 */
 		if (find_suitable_fallback(area, order, migratetype,
-						true, cc->order, &can_steal) != -1)
+						true, &can_steal) != -1)
 			return COMPACT_PARTIAL;
 	}

5 changes: 3 additions & 2 deletions mm/internal.h
@@ -250,8 +250,9 @@ isolate_freepages_range(struct compact_control *cc,
 unsigned long
 isolate_migratepages_range(struct compact_control *cc,
 			   unsigned long low_pfn, unsigned long end_pfn);
-int find_suitable_fallback(struct free_area *area, unsigned int current_order,
-			int migratetype, bool only_stealable, int start_order, bool *can_steal);
+int find_suitable_fallback(struct free_area *area, unsigned int order,
+			int migratetype, bool only_stealable, bool *can_steal);
 
 #endif
 
 /*
22 changes: 8 additions & 14 deletions mm/page_alloc.c
@@ -1655,8 +1655,7 @@ static void change_pageblock_range(struct page *pageblock_page,
  * is worse than movable allocations stealing from unmovable and reclaimable
  * pageblocks.
  */
-static bool can_steal_fallback(unsigned int current_order, unsigned int start_order,
-			int start_mt, int fallback_mt)
+static bool can_steal_fallback(unsigned int order, int start_mt)
 {
 	/*
 	 * Leaving this order check is intended, although there is
@@ -1665,17 +1664,12 @@ static bool can_steal_fallback(unsigned int current_order, unsigned int start_order,
 	 * but, below check doesn't guarantee it and that is just heuristic
 	 * so could be changed anytime.
 	 */
-	if (current_order >= pageblock_order)
+	if (order >= pageblock_order)
 		return true;
 
-	/* don't let unmovable allocations cause migrations simply because of free pages */
-	if ((start_mt != MIGRATE_UNMOVABLE && current_order >= pageblock_order / 2) ||
-		/* only steal reclaimable page blocks for unmovable allocations */
-		(start_mt == MIGRATE_UNMOVABLE && fallback_mt != MIGRATE_MOVABLE && current_order >= pageblock_order / 2) ||
-		/* reclaimable can steal aggressively */
+	if (order >= pageblock_order / 2 ||
 		start_mt == MIGRATE_RECLAIMABLE ||
-		/* allow unmovable allocs up to 64K without migrating blocks */
-		(start_mt == MIGRATE_UNMOVABLE && start_order >= 5) ||
+		start_mt == MIGRATE_UNMOVABLE ||
 		page_group_by_mobility_disabled)
 		return true;
 
@@ -1715,8 +1709,8 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page,
  * we can steal other freepages all together. This would help to reduce
  * fragmentation due to mixed migratetype pages in one pageblock.
  */
-int find_suitable_fallback(struct free_area *area, unsigned int current_order,
-			int migratetype, bool only_stealable, int start_order, bool *can_steal)
+int find_suitable_fallback(struct free_area *area, unsigned int order,
+			int migratetype, bool only_stealable, bool *can_steal)
 {
 	int i;
 	int fallback_mt;
@@ -1733,7 +1727,7 @@ int find_suitable_fallback(struct free_area *area, unsigned int current_order,
 		if (list_empty(&area->free_list[fallback_mt]))
 			continue;
 
-		if (can_steal_fallback(current_order, start_order, migratetype, fallback_mt))
+		if (can_steal_fallback(order, migratetype))
 			*can_steal = true;
 
 		if (!only_stealable)
@@ -1869,7 +1863,7 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
 					--current_order) {
 		area = &(zone->free_area[current_order]);
 		fallback_mt = find_suitable_fallback(area, current_order,
-				start_migratetype, false, order, &can_steal);
+				start_migratetype, false, &can_steal);
 		if (fallback_mt == -1)
 			continue;
 
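For quick comparison, the sketch below (not part of the commit) models the two versions of can_steal_fallback() touched by this revert, built only from the conditions visible in the hunks above. It is a userspace illustration, not kernel code: PAGEBLOCK_ORDER = 9 is an assumed typical value (4K pages, 2MB pageblocks), page_group_by_mobility_disabled is assumed to be 0, and the migratetype constants are stand-ins for the kernel's enum.

/*
 * Userspace sketch comparing the heuristic restored by this revert with the
 * backported one it removes.  Conditions are copied from the hunks above;
 * PAGEBLOCK_ORDER and the migratetype values are illustrative assumptions.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGEBLOCK_ORDER 9	/* assumed: 4K pages, 2MB pageblocks */

enum { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RECLAIMABLE };

/* Heuristic as it reads after the revert (page_group_by_mobility_disabled assumed 0). */
static bool can_steal_restored(unsigned int order, int start_mt)
{
	if (order >= PAGEBLOCK_ORDER)
		return true;

	return order >= PAGEBLOCK_ORDER / 2 ||
	       start_mt == MIGRATE_RECLAIMABLE ||
	       start_mt == MIGRATE_UNMOVABLE;
}

/* Heuristic of the reverted backport, taken from the deleted lines above. */
static bool can_steal_backport(unsigned int current_order, unsigned int start_order,
			       int start_mt, int fallback_mt)
{
	if (current_order >= PAGEBLOCK_ORDER)
		return true;

	return (start_mt != MIGRATE_UNMOVABLE && current_order >= PAGEBLOCK_ORDER / 2) ||
	       (start_mt == MIGRATE_UNMOVABLE && fallback_mt != MIGRATE_MOVABLE &&
		current_order >= PAGEBLOCK_ORDER / 2) ||
	       start_mt == MIGRATE_RECLAIMABLE ||
	       (start_mt == MIGRATE_UNMOVABLE && start_order >= 5);
}

int main(void)
{
	/* Example: an order-0 unmovable request looking at an order-3 movable buddy. */
	printf("restored heuristic: %d\n", can_steal_restored(3, MIGRATE_UNMOVABLE));
	printf("reverted backport:  %d\n", can_steal_backport(3, 0, MIGRATE_UNMOVABLE, MIGRATE_MOVABLE));
	return 0;
}

With these assumed inputs the restored heuristic allows the steal (unmovable requests always qualify below pageblock_order), while the reverted backport refuses it; that is the behavioral difference this revert brings back.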
