vmscan: only defer compaction for failed order and higher
Currently a failed order-9 (transparent hugepage) compaction can lead to
memory compaction being temporarily disabled for a memory zone, even if
we only need compaction for an order-2 allocation, e.g. for jumbo frame
networking.

The fix is relatively straightforward: keep track of the highest order at
which compaction is succeeding, and only defer compaction for orders at
which compaction is failing.
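
To illustrate the intended behaviour, here is a minimal userspace sketch of
the new defer_compaction()/compaction_deferred() pair. The simplified struct
zone, the MAX_ORDER-based initialisation, and the main() harness are
illustrative assumptions rather than kernel code; only the field logic
mirrors the hunks below. After an order-9 failure, order-9 attempts are
deferred while an order-2 attempt still goes ahead:

#include <stdbool.h>
#include <stdio.h>

#define COMPACT_MAX_DEFER_SHIFT 6       /* skip at most 1 << 6 = 64 attempts */
#define MAX_ORDER 11                    /* illustrative value, not from the patch */

struct zone {                           /* illustrative stub, not the kernel's */
        unsigned int compact_considered;
        unsigned int compact_defer_shift;
        int compact_order_failed;       /* lowest order seen to fail */
};

/* Record a failure: back off, but only for this order and higher. */
static void defer_compaction(struct zone *zone, int order)
{
        zone->compact_considered = 0;
        zone->compact_defer_shift++;

        if (order < zone->compact_order_failed)
                zone->compact_order_failed = order;

        if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
                zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

/* Returns true if compaction should be skipped this time. */
static bool compaction_deferred(struct zone *zone, int order)
{
        unsigned long defer_limit = 1UL << zone->compact_defer_shift;

        /* Orders below the lowest failed order are never deferred. */
        if (order < zone->compact_order_failed)
                return false;

        /* Avoid possible overflow */
        if (++zone->compact_considered > defer_limit)
                zone->compact_considered = defer_limit;

        return zone->compact_considered < defer_limit;
}

int main(void)
{
        /* Fresh zone: nothing has failed yet (illustrative initialisation). */
        struct zone z = { .compact_order_failed = MAX_ORDER };

        defer_compaction(&z, 9);        /* a THP-sized compaction just failed */

        printf("order-9 deferred: %d\n", compaction_deferred(&z, 9)); /* 1 */
        printf("order-2 deferred: %d\n", compaction_deferred(&z, 2)); /* 0 */
        return 0;
}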

Signed-off-by: Rik van Riel <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Acked-by: Mel Gorman <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Minchan Kim <[email protected]>
Cc: KOSAKI Motohiro <[email protected]>
Cc: Hillf Danton <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Rik van Riel authored and torvalds committed Mar 22, 2012
1 parent 7be62de · commit aff6224
Showing 5 changed files with 27 additions and 8 deletions.
include/linux/compaction.h (14 changes: 10 additions & 4 deletions)
@@ -34,20 +34,26 @@ extern unsigned long compaction_suitable(struct zone *zone, int order);
  * allocation success. 1 << compact_defer_limit compactions are skipped up
  * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
  */
-static inline void defer_compaction(struct zone *zone)
+static inline void defer_compaction(struct zone *zone, int order)
 {
         zone->compact_considered = 0;
         zone->compact_defer_shift++;
 
+        if (order < zone->compact_order_failed)
+                zone->compact_order_failed = order;
+
         if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
                 zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
 }
 
 /* Returns true if compaction should be skipped this time */
-static inline bool compaction_deferred(struct zone *zone)
+static inline bool compaction_deferred(struct zone *zone, int order)
 {
         unsigned long defer_limit = 1UL << zone->compact_defer_shift;
 
+        if (order < zone->compact_order_failed)
+                return false;
+
         /* Avoid possible overflow */
         if (++zone->compact_considered > defer_limit)
                 zone->compact_considered = defer_limit;
@@ -73,11 +79,11 @@ static inline unsigned long compaction_suitable(struct zone *zone, int order)
         return COMPACT_SKIPPED;
 }
 
-static inline void defer_compaction(struct zone *zone)
+static inline void defer_compaction(struct zone *zone, int order)
 {
 }
 
-static inline bool compaction_deferred(struct zone *zone)
+static inline bool compaction_deferred(struct zone *zone, int order)
 {
         return 1;
 }
include/linux/mmzone.h (1 change: 1 addition & 0 deletions)
@@ -365,6 +365,7 @@ struct zone {
          */
         unsigned int            compact_considered;
         unsigned int            compact_defer_shift;
+        int                     compact_order_failed;
 #endif
 
         ZONE_PADDING(_pad1_)
mm/compaction.c (12 changes: 11 additions & 1 deletion)
@@ -695,9 +695,19 @@ static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
                 INIT_LIST_HEAD(&cc->freepages);
                 INIT_LIST_HEAD(&cc->migratepages);
 
-                if (cc->order < 0 || !compaction_deferred(zone))
+                if (cc->order < 0 || !compaction_deferred(zone, cc->order))
                         compact_zone(zone, cc);
 
+                if (cc->order > 0) {
+                        int ok = zone_watermark_ok(zone, cc->order,
+                                                low_wmark_pages(zone), 0, 0);
+                        if (ok && cc->order > zone->compact_order_failed)
+                                zone->compact_order_failed = cc->order + 1;
+                        /* Currently async compaction is never deferred. */
+                        else if (!ok && cc->sync)
+                                defer_compaction(zone, cc->order);
+                }
+
                 VM_BUG_ON(!list_empty(&cc->freepages));
                 VM_BUG_ON(!list_empty(&cc->migratepages));
         }
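
Since __compact_pgdat() has no allocation result to inspect, the hunk above
uses the zone's low watermark at cc->order as a proxy for success. A hedged
restatement of that decision (the helper name is invented; watermark_ok and
sync stand in for zone_watermark_ok() and cc->sync, reusing the illustrative
userspace struct zone from the sketch in the commit message):

/* Illustrative only: the per-zone bookkeeping the hunk above performs. */
static void note_zone_outcome(struct zone *zone, int order,
                              bool watermark_ok, bool sync)
{
        if (order <= 0)
                return;                 /* order-less runs keep no per-order state */

        if (watermark_ok && order > zone->compact_order_failed)
                zone->compact_order_failed = order + 1; /* success proxy */
        else if (!watermark_ok && sync)
                defer_compaction(zone, order);  /* async is never deferred */
}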
mm/page_alloc.c (6 changes: 4 additions & 2 deletions)
@@ -1990,7 +1990,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
         if (!order)
                 return NULL;
 
-        if (compaction_deferred(preferred_zone)) {
+        if (compaction_deferred(preferred_zone, order)) {
                 *deferred_compaction = true;
                 return NULL;
         }
@@ -2012,6 +2012,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
                 if (page) {
                         preferred_zone->compact_considered = 0;
                         preferred_zone->compact_defer_shift = 0;
+                        if (order >= preferred_zone->compact_order_failed)
+                                preferred_zone->compact_order_failed = order + 1;
                         count_vm_event(COMPACTSUCCESS);
                         return page;
                 }
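
For contrast with the failure path, the success branch above both clears the
exponential backoff and raises compact_order_failed past the order that just
succeeded, so deferral can only apply to strictly higher orders. A sketch,
again reusing the illustrative userspace struct zone:

/* Illustrative only: what the success branch above does to the zone. */
static void compaction_succeeded(struct zone *zone, int order)
{
        zone->compact_considered = 0;   /* reset the backoff counter */
        zone->compact_defer_shift = 0;  /* and the backoff shift */

        /* Deferral now only applies above the order that just worked. */
        if (order >= zone->compact_order_failed)
                zone->compact_order_failed = order + 1;
}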
@@ -2028,7 +2030,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
                  * defer if the failure was a sync compaction failure.
                  */
                 if (sync_migration)
-                        defer_compaction(preferred_zone);
+                        defer_compaction(preferred_zone, order);
 
                 cond_resched();
         }
mm/vmscan.c (2 changes: 1 addition & 1 deletion)
@@ -2198,7 +2198,7 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
          * If compaction is deferred, reclaim up to a point where
          * compaction will have a chance of success when re-enabled
          */
-        if (compaction_deferred(zone))
+        if (compaction_deferred(zone, sc->order))
                 return watermark_ok;
 
         /* If compaction is not ready to start, keep reclaiming */
