[PATCH] VM: rate limit early reclaim
When early zone reclaim is turned on, the LRU is scanned more frequently whenever a
zone is low on memory.  This patch limits how often zone reclaim can be invoked by
skipping the scan when another thread (either kswapd or synchronous reclaim) is
already reclaiming from the zone.

Signed-off-by: Martin Hicks <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
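The whole scheme is a single atomic counter per zone, started at -1 so that the kernel's
atomic_inc_and_test() (increment, then report whether the result is exactly zero) succeeds
only for the first reclaimer to enter an otherwise idle zone. The sketch below is an
illustrative user-space rendition of that pattern, not the kernel code: C11 <stdatomic.h>
stands in for atomic_t, and the helper names inc_and_test() and reclaim_from_kswapd() are
invented for the example.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the new field this patch adds to struct zone. */
struct zone {
	atomic_int reclaim_in_progress;	/* -1 means "no reclaimer active" */
};

/* Mimics the kernel's atomic_inc_and_test(): increment, then report
 * whether the result is exactly zero. */
static bool inc_and_test(atomic_int *v)
{
	return atomic_fetch_add(v, 1) + 1 == 0;
}

/* kswapd / synchronous reclaim path: always scans, but advertises
 * itself by holding the counter above -1 while it works. */
static void reclaim_from_kswapd(struct zone *zone)
{
	atomic_fetch_add(&zone->reclaim_in_progress, 1);
	/* ... shrink_zone(zone, &sc) would run here ... */
	atomic_fetch_sub(&zone->reclaim_in_progress, 1);
}

/* Early zone_reclaim() path: skip the scan entirely if any other
 * reclaimer is already working on this zone. */
static long zone_reclaim(struct zone *zone)
{
	long total_reclaimed = 0;

	if (!inc_and_test(&zone->reclaim_in_progress))
		goto out;	/* someone else got there first */

	/* ... shrink_zone(zone, &sc); total_reclaimed = sc.nr_reclaimed; ... */
out:
	atomic_fetch_sub(&zone->reclaim_in_progress, 1);
	return total_reclaimed;
}

int main(void)
{
	struct zone z = { .reclaim_in_progress = -1 };	/* as in free_area_init_core() */

	printf("zone_reclaim returned %ld\n", zone_reclaim(&z));
	reclaim_from_kswapd(&z);
	return 0;
}

kswapd and synchronous (direct) reclaim still scan unconditionally and merely advertise
themselves; only the opportunistic zone_reclaim() path backs off, which is what the three
mm/vmscan.c hunks below implement.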
Martin Hicks authored and Linus Torvalds committed Jun 22, 2005
1 parent 0c35bba commit 1e7e5a9
Showing 3 changed files with 13 additions and 0 deletions.
2 changes: 2 additions & 0 deletions include/linux/mmzone.h
@@ -149,6 +149,8 @@ struct zone {
* as it fails a watermark_ok() in __alloc_pages?
*/
int reclaim_pages;
+	/* A count of how many reclaimers are scanning this zone */
+	atomic_t reclaim_in_progress;

/*
* prev_priority holds the scanning priority for this zone. It is
1 change: 1 addition & 0 deletions mm/page_alloc.c
@@ -1738,6 +1738,7 @@ static void __init free_area_init_core(struct pglist_data *pgdat,
zone->nr_scan_inactive = 0;
zone->nr_active = 0;
zone->nr_inactive = 0;
+	atomic_set(&zone->reclaim_in_progress, -1);
if (!size)
continue;

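The -1 initialization above is what makes the gate work: atomic_inc_and_test() reports
success only when the increment lands exactly on zero, so only the first reclaimer into an
idle zone passes the test in zone_reclaim(), while every later caller sees a positive count
and skips. A small stand-alone check of that arithmetic, again using C11 atomics in place
of atomic_t (illustrative only):

#include <assert.h>
#include <stdatomic.h>

int main(void)
{
	atomic_int count = -1;	/* initial value set in free_area_init_core() */

	/* First reclaimer: -1 -> 0, the "inc and test" succeeds, it may scan. */
	assert(atomic_fetch_add(&count, 1) + 1 == 0);

	/* Second reclaimer while the first is active: 0 -> 1, the test fails, it skips. */
	assert(atomic_fetch_add(&count, 1) + 1 != 0);

	/* Both paths decrement on the way out, returning the counter to -1. */
	atomic_fetch_sub(&count, 1);
	atomic_fetch_sub(&count, 1);
	assert(atomic_load(&count) == -1);
	return 0;
}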
10 changes: 10 additions & 0 deletions mm/vmscan.c
@@ -900,7 +900,9 @@ shrink_caches(struct zone **zones, struct scan_control *sc)
if (zone->all_unreclaimable && sc->priority != DEF_PRIORITY)
continue; /* Let kswapd poll it */

+	atomic_inc(&zone->reclaim_in_progress);
shrink_zone(zone, sc);
+	atomic_dec(&zone->reclaim_in_progress);
}
}

@@ -1111,7 +1113,9 @@ static int balance_pgdat(pg_data_t *pgdat, int nr_pages, int order)
sc.nr_reclaimed = 0;
sc.priority = priority;
sc.swap_cluster_max = nr_pages? nr_pages : SWAP_CLUSTER_MAX;
+	atomic_inc(&zone->reclaim_in_progress);
shrink_zone(zone, &sc);
+	atomic_dec(&zone->reclaim_in_progress);
reclaim_state->reclaimed_slab = 0;
nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
lru_pages);
@@ -1354,9 +1358,15 @@ int zone_reclaim(struct zone *zone, unsigned int gfp_mask, unsigned int order)
else
sc.swap_cluster_max = SWAP_CLUSTER_MAX;

+	/* Don't reclaim the zone if there are other reclaimers active */
+	if (!atomic_inc_and_test(&zone->reclaim_in_progress))
+		goto out;
+
shrink_zone(zone, &sc);
total_reclaimed = sc.nr_reclaimed;

+out:
+	atomic_dec(&zone->reclaim_in_progress);
return total_reclaimed;
}
