diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1013f37cd815d7..7bfc0fe064e758 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2428,8 +2428,7 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
 	return true;
 }
 
-static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc,
-			enum zone_type classzone_idx)
+static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 {
 	struct reclaim_state *reclaim_state = current->reclaim_state;
 	unsigned long nr_reclaimed, nr_scanned;
@@ -2658,7 +2657,7 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 		if (zone->zone_pgdat == last_pgdat)
 			continue;
 		last_pgdat = zone->zone_pgdat;
-		shrink_node(zone->zone_pgdat, sc, classzone_idx);
+		shrink_node(zone->zone_pgdat, sc);
 	}
 
 	/*
@@ -3082,7 +3081,6 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
  * This is used to determine if the scanning priority needs to be raised.
  */
 static bool kswapd_shrink_node(pg_data_t *pgdat,
-			       int classzone_idx,
 			       struct scan_control *sc)
 {
 	struct zone *zone;
@@ -3090,7 +3088,7 @@ static bool kswapd_shrink_node(pg_data_t *pgdat,
 
 	/* Reclaim a number of pages proportional to the number of zones */
 	sc->nr_to_reclaim = 0;
-	for (z = 0; z <= classzone_idx; z++) {
+	for (z = 0; z <= sc->reclaim_idx; z++) {
 		zone = pgdat->node_zones + z;
 		if (!populated_zone(zone))
 			continue;
@@ -3102,7 +3100,7 @@ static bool kswapd_shrink_node(pg_data_t *pgdat,
 	 * Historically care was taken to put equal pressure on all zones but
 	 * now pressure is applied based on node LRU order.
 	 */
-	shrink_node(pgdat, sc, classzone_idx);
+	shrink_node(pgdat, sc);
 
 	/*
 	 * Fragmentation may mean that the system cannot be rebalanced for
@@ -3164,7 +3162,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 			if (!populated_zone(zone))
 				continue;
 
-			classzone_idx = i;
+			sc.reclaim_idx = i;
 			break;
 		}
 	}
@@ -3177,12 +3175,12 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 		 * zone was balanced even under extreme pressure when the
 		 * overall node may be congested.
 		 */
-		for (i = classzone_idx; i >= 0; i--) {
+		for (i = sc.reclaim_idx; i >= 0; i--) {
 			zone = pgdat->node_zones + i;
 			if (!populated_zone(zone))
 				continue;
 
-			if (zone_balanced(zone, sc.order, classzone_idx))
+			if (zone_balanced(zone, sc.order, sc.reclaim_idx))
 				goto out;
 		}
 
@@ -3213,7 +3211,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 		 * enough pages are already being scanned that that high
 		 * watermark would be met at 100% efficiency.
 		 */
-		if (kswapd_shrink_node(pgdat, classzone_idx, &sc))
+		if (kswapd_shrink_node(pgdat, &sc))
 			raise_priority = false;
 
 		/*
@@ -3676,7 +3674,7 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
 		 * priorities until we have enough memory freed.
 		 */
 		do {
-			shrink_node(pgdat, &sc, classzone_idx);
+			shrink_node(pgdat, &sc);
 		} while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
 	}
 
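
For context, a minimal sketch of what a reclaim caller looks like after this change. The function below is hypothetical and only illustrative; struct scan_control, its reclaim_idx field, shrink_node(), gfp_zone() and DEF_PRIORITY are the existing kernel symbols the patch relies on. The point the patch makes is that the highest zone eligible for reclaim is set once in scan_control instead of being threaded through shrink_node() and kswapd_shrink_node() as a separate classzone_idx argument:

/*
 * Hypothetical sketch, not part of the patch: a direct-reclaim style
 * caller derives the zone limit from the allocation's GFP flags and
 * stores it in scan_control once, up front.
 */
static unsigned long example_reclaim(pg_data_t *pgdat, gfp_t gfp_mask, int order)
{
	struct scan_control sc = {
		.gfp_mask	= gfp_mask,
		.order		= order,
		/* Highest zone to reclaim from, set once here. */
		.reclaim_idx	= gfp_zone(gfp_mask),
		.priority	= DEF_PRIORITY,
	};

	do {
		/* shrink_node() now reads sc->reclaim_idx internally. */
		shrink_node(pgdat, &sc);
	} while (!sc.nr_reclaimed && --sc.priority >= 0);

	return sc.nr_reclaimed;
}

The kswapd path sets the same field differently: as the hunk at balance_pgdat() shows, it assigns sc.reclaim_idx from the highest populated zone it decides to scan, so both direct reclaim and kswapd converge on the single field rather than two parallel index parameters.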