Skip to content

Commit

Permalink
mm, vmscan: avoid passing in classzone_idx unnecessarily to shrink_node
Browse files Browse the repository at this point in the history
shrink_node receives all the information it needs about classzone_idx from
sc->reclaim_idx, so remove the aliases.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Mel Gorman <[email protected]>
Acked-by: Hillf Danton <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Minchan Kim <[email protected]>
Cc: Rik van Riel <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
  • Loading branch information
gormanm authored and torvalds committed Jul 28, 2016
1 parent a5f5f91 commit 970a39a
Showing 1 changed file with 9 additions and 11 deletions.
20 changes: 9 additions & 11 deletions mm/vmscan.c
Original file line number Diff line number Diff line change
Expand Up @@ -2428,8 +2428,7 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
return true;
}

static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc,
enum zone_type classzone_idx)
static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
{
struct reclaim_state *reclaim_state = current->reclaim_state;
unsigned long nr_reclaimed, nr_scanned;
Expand Down Expand Up @@ -2658,7 +2657,7 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
if (zone->zone_pgdat == last_pgdat)
continue;
last_pgdat = zone->zone_pgdat;
shrink_node(zone->zone_pgdat, sc, classzone_idx);
shrink_node(zone->zone_pgdat, sc);
}

/*
Expand Down Expand Up @@ -3082,15 +3081,14 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
* This is used to determine if the scanning priority needs to be raised.
*/
static bool kswapd_shrink_node(pg_data_t *pgdat,
int classzone_idx,
struct scan_control *sc)
{
struct zone *zone;
int z;

/* Reclaim a number of pages proportional to the number of zones */
sc->nr_to_reclaim = 0;
for (z = 0; z <= classzone_idx; z++) {
for (z = 0; z <= sc->reclaim_idx; z++) {
zone = pgdat->node_zones + z;
if (!populated_zone(zone))
continue;
Expand All @@ -3102,7 +3100,7 @@ static bool kswapd_shrink_node(pg_data_t *pgdat,
* Historically care was taken to put equal pressure on all zones but
* now pressure is applied based on node LRU order.
*/
shrink_node(pgdat, sc, classzone_idx);
shrink_node(pgdat, sc);

/*
* Fragmentation may mean that the system cannot be rebalanced for
Expand Down Expand Up @@ -3164,7 +3162,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
if (!populated_zone(zone))
continue;

classzone_idx = i;
sc.reclaim_idx = i;
break;
}
}
Expand All @@ -3177,12 +3175,12 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
* zone was balanced even under extreme pressure when the
* overall node may be congested.
*/
for (i = classzone_idx; i >= 0; i--) {
for (i = sc.reclaim_idx; i >= 0; i--) {
zone = pgdat->node_zones + i;
if (!populated_zone(zone))
continue;

if (zone_balanced(zone, sc.order, classzone_idx))
if (zone_balanced(zone, sc.order, sc.reclaim_idx))
goto out;
}

Expand Down Expand Up @@ -3213,7 +3211,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
* enough pages are already being scanned that that high
* watermark would be met at 100% efficiency.
*/
if (kswapd_shrink_node(pgdat, classzone_idx, &sc))
if (kswapd_shrink_node(pgdat, &sc))
raise_priority = false;

/*
Expand Down Expand Up @@ -3676,7 +3674,7 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
* priorities until we have enough memory freed.
*/
do {
shrink_node(pgdat, &sc, classzone_idx);
shrink_node(pgdat, &sc);
} while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
}

Expand Down

0 comments on commit 970a39a

Please sign in to comment.