Skip to content

Commit

Permalink
mm/page_alloc.c: recalculate some of node threshold when on/offline memory
Browse files Browse the repository at this point in the history

Some of the node thresholds depend on the number of managed pages in the
node. When memory goes on/offline, that number can change, and we need to
adjust the thresholds accordingly.

Add recalculation to appropriate places and clean-up related functions
for better maintenance.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Joonsoo Kim <[email protected]>
Acked-by: Mel Gorman <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Minchan Kim <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
  • Loading branch information
JoonsooKim authored and torvalds committed Aug 10, 2016
1 parent 81cbcbc commit 6423aa8
Showing 1 changed file with 35 additions and 15 deletions.
50 changes: 35 additions & 15 deletions mm/page_alloc.c
Original file line number Diff line number Diff line change
Expand Up @@ -4757,6 +4757,8 @@ int local_memory_node(int node)
}
#endif

static void setup_min_unmapped_ratio(void);
static void setup_min_slab_ratio(void);
#else /* CONFIG_NUMA */

static void set_zonelist_order(void)
Expand Down Expand Up @@ -5878,9 +5880,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
#ifdef CONFIG_NUMA
zone->node = nid;
pgdat->min_unmapped_pages += (freesize*sysctl_min_unmapped_ratio)
/ 100;
pgdat->min_slab_pages += (freesize * sysctl_min_slab_ratio) / 100;
#endif
zone->name = zone_names[j];
zone->zone_pgdat = pgdat;
Expand Down Expand Up @@ -6801,6 +6800,12 @@ int __meminit init_per_zone_wmark_min(void)
setup_per_zone_wmarks();
refresh_zone_stat_thresholds();
setup_per_zone_lowmem_reserve();

#ifdef CONFIG_NUMA
setup_min_unmapped_ratio();
setup_min_slab_ratio();
#endif

return 0;
}
core_initcall(init_per_zone_wmark_min)
Expand Down Expand Up @@ -6842,43 +6847,58 @@ int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
}

#ifdef CONFIG_NUMA
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
static void setup_min_unmapped_ratio(void)
{
struct pglist_data *pgdat;
pg_data_t *pgdat;
struct zone *zone;
int rc;

rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
if (rc)
return rc;

for_each_online_pgdat(pgdat)
pgdat->min_unmapped_pages = 0;

for_each_zone(zone)
zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages *
sysctl_min_unmapped_ratio) / 100;
return 0;
}

int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,

/*
 * Sysctl handler for vm.min_unmapped_ratio.
 *
 * Validates and stores the new ratio via proc_dointvec_minmax(), then
 * recomputes each node's min_unmapped_pages threshold, since the
 * threshold is derived from the ratio and the node's managed pages.
 *
 * Returns 0 on success, or the error from proc_dointvec_minmax().
 */
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	/*
	 * The per-node recalculation now lives in setup_min_unmapped_ratio();
	 * the pgdat/zone locals the old inline loops needed are gone.
	 */
	setup_min_unmapped_ratio();

	return 0;
}

/*
 * Recompute every online node's min_slab_pages threshold from
 * sysctl_min_slab_ratio and the zones' managed page counts.
 * Per the commit intent, this runs both when the sysctl changes and
 * when memory goes on/offline, since managed_pages may have changed.
 */
static void setup_min_slab_ratio(void)
{
	pg_data_t *pgdat;
	struct zone *zone;

	/* Reset first so the per-zone contributions accumulate from zero. */
	for_each_online_pgdat(pgdat)
		pgdat->min_slab_pages = 0;

	/* Each zone contributes ratio% of its managed pages to its node. */
	for_each_zone(zone)
		zone->zone_pgdat->min_slab_pages += (zone->managed_pages *
				sysctl_min_slab_ratio) / 100;
}

/*
 * Sysctl handler for vm.min_slab_ratio: store the new value and then
 * refresh the per-node min_slab_pages thresholds derived from it.
 */
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, length, ppos);

	if (ret)
		return ret;

	setup_min_slab_ratio();
	return 0;
}
#endif
Expand Down

0 comments on commit 6423aa8

Please sign in to comment.