mm/migrate: add CPU hotplug to demotion #ifdef
Once upon a time, the node demotion updates were driven solely by memory
hotplug events.  But now, there are handlers for both CPU and memory
hotplug.

However, the #ifdef around the code checks only memory hotplug.  A
system that has HOTPLUG_CPU=y but MEMORY_HOTPLUG=n would miss CPU
hotplug events.
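
A minimal sketch of the failure mode (illustrative only, not the kernel
sources): when the only guard is CONFIG_MEMORY_HOTPLUG, the CPU handlers
vanish from such a build along with the memory notifier.

/*
 * Sketch: everything below is compiled out when MEMORY_HOTPLUG=n, even
 * though HOTPLUG_CPU=y means CPUs can still come and go and the demotion
 * order should be recomputed when they do.
 */
#ifdef CONFIG_MEMORY_HOTPLUG		/* the only guard before this patch */
static int migration_online_cpu(unsigned int cpu)
{
	/* recompute reclaim-based demotion targets */
	return 0;
}
#endif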

Update the #ifdef around the common code.  Add memory and CPU-specific
#ifdefs for their handlers.  These memory/CPU #ifdefs avoid unused
function warnings when their Kconfig option is off.

[[email protected]: rework hotplug_memory_notifier() stub]
  Link: https://lkml.kernel.org/r/[email protected]

Link: https://lkml.kernel.org/r/[email protected]
Fixes: 884a6e5 ("mm/migrate: update node demotion order on hotplug events")
Signed-off-by: Dave Hansen <[email protected]>
Signed-off-by: Arnd Bergmann <[email protected]>
Cc: "Huang, Ying" <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Wei Xu <[email protected]>
Cc: Oscar Salvador <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Dan Williams <[email protected]>
Cc: David Hildenbrand <[email protected]>
Cc: Greg Thelen <[email protected]>
Cc: Yang Shi <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
hansendc authored and torvalds committed Oct 19, 2021
1 parent 295be91 commit 76af6a0
Showing 4 changed files with 28 additions and 27 deletions.
5 changes: 4 additions & 1 deletion include/linux/memory.h
@@ -160,7 +160,10 @@ int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func,
 #define register_hotmemory_notifier(nb) register_memory_notifier(nb)
 #define unregister_hotmemory_notifier(nb) unregister_memory_notifier(nb)
 #else
-#define hotplug_memory_notifier(fn, pri) ({ 0; })
+static inline int hotplug_memory_notifier(notifier_fn_t fn, int pri)
+{
+	return 0;
+}
 /* These aren't inline functions due to a GCC bug. */
 #define register_hotmemory_notifier(nb) ({ (void)(nb); 0; })
 #define unregister_hotmemory_notifier(nb) ({ (void)(nb); })
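
The point of the reworked stub: unlike the old "({ 0; })" macro, the static
inline still type-checks and references its callback argument, so a notifier
that is only ever registered through hotplug_memory_notifier() no longer
trips "defined but not used" warnings when CONFIG_MEMORY_HOTPLUG=n. A
standalone sketch of the effect (plain userspace C with made-up names, not
kernel code):

#include <stdio.h>

/* Simplified stand-in for the kernel's notifier callback type. */
typedef int (*notifier_fn_t)(void *nb, unsigned long action, void *data);

/* New-style stub: does nothing, but still evaluates and type-checks its
 * arguments, so the callback passed to it counts as "used". */
static inline int hotplug_memory_notifier(notifier_fn_t fn, int pri)
{
	return 0;
}

/* With an old-style "#define hotplug_memory_notifier(fn, pri) ({ 0; })"
 * stub, this callback would never be referenced at all and the compiler
 * could warn that it is defined but not used. */
static int example_mem_callback(void *nb, unsigned long action, void *data)
{
	printf("memory hotplug event %lu\n", action);
	return 0;
}

int main(void)
{
	return hotplug_memory_notifier(example_mem_callback, 100);
}
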
42 changes: 21 additions & 21 deletions mm/migrate.c
@@ -3066,7 +3066,7 @@ void migrate_vma_finalize(struct migrate_vma *migrate)
 EXPORT_SYMBOL(migrate_vma_finalize);
 #endif /* CONFIG_DEVICE_PRIVATE */
 
-#if defined(CONFIG_MEMORY_HOTPLUG)
+#if defined(CONFIG_HOTPLUG_CPU)
 /* Disable reclaim-based migration. */
 static void __disable_all_migrate_targets(void)
 {
@@ -3208,25 +3208,6 @@ static void set_migration_target_nodes(void)
 	put_online_mems();
 }
 
-/*
- * React to hotplug events that might affect the migration targets
- * like events that online or offline NUMA nodes.
- *
- * The ordering is also currently dependent on which nodes have
- * CPUs. That means we need CPU on/offline notification too.
- */
-static int migration_online_cpu(unsigned int cpu)
-{
-	set_migration_target_nodes();
-	return 0;
-}
-
-static int migration_offline_cpu(unsigned int cpu)
-{
-	set_migration_target_nodes();
-	return 0;
-}
-
 /*
  * This leaves migrate-on-reclaim transiently disabled between
  * the MEM_GOING_OFFLINE and MEM_OFFLINE events. This runs
@@ -3284,6 +3265,25 @@ static int __meminit migrate_on_reclaim_callback(struct notifier_block *self,
 	return notifier_from_errno(0);
 }
 
+/*
+ * React to hotplug events that might affect the migration targets
+ * like events that online or offline NUMA nodes.
+ *
+ * The ordering is also currently dependent on which nodes have
+ * CPUs. That means we need CPU on/offline notification too.
+ */
+static int migration_online_cpu(unsigned int cpu)
+{
+	set_migration_target_nodes();
+	return 0;
+}
+
+static int migration_offline_cpu(unsigned int cpu)
+{
+	set_migration_target_nodes();
+	return 0;
+}
+
 static int __init migrate_on_reclaim_init(void)
 {
 	int ret;
@@ -3303,4 +3303,4 @@ static int __init migrate_on_reclaim_init(void)
 	return 0;
 }
 late_initcall(migrate_on_reclaim_init);
-#endif /* CONFIG_MEMORY_HOTPLUG */
+#endif /* CONFIG_HOTPLUG_CPU */
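
For context, handlers with this int fn(unsigned int cpu) shape are wired up
through the CPU hotplug state machine. A hedged sketch of the usual pattern
(the state constant, state name, and init function below are illustrative
assumptions, not part of this diff):

static int __init demotion_cpuhp_init(void)
{
	int ret;

	/*
	 * Call migration_online_cpu() as each CPU comes up and
	 * migration_offline_cpu() as each CPU goes down; a dynamic
	 * state is used here purely for illustration.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/demotion:online",
				migration_online_cpu, migration_offline_cpu);
	return ret < 0 ? ret : 0;
}
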
4 changes: 1 addition & 3 deletions mm/page_ext.c
@@ -269,7 +269,7 @@ static int __meminit init_section_page_ext(unsigned long pfn, int nid)
 	total_usage += table_size;
 	return 0;
 }
-#ifdef CONFIG_MEMORY_HOTPLUG
+
 static void free_page_ext(void *addr)
 {
 	if (is_vmalloc_addr(addr)) {
@@ -374,8 +374,6 @@ static int __meminit page_ext_callback(struct notifier_block *self,
 	return notifier_from_errno(ret);
 }
 
-#endif
-
 void __init page_ext_init(void)
 {
 	unsigned long pfn;
4 changes: 2 additions & 2 deletions mm/slab.c
@@ -1095,7 +1095,7 @@ static int slab_offline_cpu(unsigned int cpu)
 	return 0;
 }
 
-#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
+#if defined(CONFIG_NUMA)
 /*
  * Drains freelist for a node on each slab cache, used for memory hot-remove.
  * Returns -EBUSY if all objects cannot be drained so that the node is not
@@ -1157,7 +1157,7 @@ static int __meminit slab_memory_callback(struct notifier_block *self,
 out:
 	return notifier_from_errno(ret);
 }
-#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
+#endif /* CONFIG_NUMA */
 
 /*
  * swap the static kmem_cache_node with kmalloced memory
