Skip to content

Commit

Permalink
mm, memory_hotplug: get rid of is_zone_device_section
Browse files Browse the repository at this point in the history
Device memory hotplug hooks into regular memory hotplug only half way.
It needs memory sections to track struct pages but there is no
need/desire to associate those sections with memory blocks and export
them to the userspace via sysfs because they cannot be onlined anyway.

This is currently expressed by the for_device argument to arch_add_memory
which then makes sure to associate the given memory range with
ZONE_DEVICE.  register_new_memory then relies on is_zone_device_section
to distinguish special memory hotplug from the regular one.  While this
works now, later patches in this series want to move __add_zone outside
of arch_add_memory path so we have to come up with something else.

Add want_memblock down the __add_pages path and use it to control
whether the section->memblock association should be done.
arch_add_memory then just trivially wants memblock for everything except
for_device hotplug.

remove_memory_section doesn't need is_zone_device_section either.  We
can simply skip all the memblock specific cleanup if there is no
memblock for the given section.

This shouldn't introduce any functional change.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Michal Hocko <[email protected]>
Tested-by: Dan Williams <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
Cc: Andi Kleen <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Cc: Balbir Singh <[email protected]>
Cc: Daniel Kiper <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Heiko Carstens <[email protected]>
Cc: Igor Mammedov <[email protected]>
Cc: Jerome Glisse <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: Martin Schwidefsky <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Reza Arbab <[email protected]>
Cc: Tobias Regnery <[email protected]>
Cc: Toshi Kani <[email protected]>
Cc: Vitaly Kuznetsov <[email protected]>
Cc: Xishi Qiu <[email protected]>
Cc: Yasuaki Ishimatsu <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
  • Loading branch information
Michal Hocko authored and torvalds committed Jul 6, 2017
1 parent bfe63d3 commit 1b862ae
Show file tree
Hide file tree
Showing 9 changed files with 22 additions and 24 deletions.
2 changes: 1 addition & 1 deletion arch/ia64/mm/init.c
Original file line number Diff line number Diff line change
Expand Up @@ -658,7 +658,7 @@ int arch_add_memory(int nid, u64 start, u64 size, bool for_device)

zone = pgdat->node_zones +
zone_for_memory(nid, start, size, ZONE_NORMAL, for_device);
ret = __add_pages(nid, zone, start_pfn, nr_pages);
ret = __add_pages(nid, zone, start_pfn, nr_pages, !for_device);

if (ret)
printk("%s: Problem encountered in __add_pages() as ret=%d\n",
Expand Down
2 changes: 1 addition & 1 deletion arch/powerpc/mm/mem.c
Original file line number Diff line number Diff line change
Expand Up @@ -151,7 +151,7 @@ int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
zone = pgdata->node_zones +
zone_for_memory(nid, start, size, 0, for_device);

return __add_pages(nid, zone, start_pfn, nr_pages);
return __add_pages(nid, zone, start_pfn, nr_pages, !for_device);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
Expand Down
2 changes: 1 addition & 1 deletion arch/s390/mm/init.c
Original file line number Diff line number Diff line change
Expand Up @@ -195,7 +195,7 @@ int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
continue;
nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
zone_end_pfn - start_pfn : size_pages;
rc = __add_pages(nid, zone, start_pfn, nr_pages);
rc = __add_pages(nid, zone, start_pfn, nr_pages, !for_device);
if (rc)
break;
start_pfn += nr_pages;
Expand Down
2 changes: 1 addition & 1 deletion arch/sh/mm/init.c
Original file line number Diff line number Diff line change
Expand Up @@ -498,7 +498,7 @@ int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
ret = __add_pages(nid, pgdat->node_zones +
zone_for_memory(nid, start, size, ZONE_NORMAL,
for_device),
start_pfn, nr_pages);
start_pfn, nr_pages, !for_device);
if (unlikely(ret))
printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

Expand Down
2 changes: 1 addition & 1 deletion arch/x86/mm/init_32.c
Original file line number Diff line number Diff line change
Expand Up @@ -831,7 +831,7 @@ int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;

return __add_pages(nid, zone, start_pfn, nr_pages);
return __add_pages(nid, zone, start_pfn, nr_pages, !for_device);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
Expand Down
2 changes: 1 addition & 1 deletion arch/x86/mm/init_64.c
Original file line number Diff line number Diff line change
Expand Up @@ -787,7 +787,7 @@ int arch_add_memory(int nid, u64 start, u64 size, bool for_device)

init_memory_mapping(start, start + size);

ret = __add_pages(nid, zone, start_pfn, nr_pages);
ret = __add_pages(nid, zone, start_pfn, nr_pages, !for_device);
WARN_ON_ONCE(ret);

/* update max_pfn, max_low_pfn and high_memory */
Expand Down
23 changes: 9 additions & 14 deletions drivers/base/memory.c
Original file line number Diff line number Diff line change
Expand Up @@ -685,14 +685,6 @@ static int add_memory_block(int base_section_nr)
return 0;
}

/*
 * Report whether the given memory section backs ZONE_DEVICE memory.
 * Decodes the section's mem_map to its first struct page and checks
 * the zone that page belongs to.
 */
static bool is_zone_device_section(struct mem_section *ms)
{
	struct page *first_page = sparse_decode_mem_map(ms->section_mem_map,
							__section_nr(ms));

	return is_zone_device_page(first_page);
}

/*
* need an interface for the VM to add new memory regions,
* but without onlining it.
Expand All @@ -702,9 +694,6 @@ int register_new_memory(int nid, struct mem_section *section)
int ret = 0;
struct memory_block *mem;

if (is_zone_device_section(section))
return 0;

mutex_lock(&mem_sysfs_mutex);

mem = find_memory_block(section);
Expand Down Expand Up @@ -741,11 +730,16 @@ static int remove_memory_section(unsigned long node_id,
{
struct memory_block *mem;

if (is_zone_device_section(section))
return 0;

mutex_lock(&mem_sysfs_mutex);

/*
* Some users of the memory hotplug do not want/need memblock to
* track all sections. Skip over those.
*/
mem = find_memory_block(section);
if (!mem)
goto out_unlock;

unregister_mem_sect_under_nodes(mem, __section_nr(section));

mem->section_count--;
Expand All @@ -754,6 +748,7 @@ static int remove_memory_section(unsigned long node_id,
else
put_device(&mem->dev);

out_unlock:
mutex_unlock(&mem_sysfs_mutex);
return 0;
}
Expand Down
2 changes: 1 addition & 1 deletion include/linux/memory_hotplug.h
Original file line number Diff line number Diff line change
Expand Up @@ -111,7 +111,7 @@ extern int __remove_pages(struct zone *zone, unsigned long start_pfn,

/* reasonably generic interface to expand the physical pages in a zone */
extern int __add_pages(int nid, struct zone *zone, unsigned long start_pfn,
unsigned long nr_pages);
unsigned long nr_pages, bool want_memblock);

#ifdef CONFIG_NUMA
extern int memory_add_physaddr_to_nid(u64 start);
Expand Down
9 changes: 6 additions & 3 deletions mm/memory_hotplug.c
Original file line number Diff line number Diff line change
Expand Up @@ -494,7 +494,7 @@ static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
}

static int __meminit __add_section(int nid, struct zone *zone,
unsigned long phys_start_pfn)
unsigned long phys_start_pfn, bool want_memblock)
{
int ret;

Expand All @@ -511,6 +511,9 @@ static int __meminit __add_section(int nid, struct zone *zone,
if (ret < 0)
return ret;

if (!want_memblock)
return 0;

return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
}

Expand All @@ -521,7 +524,7 @@ static int __meminit __add_section(int nid, struct zone *zone,
* add the new pages.
*/
int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
unsigned long nr_pages)
unsigned long nr_pages, bool want_memblock)
{
unsigned long i;
int err = 0;
Expand Down Expand Up @@ -549,7 +552,7 @@ int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
}

for (i = start_sec; i <= end_sec; i++) {
err = __add_section(nid, zone, section_nr_to_pfn(i));
err = __add_section(nid, zone, section_nr_to_pfn(i), want_memblock);

/*
* EEXIST is finally dealt with by ioresource collision
Expand Down

0 comments on commit 1b862ae

Please sign in to comment.