memory hotplug: free memmaps allocated by bootmem
This patch frees memmaps which were allocated by bootmem.

Freeing the usemap is not necessary.  The pages that hold usemaps may still be
needed by other sections.

If the section being removed is the last section on the node, that section is
the final user of the usemap page (usemaps are allocated on its section by the
previous patch).  Even then it must not be freed, because the section has to be
in the logically offline state, in which all of its pages are isolated from the
page allocator.  If the usemap page were freed, the page allocator could hand
it out again even though it is about to be removed physically, which would be a
disaster.  So this patch keeps it as it is.
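To put the saving in perspective, here is a rough worked example of how much
memory one section's memmap occupies.  The constants are illustrative
assumptions (a typical x86_64 SPARSEMEM layout: 128 MB sections, 4 KB pages,
and a struct page of roughly 64 bytes); they are not taken from this patch.

#include <stdio.h>

int main(void)
{
	/* Illustrative values only, not from the patch itself. */
	const unsigned long section_size = 128UL << 20; /* 128 MB per mem_section     */
	const unsigned long page_size    = 4096;        /* 4 KB pages                  */
	const unsigned long struct_page  = 64;          /* approx. sizeof(struct page) */

	unsigned long pages_per_section = section_size / page_size;   /* 32768 */
	unsigned long memmap_bytes = pages_per_section * struct_page; /* ~2 MB */

	printf("memmap per section: %lu KB (%lu pages)\n",
	       memmap_bytes >> 10,
	       (memmap_bytes + page_size - 1) / page_size);
	return 0;
}

Under these assumptions each removed section can hand roughly 2 MB (512 pages)
of memmap back to the buddy allocator, which is what the new free_map_bootmem()
in mm/sparse.c below does page by page; the usemap is only tens of bytes and
shares its page with other usemaps, so it is deliberately left in place.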

Signed-off-by: Yasunori Goto <[email protected]>
Cc: Badari Pulavarty <[email protected]>
Cc: Yinghai Lu <[email protected]>
Cc: Yasunori Goto <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Yasunori Goto authored and torvalds committed Apr 28, 2008
1 parent 86f6dae commit 0c0a4a5
Showing 4 changed files with 60 additions and 7 deletions.
3 changes: 1 addition & 2 deletions mm/internal.h
@@ -34,8 +34,7 @@ static inline void __put_page(struct page *page)
	atomic_dec(&page->_count);
}

extern void __init __free_pages_bootmem(struct page *page,
unsigned int order);
extern void __free_pages_bootmem(struct page *page, unsigned int order);

/*
 * function for dealing with page's order in buddy system.
11 changes: 11 additions & 0 deletions mm/memory_hotplug.c
@@ -198,6 +198,16 @@ static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
	return register_new_memory(__pfn_to_section(phys_start_pfn));
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static int __remove_section(struct zone *zone, struct mem_section *ms)
{
	/*
	 * XXX: Freeing memmap with vmemmap is not implement yet.
	 * This should be removed later.
	 */
	return -EBUSY;
}
#else
static int __remove_section(struct zone *zone, struct mem_section *ms)
{
	unsigned long flags;
@@ -216,6 +226,7 @@ static int __remove_section(struct zone *zone, struct mem_section *ms)
	pgdat_resize_unlock(pgdat, &flags);
	return 0;
}
#endif

/*
 * Reasonably generic function for adding memory. It is
2 changes: 1 addition & 1 deletion mm/page_alloc.c
@@ -546,7 +546,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
/*
 * permit the bootmem allocator to evade page validation on high-order frees
 */
void __init __free_pages_bootmem(struct page *page, unsigned int order)
void __free_pages_bootmem(struct page *page, unsigned int order)
{
	if (order == 0) {
		__ClearPageReserved(page);
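The only change to this file is dropping the __init annotation from
__free_pages_bootmem().  __init places a function in the discardable
.init.text section, which the kernel frees once boot completes, so nothing
reachable from the runtime memory-hotplug path may keep that annotation.  A
minimal sketch of the idea follows; __init_sketch is a hypothetical stand-in,
not the real definition from <linux/init.h> (which carries extra attributes).

#include <stdio.h>

/* Hypothetical stand-in: the core of __init is a section attribute that
 * lets the kernel discard the code after boot (see free_initmem()). */
#define __init_sketch __attribute__((__section__(".init.text")))

static void __init_sketch boot_only_setup(void)
{
	/* in the kernel, everything placed in .init.text is freed after boot */
	printf("boot-time-only work\n");
}

int main(void)
{
	boot_only_setup();
	return 0;
}

With the annotation removed, free_map_bootmem() can safely reach
__free_pages_bootmem() long after boot.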
51 changes: 47 additions & 4 deletions mm/sparse.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
@@ -376,6 +377,9 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	return; /* XXX: Not implemented yet */
}
static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
}
#else
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
@@ -413,28 +417,67 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
	free_pages((unsigned long)memmap,
		   get_order(sizeof(struct page) * nr_pages));
}

static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	int magic;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = atomic_read(&page->_mapcount);

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page->private;

		/*
		 * When this function is called, the removing section is
		 * logical offlined state. This means all pages are isolated
		 * from page allocator. If removing section's memmap is placed
		 * on the same section, it must not be freed.
		 * If it is freed, page allocator may allocate it which will
		 * be removed physically soon.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
	struct page *usemap_page;
	unsigned long nr_pages;

	if (!usemap)
		return;

	usemap_page = virt_to_page(usemap);
	/*
	 * Check to see if allocation came from hot-plug-add
	 */
	if (PageSlab(virt_to_page(usemap))) {
	if (PageSlab(usemap_page)) {
		kfree(usemap);
		if (memmap)
			__kfree_section_memmap(memmap, PAGES_PER_SECTION);
		return;
	}

	/*
	 * TODO: Allocations came from bootmem - how do I free up ?
	 * The usemap came from bootmem. This is packed with other usemaps
	 * on the section which has pgdat at boot time. Just keep it as is now.
	 */
	printk(KERN_WARNING "Not freeing up allocations from bootmem "
		"- leaking memory\n");

	if (memmap) {
		struct page *memmap_page;
		memmap_page = virt_to_page(memmap);

		nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
			>> PAGE_SHIFT;

		free_map_bootmem(memmap_page, nr_pages);
	}
}

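free_map_bootmem() above only drops references with put_page_bootmem(); the
memmap page actually returns to the buddy allocator, via the now non-__init
__free_pages_bootmem(), once its last bootmem reference is gone.
put_page_bootmem() itself comes from an earlier patch in this series and is
not shown here, so the following is only a self-contained toy model of the
rule; the demo_* names are hypothetical and plain integers stand in for
struct page fields.

#include <stdio.h>

/* Toy stand-ins for struct page bookkeeping; purely illustrative. */
struct demo_bootmem_page {
	int  refcount;           /* models page->_count                         */
	long resides_on_section; /* section where this memmap page itself lives */
	long describes_section;  /* models page->private: section being removed */
};

/* Models put_page_bootmem(): drop one reference; the last put is the point
 * where the kernel would hand the page to __free_pages_bootmem(). */
static void demo_put_page_bootmem(struct demo_bootmem_page *p)
{
	if (--p->refcount == 0)
		printf("memmap page freed back to the buddy allocator\n");
}

/* Models the free_map_bootmem() rule: a memmap page that sits on the very
 * section being removed must stay put, because that memory is about to
 * disappear physically. */
static void demo_free_map_bootmem(struct demo_bootmem_page *pages, int n)
{
	for (int i = 0; i < n; i++) {
		if (pages[i].resides_on_section == pages[i].describes_section)
			continue;
		demo_put_page_bootmem(&pages[i]);
	}
}

int main(void)
{
	struct demo_bootmem_page pages[2] = {
		{ 1, /* resides on */ 3, /* describes */ 9 }, /* freed */
		{ 1, /* resides on */ 9, /* describes */ 9 }, /* kept  */
	};
	demo_free_map_bootmem(pages, 2);
	return 0;
}

Running this prints a single "freed" line: the page that lives on the section
being removed is skipped, which is exactly the case the comment in
free_map_bootmem() warns about.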
