mm: remove the memory isolate notifier
Luckily, we have no users left, so we can get rid of it.  Cleanup
set_migratetype_isolate() a little bit.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: David Hildenbrand <[email protected]>
Reviewed-by: Greg Kroah-Hartman <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Cc: "Rafael J. Wysocki" <[email protected]>
Cc: Pavel Tatashin <[email protected]>
Cc: Dan Williams <[email protected]>
Cc: Oscar Salvador <[email protected]>
Cc: Qian Cai <[email protected]>
Cc: Anshuman Khandual <[email protected]>
Cc: Pingfan Liu <[email protected]>
Cc: Michael Ellerman <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
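
For context, the interface being removed is the one declared in include/linux/memory.h below. A hypothetical last user (say, a balloon-style driver) would have registered a callback roughly like the following sketch; the driver and helper names are illustrative only, since, as the message says, no such user exists anymore.

/*
 * Rough sketch of a (now nonexistent) user of the memory isolate notifier.
 * example_count_owned_pages() is a hypothetical helper that reports how
 * many pages in the queried range this driver holds.
 */
#include <linux/init.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/notifier.h>

static int example_isolate_callback(struct notifier_block *nb,
				    unsigned long action, void *data)
{
	struct memory_isolate_notify *arg = data;

	if (action != MEM_ISOLATE_COUNT)
		return NOTIFY_OK;

	/* Account for pages in [start_pfn, start_pfn + nr_pages) we own. */
	arg->pages_found += example_count_owned_pages(arg->start_pfn,
						      arg->nr_pages);
	return NOTIFY_OK;
}

static struct notifier_block example_isolate_nb = {
	.notifier_call = example_isolate_callback,
};

static int __init example_init(void)
{
	return register_memory_isolate_notifier(&example_isolate_nb);
}

static void __exit example_exit(void)
{
	unregister_memory_isolate_notifier(&example_isolate_nb);
}

module_init(example_init);
module_exit(example_exit);

With the last user of this kind gone, both the registration API and the MEM_ISOLATE_COUNT accounting it served can be deleted, which is what the hunks below do.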
davidhildenbrand authored and torvalds committed Jan 31, 2020
1 parent 3f13535 commit 3f9903b
Showing 3 changed files with 4 additions and 80 deletions.
19 changes: 0 additions & 19 deletions drivers/base/memory.c
@@ -70,20 +70,6 @@ void unregister_memory_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL(unregister_memory_notifier);

static ATOMIC_NOTIFIER_HEAD(memory_isolate_chain);

int register_memory_isolate_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_register(&memory_isolate_chain, nb);
}
EXPORT_SYMBOL(register_memory_isolate_notifier);

void unregister_memory_isolate_notifier(struct notifier_block *nb)
{
atomic_notifier_chain_unregister(&memory_isolate_chain, nb);
}
EXPORT_SYMBOL(unregister_memory_isolate_notifier);

static void memory_block_release(struct device *dev)
{
struct memory_block *mem = to_memory_block(dev);
@@ -175,11 +161,6 @@ int memory_notify(unsigned long val, void *v)
return blocking_notifier_call_chain(&memory_chain, val, v);
}

int memory_isolate_notify(unsigned long val, void *v)
{
return atomic_notifier_call_chain(&memory_isolate_chain, val, v);
}

/*
* The probe routines leave the pages uninitialized, just as the bootmem code
* does. Make sure we do not access them, but instead use only information from
27 changes: 0 additions & 27 deletions include/linux/memory.h
@@ -55,19 +55,6 @@ struct memory_notify {
int status_change_nid;
};

/*
* During pageblock isolation, count the number of pages within the
* range [start_pfn, start_pfn + nr_pages) which are owned by code
* in the notifier chain.
*/
#define MEM_ISOLATE_COUNT (1<<0)

struct memory_isolate_notify {
unsigned long start_pfn; /* Start of range to check */
unsigned int nr_pages; /* # pages in range to check */
unsigned int pages_found; /* # pages owned found by callbacks */
};

struct notifier_block;
struct mem_section;

@@ -94,27 +81,13 @@ static inline int memory_notify(unsigned long val, void *v)
{
return 0;
}
static inline int register_memory_isolate_notifier(struct notifier_block *nb)
{
return 0;
}
static inline void unregister_memory_isolate_notifier(struct notifier_block *nb)
{
}
static inline int memory_isolate_notify(unsigned long val, void *v)
{
return 0;
}
#else
extern int register_memory_notifier(struct notifier_block *nb);
extern void unregister_memory_notifier(struct notifier_block *nb);
extern int register_memory_isolate_notifier(struct notifier_block *nb);
extern void unregister_memory_isolate_notifier(struct notifier_block *nb);
int create_memory_block_devices(unsigned long start, unsigned long size);
void remove_memory_block_devices(unsigned long start, unsigned long size);
extern void memory_dev_init(void);
extern int memory_notify(unsigned long val, void *v);
extern int memory_isolate_notify(unsigned long val, void *v);
extern struct memory_block *find_memory_block(struct mem_section *);
typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *);
extern int walk_memory_blocks(unsigned long start, unsigned long size,
38 changes: 4 additions & 34 deletions mm/page_isolation.c
@@ -18,9 +18,7 @@
static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags)
{
struct zone *zone;
unsigned long flags, pfn;
struct memory_isolate_notify arg;
int notifier_ret;
unsigned long flags;
int ret = -EBUSY;

zone = page_zone(page);
@@ -35,41 +33,11 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
if (is_migrate_isolate_page(page))
goto out;

pfn = page_to_pfn(page);
arg.start_pfn = pfn;
arg.nr_pages = pageblock_nr_pages;
arg.pages_found = 0;

/*
* It may be possible to isolate a pageblock even if the
* migratetype is not MIGRATE_MOVABLE. The memory isolation
* notifier chain is used by balloon drivers to return the
* number of pages in a range that are held by the balloon
* driver to shrink memory. If all the pages are accounted for
* by balloons, are free, or on the LRU, isolation can continue.
* Later, for example, when memory hotplug notifier runs, these
* pages reported as "can be isolated" should be isolated(freed)
* by the balloon driver through the memory notifier chain.
*/
notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
notifier_ret = notifier_to_errno(notifier_ret);
if (notifier_ret)
goto out;
/*
* FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
* We just check MOVABLE pages.
*/
if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype,
isol_flags))
ret = 0;

/*
* immobile means "not-on-lru" pages. If immobile is larger than
* removable-by-driver pages reported by notifier, we'll fail.
*/

out:
if (!ret) {
if (!has_unmovable_pages(zone, page, 0, migratetype, isol_flags)) {
unsigned long nr_pages;
int mt = get_pageblock_migratetype(page);

@@ -79,8 +47,10 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
NULL);

__mod_zone_freepage_state(zone, -nr_pages, mt);
ret = 0;
}

out:
spin_unlock_irqrestore(&zone->lock, flags);
if (!ret)
drain_all_pages(zone);
