Skip to content

Commit

Permalink
Add a configure option to group pages by mobility
Browse files Browse the repository at this point in the history
The grouping mechanism has some memory overhead and a more complex allocation
path.  This patch allows the strategy to be disabled for small memory systems
or if it is known the workload is suffering because of the strategy.  It also
acts to show where the page groupings strategy interacts with the standard
buddy allocator.

Signed-off-by: Mel Gorman <[email protected]>
Signed-off-by: Joel Schopp <[email protected]>
Cc: Andy Whitcroft <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
  • Loading branch information
gormanm authored and Linus Torvalds committed Oct 16, 2007
1 parent 535131e commit b92a6ed
Show file tree
Hide file tree
Showing 3 changed files with 56 additions and 14 deletions.
6 changes: 6 additions & 0 deletions include/linux/mmzone.h
Original file line number Diff line number Diff line change
Expand Up @@ -33,9 +33,15 @@
*/
#define PAGE_ALLOC_COSTLY_ORDER 3

/*
 * Migrate types indexing the buddy allocator's per-mobility free lists.
 * With grouping enabled there are two distinct lists; with it disabled
 * both names alias list 0, so only a single free list exists and the
 * grouping memory overhead disappears.
 */
#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
#define MIGRATE_UNMOVABLE 0
#define MIGRATE_MOVABLE 1
#define MIGRATE_TYPES 2
#else
/* Grouping disabled: both types collapse onto the one list at index 0 */
#define MIGRATE_UNMOVABLE 0
#define MIGRATE_MOVABLE 0
#define MIGRATE_TYPES 1
#endif

#define for_each_migratetype_order(order, type) \
for (order = 0; order < MAX_ORDER; order++) \
Expand Down
13 changes: 13 additions & 0 deletions init/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -607,6 +607,19 @@ config BASE_SMALL
default 0 if BASE_FULL
default 1 if !BASE_FULL

config PAGE_GROUP_BY_MOBILITY
bool "Group pages based on their mobility in the page allocator"
def_bool y
help
The standard allocator will fragment memory over time which means
that high order allocations will fail even if kswapd is running. If
this option is set, the allocator will try and group page types
based on their ability to migrate or reclaim. This is a best effort
attempt at lowering fragmentation which a few workloads care about.
The loss is a more complex allocator that may perform slower. If
you are interested in working with large pages, say Y and set
/proc/sys/vm/min_free_kbytes to 16374. Otherwise say N.

menuconfig MODULES
bool "Enable loadable module support"
help
Expand Down
51 changes: 37 additions & 14 deletions mm/page_alloc.c
Original file line number Diff line number Diff line change
Expand Up @@ -158,6 +158,7 @@ int nr_node_ids __read_mostly = MAX_NUMNODES;
EXPORT_SYMBOL(nr_node_ids);
#endif

#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
static inline int get_pageblock_migratetype(struct page *page)
{
return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
Expand All @@ -174,6 +175,22 @@ static inline int gfpflags_to_migratetype(gfp_t gfp_flags)
return ((gfp_flags & __GFP_MOVABLE) != 0);
}

#else
/*
 * CONFIG_PAGE_GROUP_BY_MOBILITY disabled: pageblock mobility is not
 * tracked, so every pageblock is reported as MIGRATE_UNMOVABLE (the
 * single free list).
 */
static inline int get_pageblock_migratetype(struct page *page)
{
return MIGRATE_UNMOVABLE;
}

/* No-op stub: with grouping disabled there are no pageblock bits to set */
static void set_pageblock_migratetype(struct page *page, int migratetype)
{
}

/*
 * With grouping disabled, gfp_flags (including __GFP_MOVABLE) are ignored:
 * every allocation is served from the single MIGRATE_UNMOVABLE list.
 */
static inline int gfpflags_to_migratetype(gfp_t gfp_flags)
{
return MIGRATE_UNMOVABLE;
}
#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
Expand Down Expand Up @@ -653,6 +670,7 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
return 0;
}

#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
/*
* This array describes the order lists are fallen back to when
* the free lists for the desirable migrate type are depleted
Expand Down Expand Up @@ -709,6 +727,13 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,

return NULL;
}
#else
/*
 * Fallback stub for !CONFIG_PAGE_GROUP_BY_MOBILITY: there are no other
 * migrate-type free lists to borrow from, so always return NULL and let
 * the caller treat the allocation attempt as failed.
 */
static struct page *__rmqueue_fallback(struct zone *zone, int order,
int start_migratetype)
{
return NULL;
}
#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */

/*
* Do the hard work of removing an element from the buddy allocator.
Expand Down Expand Up @@ -953,27 +978,25 @@ static struct page *buffered_rmqueue(struct zonelist *zonelist,
if (unlikely(!pcp->count))
goto failed;
}

#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
/* Find a page of the appropriate migrate type */
list_for_each_entry(page, &pcp->list, lru) {
if (page_private(page) == migratetype) {
list_del(&page->lru);
pcp->count--;
list_for_each_entry(page, &pcp->list, lru)
if (page_private(page) == migratetype)
break;
}
}

/*
* Check if a page of the appropriate migrate type
* was found. If not, allocate more to the pcp list
*/
if (&page->lru == &pcp->list) {
/* Allocate more to the pcp list if necessary */
if (unlikely(&page->lru == &pcp->list)) {
pcp->count += rmqueue_bulk(zone, 0,
pcp->batch, &pcp->list, migratetype);
page = list_entry(pcp->list.next, struct page, lru);
VM_BUG_ON(page_private(page) != migratetype);
list_del(&page->lru);
pcp->count--;
}
#else
page = list_entry(pcp->list.next, struct page, lru);
#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */

list_del(&page->lru);
pcp->count--;
} else {
spin_lock_irqsave(&zone->lock, flags);
page = __rmqueue(zone, order, migratetype);
Expand Down

0 comments on commit b92a6ed

Please sign in to comment.